From a3dcdc051f38c4893317757ac925b9b5c79350ef Mon Sep 17 00:00:00 2001 From: Shuhei Matsumoto Date: Wed, 7 Jul 2021 04:42:41 +0900 Subject: [PATCH] bdev/nvme: Rename nvme_bdev_ctrlr to nvme_ctrlr This object is per I/O path and will be aggregated by a new upper layer object. Hence rename nvme_bdev_ctrlr to nvme_ctrlr. The following patches will then add nvme_bdev_ctrlr as a different upper layer object. Signed-off-by: Shuhei Matsumoto Change-Id: Ieed634447785cc98140b3d49c52a2c753988ece7 Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8381 Tested-by: SPDK CI Jenkins Community-CI: Broadcom CI Community-CI: Mellanox Build Bot Reviewed-by: Changpeng Liu Reviewed-by: Aleksey Marchuk --- module/bdev/nvme/bdev_nvme.c | 510 +++++++++--------- module/bdev/nvme/bdev_nvme_cuse_rpc.c | 8 +- module/bdev/nvme/bdev_nvme_rpc.c | 18 +- module/bdev/nvme/bdev_ocssd.c | 48 +- module/bdev/nvme/bdev_ocssd.h | 8 +- module/bdev/nvme/common.c | 114 ++-- module/bdev/nvme/common.h | 26 +- module/bdev/nvme/nvme_rpc.c | 10 +- module/bdev/nvme/vbdev_opal.c | 14 +- module/bdev/nvme/vbdev_opal_rpc.c | 8 +- .../lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c | 366 ++++++------- .../bdev/nvme/bdev_ocssd.c/bdev_ocssd_ut.c | 108 ++-- 12 files changed, 619 insertions(+), 619 deletions(-) diff --git a/module/bdev/nvme/bdev_nvme.c b/module/bdev/nvme/bdev_nvme.c index ddde2d21b0..7d94670b21 100644 --- a/module/bdev/nvme/bdev_nvme.c +++ b/module/bdev/nvme/bdev_nvme.c @@ -140,9 +140,9 @@ static struct spdk_poller *g_hotplug_poller; static struct spdk_poller *g_hotplug_probe_poller; static struct spdk_nvme_probe_ctx *g_hotplug_probe_ctx; -static void nvme_ctrlr_populate_namespaces(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +static void nvme_ctrlr_populate_namespaces(struct nvme_ctrlr *nvme_ctrlr, struct nvme_async_probe_ctx *ctx); -static void nvme_ctrlr_populate_namespaces_done(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +static void nvme_ctrlr_populate_namespaces_done(struct nvme_ctrlr *nvme_ctrlr, struct nvme_async_probe_ctx *ctx); static int bdev_nvme_library_init(void); static void bdev_nvme_library_fini(void); @@ -188,12 +188,12 @@ static int bdev_nvme_io_passthru_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qp static int bdev_nvme_abort(struct nvme_ctrlr_channel *ctrlr_ch, struct nvme_bdev_io *bio, struct nvme_bdev_io *bio_to_abort); static int bdev_nvme_reset(struct nvme_ctrlr_channel *ctrlr_ch, struct spdk_bdev_io *bdev_io); -static int bdev_nvme_failover(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, bool remove); +static int bdev_nvme_failover(struct nvme_ctrlr *nvme_ctrlr, bool remove); static void remove_cb(void *cb_ctx, struct spdk_nvme_ctrlr *ctrlr); -typedef void (*populate_namespace_fn)(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +typedef void (*populate_namespace_fn)(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvme_ns, struct nvme_async_probe_ctx *ctx); -static void nvme_ctrlr_populate_standard_namespace(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +static void nvme_ctrlr_populate_standard_namespace(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvme_ns, struct nvme_async_probe_ctx *ctx); static populate_namespace_fn g_populate_namespace_fn[] = { @@ -267,9 +267,9 @@ bdev_nvme_find_io_path(struct nvme_bdev *nbdev, struct nvme_ctrlr_channel *ctrlr static inline bool bdev_nvme_find_admin_path(struct nvme_ctrlr_channel *ctrlr_ch, - struct nvme_bdev_ctrlr **_nvme_bdev_ctrlr) + struct nvme_ctrlr **_nvme_ctrlr) { - *_nvme_bdev_ctrlr = ctrlr_ch->ctrlr; + *_nvme_ctrlr = ctrlr_ch->ctrlr; return true; } @@ -344,13 +344,13 @@ static int 
bdev_nvme_poll_adminq(void *arg) { int32_t rc; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = arg; + struct nvme_ctrlr *nvme_ctrlr = arg; - assert(nvme_bdev_ctrlr != NULL); + assert(nvme_ctrlr != NULL); - rc = spdk_nvme_ctrlr_process_admin_completions(nvme_bdev_ctrlr->ctrlr); + rc = spdk_nvme_ctrlr_process_admin_completions(nvme_ctrlr->ctrlr); if (rc < 0) { - bdev_nvme_failover(nvme_bdev_ctrlr, false); + bdev_nvme_failover(nvme_ctrlr, false); } return rc == 0 ? SPDK_POLLER_IDLE : SPDK_POLLER_BUSY; @@ -369,7 +369,7 @@ bdev_nvme_destruct(void *ctx) if (!nvme_ns->populated) { pthread_mutex_unlock(&nvme_ns->ctrlr->mutex); - nvme_bdev_ctrlr_release(nvme_ns->ctrlr); + nvme_ctrlr_release(nvme_ns->ctrlr); } else { pthread_mutex_unlock(&nvme_ns->ctrlr->mutex); } @@ -442,26 +442,26 @@ bdev_nvme_destroy_qpair(struct nvme_ctrlr_channel *ctrlr_ch) } static void -_bdev_nvme_check_pending_destruct(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr) +_bdev_nvme_check_pending_destruct(struct nvme_ctrlr *nvme_ctrlr) { - pthread_mutex_lock(&nvme_bdev_ctrlr->mutex); - if (nvme_bdev_ctrlr->destruct_after_reset) { - assert(nvme_bdev_ctrlr->ref == 0 && nvme_bdev_ctrlr->destruct); - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_lock(&nvme_ctrlr->mutex); + if (nvme_ctrlr->destruct_after_reset) { + assert(nvme_ctrlr->ref == 0 && nvme_ctrlr->destruct); + pthread_mutex_unlock(&nvme_ctrlr->mutex); - spdk_thread_send_msg(nvme_bdev_ctrlr->thread, nvme_bdev_ctrlr_unregister, - nvme_bdev_ctrlr); + spdk_thread_send_msg(nvme_ctrlr->thread, nvme_ctrlr_unregister, + nvme_ctrlr); } else { - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_unlock(&nvme_ctrlr->mutex); } } static void bdev_nvme_check_pending_destruct(struct spdk_io_channel_iter *i, int status) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = spdk_io_channel_iter_get_ctx(i); + struct nvme_ctrlr *nvme_ctrlr = spdk_io_channel_iter_get_ctx(i); - _bdev_nvme_check_pending_destruct(nvme_bdev_ctrlr); + _bdev_nvme_check_pending_destruct(nvme_ctrlr); } static void @@ -500,7 +500,7 @@ bdev_nvme_abort_pending_resets(struct spdk_io_channel_iter *i) } static void -bdev_nvme_reset_io_complete(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +bdev_nvme_reset_io_complete(struct nvme_ctrlr *nvme_ctrlr, struct spdk_bdev_io *bdev_io, int rc) { enum spdk_bdev_io_status io_status = SPDK_BDEV_IO_STATUS_SUCCESS; @@ -512,20 +512,20 @@ bdev_nvme_reset_io_complete(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, spdk_bdev_io_complete(bdev_io, io_status); /* Make sure we clear any pending resets before returning. */ - spdk_for_each_channel(nvme_bdev_ctrlr, + spdk_for_each_channel(nvme_ctrlr, rc == 0 ? 
bdev_nvme_complete_pending_resets : bdev_nvme_abort_pending_resets, - nvme_bdev_ctrlr, + nvme_ctrlr, bdev_nvme_check_pending_destruct); } static void -_bdev_nvme_reset_complete(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, int rc) +_bdev_nvme_reset_complete(struct nvme_ctrlr *nvme_ctrlr, int rc) { struct nvme_ctrlr_trid *curr_trid; - struct spdk_bdev_io *bdev_io = nvme_bdev_ctrlr->reset_bdev_io; + struct spdk_bdev_io *bdev_io = nvme_ctrlr->reset_bdev_io; - nvme_bdev_ctrlr->reset_bdev_io = NULL; + nvme_ctrlr->reset_bdev_io = NULL; if (rc) { SPDK_ERRLOG("Resetting controller failed.\n"); @@ -533,31 +533,31 @@ _bdev_nvme_reset_complete(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, int rc) SPDK_NOTICELOG("Resetting controller successful.\n"); } - pthread_mutex_lock(&nvme_bdev_ctrlr->mutex); - nvme_bdev_ctrlr->resetting = false; - nvme_bdev_ctrlr->failover_in_progress = false; + pthread_mutex_lock(&nvme_ctrlr->mutex); + nvme_ctrlr->resetting = false; + nvme_ctrlr->failover_in_progress = false; - curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids); + curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); assert(curr_trid != NULL); - assert(&curr_trid->trid == nvme_bdev_ctrlr->connected_trid); + assert(&curr_trid->trid == nvme_ctrlr->connected_trid); curr_trid->is_failed = rc != 0 ? true : false; - if (nvme_bdev_ctrlr->ref == 0 && nvme_bdev_ctrlr->destruct) { + if (nvme_ctrlr->ref == 0 && nvme_ctrlr->destruct) { /* Destruct ctrlr after clearing pending resets. */ - nvme_bdev_ctrlr->destruct_after_reset = true; + nvme_ctrlr->destruct_after_reset = true; } - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_unlock(&nvme_ctrlr->mutex); if (bdev_io) { - bdev_nvme_reset_io_complete(nvme_bdev_ctrlr, bdev_io, rc); + bdev_nvme_reset_io_complete(nvme_ctrlr, bdev_io, rc); } else { /* Make sure we clear any pending resets before returning. */ - spdk_for_each_channel(nvme_bdev_ctrlr, + spdk_for_each_channel(nvme_ctrlr, rc == 0 ? 
bdev_nvme_complete_pending_resets : bdev_nvme_abort_pending_resets, - nvme_bdev_ctrlr, + nvme_ctrlr, bdev_nvme_check_pending_destruct); } } @@ -565,9 +565,9 @@ _bdev_nvme_reset_complete(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, int rc) static void _bdev_nvme_reset_create_qpairs_done(struct spdk_io_channel_iter *i, int status) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = spdk_io_channel_iter_get_ctx(i); + struct nvme_ctrlr *nvme_ctrlr = spdk_io_channel_iter_get_ctx(i); - _bdev_nvme_reset_complete(nvme_bdev_ctrlr, status); + _bdev_nvme_reset_complete(nvme_ctrlr, status); } static void @@ -585,7 +585,7 @@ _bdev_nvme_reset_create_qpair(struct spdk_io_channel_iter *i) static void _bdev_nvme_reset_ctrlr(struct spdk_io_channel_iter *i, int status) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = spdk_io_channel_iter_get_ctx(i); + struct nvme_ctrlr *nvme_ctrlr = spdk_io_channel_iter_get_ctx(i); int rc; if (status) { @@ -593,20 +593,20 @@ _bdev_nvme_reset_ctrlr(struct spdk_io_channel_iter *i, int status) goto err; } - rc = spdk_nvme_ctrlr_reset(nvme_bdev_ctrlr->ctrlr); + rc = spdk_nvme_ctrlr_reset(nvme_ctrlr->ctrlr); if (rc != 0) { goto err; } /* Recreate all of the I/O queue pairs */ - spdk_for_each_channel(nvme_bdev_ctrlr, + spdk_for_each_channel(nvme_ctrlr, _bdev_nvme_reset_create_qpair, - nvme_bdev_ctrlr, + nvme_ctrlr, _bdev_nvme_reset_create_qpairs_done); return; err: - _bdev_nvme_reset_complete(nvme_bdev_ctrlr, rc); + _bdev_nvme_reset_complete(nvme_ctrlr, rc); } static void @@ -620,27 +620,27 @@ _bdev_nvme_reset_destroy_qpair(struct spdk_io_channel_iter *i) } static int -_bdev_nvme_reset(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr) +_bdev_nvme_reset(struct nvme_ctrlr *nvme_ctrlr) { - pthread_mutex_lock(&nvme_bdev_ctrlr->mutex); - if (nvme_bdev_ctrlr->destruct) { - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_lock(&nvme_ctrlr->mutex); + if (nvme_ctrlr->destruct) { + pthread_mutex_unlock(&nvme_ctrlr->mutex); return -EBUSY; } - if (nvme_bdev_ctrlr->resetting) { - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + if (nvme_ctrlr->resetting) { + pthread_mutex_unlock(&nvme_ctrlr->mutex); SPDK_NOTICELOG("Unable to perform reset, already in progress.\n"); return -EAGAIN; } - nvme_bdev_ctrlr->resetting = true; - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + nvme_ctrlr->resetting = true; + pthread_mutex_unlock(&nvme_ctrlr->mutex); /* First, delete all NVMe I/O queue pairs. */ - spdk_for_each_channel(nvme_bdev_ctrlr, + spdk_for_each_channel(nvme_ctrlr, _bdev_nvme_reset_destroy_qpair, - nvme_bdev_ctrlr, + nvme_ctrlr, _bdev_nvme_reset_ctrlr); return 0; @@ -670,35 +670,35 @@ bdev_nvme_reset(struct nvme_ctrlr_channel *ctrlr_ch, struct spdk_bdev_io *bdev_i } static int -_bdev_nvme_failover_start(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, bool remove) +_bdev_nvme_failover_start(struct nvme_ctrlr *nvme_ctrlr, bool remove) { struct nvme_ctrlr_trid *curr_trid = NULL, *next_trid = NULL; int rc; - pthread_mutex_lock(&nvme_bdev_ctrlr->mutex); - if (nvme_bdev_ctrlr->destruct) { - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_lock(&nvme_ctrlr->mutex); + if (nvme_ctrlr->destruct) { + pthread_mutex_unlock(&nvme_ctrlr->mutex); /* Don't bother resetting if the controller is in the process of being destructed. 
*/ return -EBUSY; } - curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids); + curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); assert(curr_trid); - assert(&curr_trid->trid == nvme_bdev_ctrlr->connected_trid); + assert(&curr_trid->trid == nvme_ctrlr->connected_trid); next_trid = TAILQ_NEXT(curr_trid, link); - if (nvme_bdev_ctrlr->resetting) { - if (next_trid && !nvme_bdev_ctrlr->failover_in_progress) { + if (nvme_ctrlr->resetting) { + if (next_trid && !nvme_ctrlr->failover_in_progress) { rc = -EAGAIN; } else { rc = -EBUSY; } - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_unlock(&nvme_ctrlr->mutex); SPDK_NOTICELOG("Unable to perform reset, already in progress.\n"); return rc; } - nvme_bdev_ctrlr->resetting = true; + nvme_ctrlr->resetting = true; curr_trid->is_failed = true; if (next_trid) { @@ -707,37 +707,37 @@ _bdev_nvme_failover_start(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, bool remove) SPDK_NOTICELOG("Start failover from %s:%s to %s:%s\n", curr_trid->trid.traddr, curr_trid->trid.trsvcid, next_trid->trid.traddr, next_trid->trid.trsvcid); - nvme_bdev_ctrlr->failover_in_progress = true; - spdk_nvme_ctrlr_fail(nvme_bdev_ctrlr->ctrlr); - nvme_bdev_ctrlr->connected_trid = &next_trid->trid; - rc = spdk_nvme_ctrlr_set_trid(nvme_bdev_ctrlr->ctrlr, &next_trid->trid); + nvme_ctrlr->failover_in_progress = true; + spdk_nvme_ctrlr_fail(nvme_ctrlr->ctrlr); + nvme_ctrlr->connected_trid = &next_trid->trid; + rc = spdk_nvme_ctrlr_set_trid(nvme_ctrlr->ctrlr, &next_trid->trid); assert(rc == 0); - TAILQ_REMOVE(&nvme_bdev_ctrlr->trids, curr_trid, link); + TAILQ_REMOVE(&nvme_ctrlr->trids, curr_trid, link); if (!remove) { /** Shuffle the old trid to the end of the list and use the new one. * Allows for round robin through multiple connections. */ - TAILQ_INSERT_TAIL(&nvme_bdev_ctrlr->trids, curr_trid, link); + TAILQ_INSERT_TAIL(&nvme_ctrlr->trids, curr_trid, link); } else { free(curr_trid); } } - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_unlock(&nvme_ctrlr->mutex); return 0; } static int -bdev_nvme_failover(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, bool remove) +bdev_nvme_failover(struct nvme_ctrlr *nvme_ctrlr, bool remove) { int rc; - rc = _bdev_nvme_failover_start(nvme_bdev_ctrlr, remove); + rc = _bdev_nvme_failover_start(nvme_ctrlr, remove); if (rc == 0) { /* First, delete all NVMe I/O queue pairs. 
*/ - spdk_for_each_channel(nvme_bdev_ctrlr, + spdk_for_each_channel(nvme_ctrlr, _bdev_nvme_reset_destroy_qpair, - nvme_bdev_ctrlr, + nvme_ctrlr, _bdev_nvme_reset_ctrlr); } else if (rc != -EBUSY) { return rc; @@ -1018,12 +1018,12 @@ bdev_nvme_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type) static int bdev_nvme_create_ctrlr_channel_cb(void *io_device, void *ctx_buf) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = io_device; + struct nvme_ctrlr *nvme_ctrlr = io_device; struct nvme_ctrlr_channel *ctrlr_ch = ctx_buf; struct spdk_io_channel *pg_ch; int rc; - pg_ch = spdk_get_io_channel(&g_nvme_bdev_ctrlrs); + pg_ch = spdk_get_io_channel(&g_nvme_ctrlrs); if (!pg_ch) { return -1; } @@ -1038,14 +1038,14 @@ bdev_nvme_create_ctrlr_channel_cb(void *io_device, void *ctx_buf) TAILQ_INIT(&ctrlr_ch->pending_resets); - if (spdk_nvme_ctrlr_is_ocssd_supported(nvme_bdev_ctrlr->ctrlr)) { + if (spdk_nvme_ctrlr_is_ocssd_supported(nvme_ctrlr->ctrlr)) { rc = bdev_ocssd_create_io_channel(ctrlr_ch); if (rc != 0) { goto err_ocssd_ch; } } - ctrlr_ch->ctrlr = nvme_bdev_ctrlr; + ctrlr_ch->ctrlr = nvme_ctrlr; rc = bdev_nvme_create_qpair(ctrlr_ch); if (rc != 0) { @@ -1455,7 +1455,7 @@ nvme_disk_create(struct spdk_bdev *disk, const char *base_name, } static int -nvme_bdev_create(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, struct nvme_ns *nvme_ns) +nvme_bdev_create(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvme_ns) { struct nvme_bdev *bdev; int rc; @@ -1467,10 +1467,10 @@ nvme_bdev_create(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, struct nvme_ns *nvme_n } bdev->nvme_ns = nvme_ns; - bdev->opal = nvme_bdev_ctrlr->opal_dev != NULL; + bdev->opal = nvme_ctrlr->opal_dev != NULL; - rc = nvme_disk_create(&bdev->disk, nvme_bdev_ctrlr->name, nvme_bdev_ctrlr->ctrlr, - nvme_ns->ns, nvme_bdev_ctrlr->prchk_flags, bdev); + rc = nvme_disk_create(&bdev->disk, nvme_ctrlr->name, nvme_ctrlr->ctrlr, + nvme_ns->ns, nvme_ctrlr->prchk_flags, bdev); if (rc != 0) { SPDK_ERRLOG("Failed to create NVMe disk\n"); free(bdev); @@ -1499,10 +1499,10 @@ bdev_nvme_compare_ns(struct spdk_nvme_ns *ns1, struct spdk_nvme_ns *ns2) } static void -nvme_ctrlr_populate_standard_namespace(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +nvme_ctrlr_populate_standard_namespace(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvme_ns, struct nvme_async_probe_ctx *ctx) { - struct spdk_nvme_ctrlr *ctrlr = nvme_bdev_ctrlr->ctrlr; + struct spdk_nvme_ctrlr *ctrlr = nvme_ctrlr->ctrlr; struct spdk_nvme_ns *ns; int rc = 0; @@ -1516,7 +1516,7 @@ nvme_ctrlr_populate_standard_namespace(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, nvme_ns->ns = ns; nvme_ns->populated = true; - rc = nvme_bdev_create(nvme_bdev_ctrlr, nvme_ns); + rc = nvme_bdev_create(nvme_ctrlr, nvme_ns); done: nvme_ctrlr_populate_namespace_done(ctx, nvme_ns, rc); } @@ -1546,12 +1546,12 @@ hotplug_probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid, static void nvme_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = ctx; + struct nvme_ctrlr *nvme_ctrlr = ctx; if (spdk_nvme_cpl_is_error(cpl)) { SPDK_WARNLOG("Abort failed. Resetting controller. 
sc is %u, sct is %u.\n", cpl->status.sc, cpl->status.sct); - _bdev_nvme_reset(nvme_bdev_ctrlr); + _bdev_nvme_reset(nvme_ctrlr); } } @@ -1559,11 +1559,11 @@ static void timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, uint16_t cid) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = cb_arg; + struct nvme_ctrlr *nvme_ctrlr = cb_arg; union spdk_nvme_csts_register csts; int rc; - assert(nvme_bdev_ctrlr->ctrlr == ctrlr); + assert(nvme_ctrlr->ctrlr == ctrlr); SPDK_WARNLOG("Warning: Detected a timeout. ctrlr=%p qpair=%p cid=%u\n", ctrlr, qpair, cid); @@ -1572,11 +1572,11 @@ timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr, * would submit another fabrics cmd on the admin queue to read CSTS and check for its * completion recursively. */ - if (nvme_bdev_ctrlr->connected_trid->trtype == SPDK_NVME_TRANSPORT_PCIE || qpair != NULL) { + if (nvme_ctrlr->connected_trid->trtype == SPDK_NVME_TRANSPORT_PCIE || qpair != NULL) { csts = spdk_nvme_ctrlr_get_regs_csts(ctrlr); if (csts.bits.cfs) { SPDK_ERRLOG("Controller Fatal Status, reset required\n"); - _bdev_nvme_reset(nvme_bdev_ctrlr); + _bdev_nvme_reset(nvme_ctrlr); return; } } @@ -1585,16 +1585,16 @@ timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr, case SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT: if (qpair) { /* Don't send abort to ctrlr when reset is running. */ - pthread_mutex_lock(&nvme_bdev_ctrlr->mutex); - if (nvme_bdev_ctrlr->resetting) { - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_lock(&nvme_ctrlr->mutex); + if (nvme_ctrlr->resetting) { + pthread_mutex_unlock(&nvme_ctrlr->mutex); SPDK_NOTICELOG("Quit abort. Ctrlr is in the process of reseting.\n"); return; } - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_unlock(&nvme_ctrlr->mutex); rc = spdk_nvme_ctrlr_cmd_abort(ctrlr, qpair, cid, - nvme_abort_cpl, nvme_bdev_ctrlr); + nvme_abort_cpl, nvme_ctrlr); if (rc == 0) { return; } @@ -1604,7 +1604,7 @@ timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr, /* FALLTHROUGH */ case SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET: - _bdev_nvme_reset(nvme_bdev_ctrlr); + _bdev_nvme_reset(nvme_ctrlr); break; case SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE: SPDK_DEBUGLOG(bdev_nvme, "No action for nvme controller timeout.\n"); @@ -1629,14 +1629,14 @@ nvme_ctrlr_depopulate_standard_namespace(struct nvme_ns *nvme_ns) } static void -nvme_ctrlr_populate_namespace(struct nvme_bdev_ctrlr *ctrlr, struct nvme_ns *nvme_ns, +nvme_ctrlr_populate_namespace(struct nvme_ctrlr *ctrlr, struct nvme_ns *nvme_ns, struct nvme_async_probe_ctx *ctx) { g_populate_namespace_fn[nvme_ns->type](ctrlr, nvme_ns, ctx); } static void -nvme_ctrlr_depopulate_namespace(struct nvme_bdev_ctrlr *ctrlr, struct nvme_ns *nvme_ns) +nvme_ctrlr_depopulate_namespace(struct nvme_ctrlr *ctrlr, struct nvme_ns *nvme_ns) { g_depopulate_namespace_fn[nvme_ns->type](nvme_ns); } @@ -1645,14 +1645,14 @@ void nvme_ctrlr_populate_namespace_done(struct nvme_async_probe_ctx *ctx, struct nvme_ns *nvme_ns, int rc) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = nvme_ns->ctrlr; + struct nvme_ctrlr *nvme_ctrlr = nvme_ns->ctrlr; - assert(nvme_bdev_ctrlr != NULL); + assert(nvme_ctrlr != NULL); if (rc == 0) { - pthread_mutex_lock(&nvme_bdev_ctrlr->mutex); - nvme_bdev_ctrlr->ref++; - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_lock(&nvme_ctrlr->mutex); + nvme_ctrlr->ref++; + pthread_mutex_unlock(&nvme_ctrlr->mutex); } else { memset(nvme_ns, 0, sizeof(*nvme_ns)); } @@ -1660,16 +1660,16 @@ nvme_ctrlr_populate_namespace_done(struct nvme_async_probe_ctx *ctx, if 
(ctx) { ctx->populates_in_progress--; if (ctx->populates_in_progress == 0) { - nvme_ctrlr_populate_namespaces_done(nvme_bdev_ctrlr, ctx); + nvme_ctrlr_populate_namespaces_done(nvme_ctrlr, ctx); } } } static void -nvme_ctrlr_populate_namespaces(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +nvme_ctrlr_populate_namespaces(struct nvme_ctrlr *nvme_ctrlr, struct nvme_async_probe_ctx *ctx) { - struct spdk_nvme_ctrlr *ctrlr = nvme_bdev_ctrlr->ctrlr; + struct spdk_nvme_ctrlr *ctrlr = nvme_ctrlr->ctrlr; struct nvme_ns *nvme_ns; struct spdk_nvme_ns *ns; struct nvme_bdev *bdev; @@ -1685,10 +1685,10 @@ nvme_ctrlr_populate_namespaces(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, ctx->populates_in_progress = 1; } - for (i = 0; i < nvme_bdev_ctrlr->num_ns; i++) { + for (i = 0; i < nvme_ctrlr->num_ns; i++) { uint32_t nsid = i + 1; - nvme_ns = nvme_bdev_ctrlr->namespaces[i]; + nvme_ns = nvme_ctrlr->namespaces[i]; ns_is_active = spdk_nvme_ctrlr_is_active_ns(ctrlr, nsid); if (nvme_ns->populated && ns_is_active && nvme_ns->type == NVME_NS_STANDARD) { @@ -1713,7 +1713,7 @@ nvme_ctrlr_populate_namespaces(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, if (!nvme_ns->populated && ns_is_active) { nvme_ns->id = nsid; - nvme_ns->ctrlr = nvme_bdev_ctrlr; + nvme_ns->ctrlr = nvme_ctrlr; if (spdk_nvme_ctrlr_is_ocssd_supported(ctrlr)) { nvme_ns->type = NVME_NS_OCSSD; } else { @@ -1725,11 +1725,11 @@ nvme_ctrlr_populate_namespaces(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, if (ctx) { ctx->populates_in_progress++; } - nvme_ctrlr_populate_namespace(nvme_bdev_ctrlr, nvme_ns, ctx); + nvme_ctrlr_populate_namespace(nvme_ctrlr, nvme_ns, ctx); } if (nvme_ns->populated && !ns_is_active) { - nvme_ctrlr_depopulate_namespace(nvme_bdev_ctrlr, nvme_ns); + nvme_ctrlr_depopulate_namespace(nvme_ctrlr, nvme_ns); } } @@ -1741,25 +1741,25 @@ nvme_ctrlr_populate_namespaces(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, */ ctx->populates_in_progress--; if (ctx->populates_in_progress == 0) { - nvme_ctrlr_populate_namespaces_done(nvme_bdev_ctrlr, ctx); + nvme_ctrlr_populate_namespaces_done(nvme_ctrlr, ctx); } } } static void -nvme_ctrlr_depopulate_namespaces(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr) +nvme_ctrlr_depopulate_namespaces(struct nvme_ctrlr *nvme_ctrlr) { uint32_t i; struct nvme_ns *nvme_ns; - for (i = 0; i < nvme_bdev_ctrlr->num_ns; i++) { + for (i = 0; i < nvme_ctrlr->num_ns; i++) { uint32_t nsid = i + 1; - nvme_ns = nvme_bdev_ctrlr->namespaces[nsid - 1]; + nvme_ns = nvme_ctrlr->namespaces[nsid - 1]; if (nvme_ns->populated) { assert(nvme_ns->id == nsid); - nvme_ctrlr_depopulate_namespace(nvme_bdev_ctrlr, nvme_ns); + nvme_ctrlr_depopulate_namespace(nvme_ctrlr, nvme_ns); } } } @@ -1767,7 +1767,7 @@ nvme_ctrlr_depopulate_namespaces(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr) static void aer_cb(void *arg, const struct spdk_nvme_cpl *cpl) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = arg; + struct nvme_ctrlr *nvme_ctrlr = arg; union spdk_nvme_async_event_completion event; if (spdk_nvme_cpl_is_error(cpl)) { @@ -1778,11 +1778,11 @@ aer_cb(void *arg, const struct spdk_nvme_cpl *cpl) event.raw = cpl->cdw0; if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE) && (event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED)) { - nvme_ctrlr_populate_namespaces(nvme_bdev_ctrlr, NULL); + nvme_ctrlr_populate_namespaces(nvme_ctrlr, NULL); } else if ((event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_VENDOR) && (event.bits.log_page_identifier == SPDK_OCSSD_LOG_CHUNK_NOTIFICATION) && - spdk_nvme_ctrlr_is_ocssd_supported(nvme_bdev_ctrlr->ctrlr)) 
{ - bdev_ocssd_handle_chunk_notification(nvme_bdev_ctrlr); + spdk_nvme_ctrlr_is_ocssd_supported(nvme_ctrlr->ctrlr)) { + bdev_ocssd_handle_chunk_notification(nvme_ctrlr); } } @@ -1804,64 +1804,64 @@ populate_namespaces_cb(struct nvme_async_probe_ctx *ctx, size_t count, int rc) } static void -nvme_bdev_ctrlr_create_done(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, - struct nvme_async_probe_ctx *ctx) +nvme_ctrlr_create_done(struct nvme_ctrlr *nvme_ctrlr, + struct nvme_async_probe_ctx *ctx) { - spdk_io_device_register(nvme_bdev_ctrlr, + spdk_io_device_register(nvme_ctrlr, bdev_nvme_create_ctrlr_channel_cb, bdev_nvme_destroy_ctrlr_channel_cb, sizeof(struct nvme_ctrlr_channel), - nvme_bdev_ctrlr->name); + nvme_ctrlr->name); - nvme_ctrlr_populate_namespaces(nvme_bdev_ctrlr, ctx); + nvme_ctrlr_populate_namespaces(nvme_ctrlr, ctx); } static int -nvme_bdev_ctrlr_create(struct spdk_nvme_ctrlr *ctrlr, - const char *name, - const struct spdk_nvme_transport_id *trid, - uint32_t prchk_flags, - struct nvme_async_probe_ctx *ctx) +nvme_ctrlr_create(struct spdk_nvme_ctrlr *ctrlr, + const char *name, + const struct spdk_nvme_transport_id *trid, + uint32_t prchk_flags, + struct nvme_async_probe_ctx *ctx) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; struct nvme_ctrlr_trid *trid_entry; uint32_t i, num_ns; int rc; - nvme_bdev_ctrlr = calloc(1, sizeof(*nvme_bdev_ctrlr)); - if (nvme_bdev_ctrlr == NULL) { + nvme_ctrlr = calloc(1, sizeof(*nvme_ctrlr)); + if (nvme_ctrlr == NULL) { SPDK_ERRLOG("Failed to allocate device struct\n"); return -ENOMEM; } - rc = pthread_mutex_init(&nvme_bdev_ctrlr->mutex, NULL); + rc = pthread_mutex_init(&nvme_ctrlr->mutex, NULL); if (rc != 0) { - free(nvme_bdev_ctrlr); + free(nvme_ctrlr); return rc; } - TAILQ_INIT(&nvme_bdev_ctrlr->trids); + TAILQ_INIT(&nvme_ctrlr->trids); num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr); if (num_ns != 0) { - nvme_bdev_ctrlr->namespaces = calloc(num_ns, sizeof(struct nvme_ns *)); - if (!nvme_bdev_ctrlr->namespaces) { + nvme_ctrlr->namespaces = calloc(num_ns, sizeof(struct nvme_ns *)); + if (!nvme_ctrlr->namespaces) { SPDK_ERRLOG("Failed to allocate block namespaces pointer\n"); rc = -ENOMEM; goto err; } for (i = 0; i < num_ns; i++) { - nvme_bdev_ctrlr->namespaces[i] = calloc(1, sizeof(struct nvme_ns)); - if (nvme_bdev_ctrlr->namespaces[i] == NULL) { + nvme_ctrlr->namespaces[i] = calloc(1, sizeof(struct nvme_ns)); + if (nvme_ctrlr->namespaces[i] == NULL) { SPDK_ERRLOG("Failed to allocate block namespace struct\n"); rc = -ENOMEM; goto err; } - nvme_bdev_ctrlr->num_ns++; + nvme_ctrlr->num_ns++; } - assert(num_ns == nvme_bdev_ctrlr->num_ns); + assert(num_ns == nvme_ctrlr->num_ns); } trid_entry = calloc(1, sizeof(*trid_entry)); @@ -1872,51 +1872,51 @@ nvme_bdev_ctrlr_create(struct spdk_nvme_ctrlr *ctrlr, } trid_entry->trid = *trid; - nvme_bdev_ctrlr->connected_trid = &trid_entry->trid; - TAILQ_INSERT_HEAD(&nvme_bdev_ctrlr->trids, trid_entry, link); + nvme_ctrlr->connected_trid = &trid_entry->trid; + TAILQ_INSERT_HEAD(&nvme_ctrlr->trids, trid_entry, link); - nvme_bdev_ctrlr->thread = spdk_get_thread(); - nvme_bdev_ctrlr->ctrlr = ctrlr; - nvme_bdev_ctrlr->ref = 1; - nvme_bdev_ctrlr->name = strdup(name); - if (nvme_bdev_ctrlr->name == NULL) { + nvme_ctrlr->thread = spdk_get_thread(); + nvme_ctrlr->ctrlr = ctrlr; + nvme_ctrlr->ref = 1; + nvme_ctrlr->name = strdup(name); + if (nvme_ctrlr->name == NULL) { rc = -ENOMEM; goto err; } - if (spdk_nvme_ctrlr_is_ocssd_supported(nvme_bdev_ctrlr->ctrlr)) { - rc = bdev_ocssd_init_ctrlr(nvme_bdev_ctrlr); + 
if (spdk_nvme_ctrlr_is_ocssd_supported(nvme_ctrlr->ctrlr)) { + rc = bdev_ocssd_init_ctrlr(nvme_ctrlr); if (spdk_unlikely(rc != 0)) { SPDK_ERRLOG("Unable to initialize OCSSD controller\n"); goto err; } } - nvme_bdev_ctrlr->prchk_flags = prchk_flags; + nvme_ctrlr->prchk_flags = prchk_flags; - nvme_bdev_ctrlr->adminq_timer_poller = SPDK_POLLER_REGISTER(bdev_nvme_poll_adminq, nvme_bdev_ctrlr, - g_opts.nvme_adminq_poll_period_us); + nvme_ctrlr->adminq_timer_poller = SPDK_POLLER_REGISTER(bdev_nvme_poll_adminq, nvme_ctrlr, + g_opts.nvme_adminq_poll_period_us); - TAILQ_INSERT_TAIL(&g_nvme_bdev_ctrlrs, nvme_bdev_ctrlr, tailq); + TAILQ_INSERT_TAIL(&g_nvme_ctrlrs, nvme_ctrlr, tailq); if (g_opts.timeout_us > 0) { spdk_nvme_ctrlr_register_timeout_callback(ctrlr, g_opts.timeout_us, - timeout_cb, nvme_bdev_ctrlr); + timeout_cb, nvme_ctrlr); } - spdk_nvme_ctrlr_register_aer_callback(ctrlr, aer_cb, nvme_bdev_ctrlr); - spdk_nvme_ctrlr_set_remove_cb(ctrlr, remove_cb, nvme_bdev_ctrlr); + spdk_nvme_ctrlr_register_aer_callback(ctrlr, aer_cb, nvme_ctrlr); + spdk_nvme_ctrlr_set_remove_cb(ctrlr, remove_cb, nvme_ctrlr); - if (spdk_nvme_ctrlr_get_flags(nvme_bdev_ctrlr->ctrlr) & + if (spdk_nvme_ctrlr_get_flags(nvme_ctrlr->ctrlr) & SPDK_NVME_CTRLR_SECURITY_SEND_RECV_SUPPORTED) { - nvme_bdev_ctrlr->opal_dev = spdk_opal_dev_construct(nvme_bdev_ctrlr->ctrlr); + nvme_ctrlr->opal_dev = spdk_opal_dev_construct(nvme_ctrlr->ctrlr); } - nvme_bdev_ctrlr_create_done(nvme_bdev_ctrlr, ctx); + nvme_ctrlr_create_done(nvme_ctrlr, ctx); return 0; err: - nvme_bdev_ctrlr_delete(nvme_bdev_ctrlr); + nvme_ctrlr_delete(nvme_ctrlr); return rc; } @@ -1947,48 +1947,48 @@ attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid, SPDK_DEBUGLOG(bdev_nvme, "Attached to %s (%s)\n", trid->traddr, name); - nvme_bdev_ctrlr_create(ctrlr, name, trid, prchk_flags, NULL); + nvme_ctrlr_create(ctrlr, name, trid, prchk_flags, NULL); free(name); } static void -_nvme_bdev_ctrlr_destruct(void *ctx) +_nvme_ctrlr_destruct(void *ctx) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = ctx; + struct nvme_ctrlr *nvme_ctrlr = ctx; - nvme_ctrlr_depopulate_namespaces(nvme_bdev_ctrlr); - nvme_bdev_ctrlr_release(nvme_bdev_ctrlr); + nvme_ctrlr_depopulate_namespaces(nvme_ctrlr); + nvme_ctrlr_release(nvme_ctrlr); } static int -_bdev_nvme_delete(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, bool hotplug) +_bdev_nvme_delete(struct nvme_ctrlr *nvme_ctrlr, bool hotplug) { struct nvme_probe_skip_entry *entry; - pthread_mutex_lock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_lock(&nvme_ctrlr->mutex); /* The controller's destruction was already started */ - if (nvme_bdev_ctrlr->destruct) { - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + if (nvme_ctrlr->destruct) { + pthread_mutex_unlock(&nvme_ctrlr->mutex); return 0; } if (!hotplug && - nvme_bdev_ctrlr->connected_trid->trtype == SPDK_NVME_TRANSPORT_PCIE) { + nvme_ctrlr->connected_trid->trtype == SPDK_NVME_TRANSPORT_PCIE) { entry = calloc(1, sizeof(*entry)); if (!entry) { - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_unlock(&nvme_ctrlr->mutex); return -ENOMEM; } - entry->trid = *nvme_bdev_ctrlr->connected_trid; + entry->trid = *nvme_ctrlr->connected_trid; TAILQ_INSERT_TAIL(&g_skipped_nvme_ctrlrs, entry, tailq); } - nvme_bdev_ctrlr->destruct = true; - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + nvme_ctrlr->destruct = true; + pthread_mutex_unlock(&nvme_ctrlr->mutex); - _nvme_bdev_ctrlr_destruct(nvme_bdev_ctrlr); + _nvme_ctrlr_destruct(nvme_ctrlr); return 0; } @@ -1996,9 +1996,9 @@ _bdev_nvme_delete(struct 
nvme_bdev_ctrlr *nvme_bdev_ctrlr, bool hotplug) static void remove_cb(void *cb_ctx, struct spdk_nvme_ctrlr *ctrlr) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = cb_ctx; + struct nvme_ctrlr *nvme_ctrlr = cb_ctx; - _bdev_nvme_delete(nvme_bdev_ctrlr, true); + _bdev_nvme_delete(nvme_ctrlr, true); } static int @@ -2050,7 +2050,7 @@ int bdev_nvme_set_opts(const struct spdk_bdev_nvme_opts *opts) { if (g_bdev_nvme_init_thread != NULL) { - if (!TAILQ_EMPTY(&g_nvme_bdev_ctrlrs)) { + if (!TAILQ_EMPTY(&g_nvme_ctrlrs)) { return -EPERM; } } @@ -2111,7 +2111,7 @@ bdev_nvme_set_hotplug(bool enabled, uint64_t period_us, spdk_msg_fn cb, void *cb } static void -nvme_ctrlr_populate_namespaces_done(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +nvme_ctrlr_populate_namespaces_done(struct nvme_ctrlr *nvme_ctrlr, struct nvme_async_probe_ctx *ctx) { struct nvme_ns *nvme_ns; @@ -2119,16 +2119,16 @@ nvme_ctrlr_populate_namespaces_done(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, uint32_t i, nsid; size_t j; - assert(nvme_bdev_ctrlr != NULL); + assert(nvme_ctrlr != NULL); /* * Report the new bdevs that were created in this call. * There can be more than one bdev per NVMe controller. */ j = 0; - for (i = 0; i < nvme_bdev_ctrlr->num_ns; i++) { + for (i = 0; i < nvme_ctrlr->num_ns; i++) { nsid = i + 1; - nvme_ns = nvme_bdev_ctrlr->namespaces[nsid - 1]; + nvme_ns = nvme_ctrlr->namespaces[nsid - 1]; if (!nvme_ns->populated) { continue; } @@ -2153,7 +2153,7 @@ nvme_ctrlr_populate_namespaces_done(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, } static int -bdev_nvme_compare_trids(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +bdev_nvme_compare_trids(struct nvme_ctrlr *nvme_ctrlr, struct spdk_nvme_ctrlr *new_ctrlr, struct spdk_nvme_transport_id *trid) { @@ -2165,17 +2165,17 @@ bdev_nvme_compare_trids(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, } /* Currently we only support failover to the same transport type. */ - if (nvme_bdev_ctrlr->connected_trid->trtype != trid->trtype) { + if (nvme_ctrlr->connected_trid->trtype != trid->trtype) { return -EINVAL; } /* Currently we only support failover to the same NQN. */ - if (strncmp(trid->subnqn, nvme_bdev_ctrlr->connected_trid->subnqn, SPDK_NVMF_NQN_MAX_LEN)) { + if (strncmp(trid->subnqn, nvme_ctrlr->connected_trid->subnqn, SPDK_NVMF_NQN_MAX_LEN)) { return -EINVAL; } /* Skip all the other checks if we've already registered this path. 
*/ - TAILQ_FOREACH(tmp_trid, &nvme_bdev_ctrlr->trids, link) { + TAILQ_FOREACH(tmp_trid, &nvme_ctrlr->trids, link) { if (!spdk_nvme_transport_id_compare(&tmp_trid->trid, trid)) { return -EEXIST; } @@ -2185,21 +2185,21 @@ bdev_nvme_compare_trids(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, } static int -bdev_nvme_compare_namespaces(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +bdev_nvme_compare_namespaces(struct nvme_ctrlr *nvme_ctrlr, struct spdk_nvme_ctrlr *new_ctrlr) { uint32_t i, nsid; struct nvme_ns *nvme_ns; struct spdk_nvme_ns *new_ns; - if (spdk_nvme_ctrlr_get_num_ns(new_ctrlr) != nvme_bdev_ctrlr->num_ns) { + if (spdk_nvme_ctrlr_get_num_ns(new_ctrlr) != nvme_ctrlr->num_ns) { return -EINVAL; } - for (i = 0; i < nvme_bdev_ctrlr->num_ns; i++) { + for (i = 0; i < nvme_ctrlr->num_ns; i++) { nsid = i + 1; - nvme_ns = nvme_bdev_ctrlr->namespaces[i]; + nvme_ns = nvme_ctrlr->namespaces[i]; if (!nvme_ns->populated) { continue; } @@ -2216,7 +2216,7 @@ bdev_nvme_compare_namespaces(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, } static int -_bdev_nvme_add_secondary_trid(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +_bdev_nvme_add_secondary_trid(struct nvme_ctrlr *nvme_ctrlr, struct spdk_nvme_transport_id *trid) { struct nvme_ctrlr_trid *new_trid, *tmp_trid; @@ -2228,46 +2228,46 @@ _bdev_nvme_add_secondary_trid(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, new_trid->trid = *trid; new_trid->is_failed = false; - TAILQ_FOREACH(tmp_trid, &nvme_bdev_ctrlr->trids, link) { + TAILQ_FOREACH(tmp_trid, &nvme_ctrlr->trids, link) { if (tmp_trid->is_failed) { TAILQ_INSERT_BEFORE(tmp_trid, new_trid, link); return 0; } } - TAILQ_INSERT_TAIL(&nvme_bdev_ctrlr->trids, new_trid, link); + TAILQ_INSERT_TAIL(&nvme_ctrlr->trids, new_trid, link); return 0; } /* This is the case that a secondary path is added to an existing - * nvme_bdev_ctrlr for failover. After checking if it can access the same + * nvme_ctrlr for failover. After checking if it can access the same * namespaces as the primary path, it is disconnected until failover occurs. 
*/ static int -bdev_nvme_add_secondary_trid(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +bdev_nvme_add_secondary_trid(struct nvme_ctrlr *nvme_ctrlr, struct spdk_nvme_ctrlr *new_ctrlr, struct spdk_nvme_transport_id *trid) { int rc; - assert(nvme_bdev_ctrlr != NULL); + assert(nvme_ctrlr != NULL); - pthread_mutex_lock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_lock(&nvme_ctrlr->mutex); - rc = bdev_nvme_compare_trids(nvme_bdev_ctrlr, new_ctrlr, trid); + rc = bdev_nvme_compare_trids(nvme_ctrlr, new_ctrlr, trid); if (rc != 0) { goto exit; } - rc = bdev_nvme_compare_namespaces(nvme_bdev_ctrlr, new_ctrlr); + rc = bdev_nvme_compare_namespaces(nvme_ctrlr, new_ctrlr); if (rc != 0) { goto exit; } - rc = _bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, trid); + rc = _bdev_nvme_add_secondary_trid(nvme_ctrlr, trid); exit: - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_unlock(&nvme_ctrlr->mutex); spdk_nvme_detach(new_ctrlr); @@ -2279,18 +2279,18 @@ connect_attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid, struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts) { struct spdk_nvme_ctrlr_opts *user_opts = cb_ctx; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; struct nvme_async_probe_ctx *ctx; int rc; ctx = SPDK_CONTAINEROF(user_opts, struct nvme_async_probe_ctx, opts); ctx->ctrlr_attached = true; - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name(ctx->base_name); - if (nvme_bdev_ctrlr) { - rc = bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, ctrlr, &ctx->trid); + nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->base_name); + if (nvme_ctrlr) { + rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, ctrlr, &ctx->trid); } else { - rc = nvme_bdev_ctrlr_create(ctrlr, ctx->base_name, &ctx->trid, ctx->prchk_flags, ctx); + rc = nvme_ctrlr_create(ctrlr, ctx->base_name, &ctx->trid, ctx->prchk_flags, ctx); if (rc == 0) { return; } @@ -2346,7 +2346,7 @@ bdev_nvme_create(struct spdk_nvme_transport_id *trid, /* TODO expand this check to include both the host and target TRIDs. * Only if both are the same should we fail. 
*/ - if (nvme_bdev_ctrlr_get(trid) != NULL) { + if (nvme_ctrlr_get(trid) != NULL) { SPDK_ERRLOG("A controller with the provided trid (traddr: %s) already exists.\n", trid->traddr); return -EEXIST; } @@ -2406,18 +2406,18 @@ bdev_nvme_create(struct spdk_nvme_transport_id *trid, } static int -bdev_nvme_delete_secondary_trid(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +bdev_nvme_delete_secondary_trid(struct nvme_ctrlr *nvme_ctrlr, const struct spdk_nvme_transport_id *trid) { struct nvme_ctrlr_trid *ctrlr_trid, *tmp_trid; - if (!spdk_nvme_transport_id_compare(trid, nvme_bdev_ctrlr->connected_trid)) { + if (!spdk_nvme_transport_id_compare(trid, nvme_ctrlr->connected_trid)) { return -EBUSY; } - TAILQ_FOREACH_SAFE(ctrlr_trid, &nvme_bdev_ctrlr->trids, link, tmp_trid) { + TAILQ_FOREACH_SAFE(ctrlr_trid, &nvme_ctrlr->trids, link, tmp_trid) { if (!spdk_nvme_transport_id_compare(&ctrlr_trid->trid, trid)) { - TAILQ_REMOVE(&nvme_bdev_ctrlr->trids, ctrlr_trid, link); + TAILQ_REMOVE(&nvme_ctrlr->trids, ctrlr_trid, link); free(ctrlr_trid); return 0; } @@ -2429,39 +2429,39 @@ bdev_nvme_delete_secondary_trid(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, int bdev_nvme_delete(const char *name, const struct spdk_nvme_transport_id *trid) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; struct nvme_ctrlr_trid *ctrlr_trid; if (name == NULL) { return -EINVAL; } - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name(name); - if (nvme_bdev_ctrlr == NULL) { + nvme_ctrlr = nvme_ctrlr_get_by_name(name); + if (nvme_ctrlr == NULL) { SPDK_ERRLOG("Failed to find NVMe controller\n"); return -ENODEV; } /* case 1: remove the controller itself. */ if (trid == NULL) { - return _bdev_nvme_delete(nvme_bdev_ctrlr, false); + return _bdev_nvme_delete(nvme_ctrlr, false); } /* case 2: we are currently using the path to be removed. */ - if (!spdk_nvme_transport_id_compare(trid, nvme_bdev_ctrlr->connected_trid)) { - ctrlr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids); - assert(nvme_bdev_ctrlr->connected_trid == &ctrlr_trid->trid); + if (!spdk_nvme_transport_id_compare(trid, nvme_ctrlr->connected_trid)) { + ctrlr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); + assert(nvme_ctrlr->connected_trid == &ctrlr_trid->trid); /* case 2A: the current path is the only path. */ if (!TAILQ_NEXT(ctrlr_trid, link)) { - return _bdev_nvme_delete(nvme_bdev_ctrlr, false); + return _bdev_nvme_delete(nvme_ctrlr, false); } /* case 2B: there is an alternative path. */ - return bdev_nvme_failover(nvme_bdev_ctrlr, true); + return bdev_nvme_failover(nvme_ctrlr, true); } /* case 3: We are not using the specified path. 
*/ - return bdev_nvme_delete_secondary_trid(nvme_bdev_ctrlr, trid); + return bdev_nvme_delete_secondary_trid(nvme_ctrlr, trid); } static int @@ -2469,7 +2469,7 @@ bdev_nvme_library_init(void) { g_bdev_nvme_init_thread = spdk_get_thread(); - spdk_io_device_register(&g_nvme_bdev_ctrlrs, bdev_nvme_poll_group_create_cb, + spdk_io_device_register(&g_nvme_ctrlrs, bdev_nvme_poll_group_create_cb, bdev_nvme_poll_group_destroy_cb, sizeof(struct nvme_bdev_poll_group), "bdev_nvme_poll_groups"); @@ -2479,7 +2479,7 @@ bdev_nvme_library_init(void) static void bdev_nvme_library_fini(void) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, *tmp; + struct nvme_ctrlr *nvme_ctrlr, *tmp; struct nvme_probe_skip_entry *entry, *entry_tmp; spdk_poller_unregister(&g_hotplug_poller); @@ -2492,26 +2492,26 @@ bdev_nvme_library_fini(void) } pthread_mutex_lock(&g_bdev_nvme_mutex); - TAILQ_FOREACH_SAFE(nvme_bdev_ctrlr, &g_nvme_bdev_ctrlrs, tailq, tmp) { - pthread_mutex_lock(&nvme_bdev_ctrlr->mutex); - if (nvme_bdev_ctrlr->destruct) { + TAILQ_FOREACH_SAFE(nvme_ctrlr, &g_nvme_ctrlrs, tailq, tmp) { + pthread_mutex_lock(&nvme_ctrlr->mutex); + if (nvme_ctrlr->destruct) { /* This controller's destruction was already started * before the application started shutting down */ - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_unlock(&nvme_ctrlr->mutex); continue; } - nvme_bdev_ctrlr->destruct = true; - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + nvme_ctrlr->destruct = true; + pthread_mutex_unlock(&nvme_ctrlr->mutex); - spdk_thread_send_msg(nvme_bdev_ctrlr->thread, _nvme_bdev_ctrlr_destruct, - nvme_bdev_ctrlr); + spdk_thread_send_msg(nvme_ctrlr->thread, _nvme_ctrlr_destruct, + nvme_ctrlr); } g_bdev_nvme_module_finish = true; - if (TAILQ_EMPTY(&g_nvme_bdev_ctrlrs)) { + if (TAILQ_EMPTY(&g_nvme_ctrlrs)) { pthread_mutex_unlock(&g_bdev_nvme_mutex); - spdk_io_device_unregister(&g_nvme_bdev_ctrlrs, NULL); + spdk_io_device_unregister(&g_nvme_ctrlrs, NULL); spdk_bdev_module_finish_done(); return; } @@ -3287,14 +3287,14 @@ static int bdev_nvme_admin_passthru(struct nvme_ctrlr_channel *ctrlr_ch, struct nvme_bdev_io *bio, struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; uint32_t max_xfer_size; - if (!bdev_nvme_find_admin_path(ctrlr_ch, &nvme_bdev_ctrlr)) { + if (!bdev_nvme_find_admin_path(ctrlr_ch, &nvme_ctrlr)) { return -EINVAL; } - max_xfer_size = spdk_nvme_ctrlr_get_max_xfer_size(nvme_bdev_ctrlr->ctrlr); + max_xfer_size = spdk_nvme_ctrlr_get_max_xfer_size(nvme_ctrlr->ctrlr); if (nbytes > max_xfer_size) { SPDK_ERRLOG("nbytes is greater than MDTS %" PRIu32 ".\n", max_xfer_size); @@ -3303,7 +3303,7 @@ bdev_nvme_admin_passthru(struct nvme_ctrlr_channel *ctrlr_ch, struct nvme_bdev_i bio->orig_thread = spdk_get_thread(); - return spdk_nvme_ctrlr_cmd_admin_raw(nvme_bdev_ctrlr->ctrlr, cmd, buf, + return spdk_nvme_ctrlr_cmd_admin_raw(nvme_ctrlr->ctrlr, cmd, buf, (uint32_t)nbytes, bdev_nvme_admin_passthru_done, bio); } @@ -3444,24 +3444,24 @@ bdev_nvme_opts_config_json(struct spdk_json_write_ctx *w) } static void -nvme_bdev_ctrlr_config_json(struct spdk_json_write_ctx *w, - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr) +nvme_ctrlr_config_json(struct spdk_json_write_ctx *w, + struct nvme_ctrlr *nvme_ctrlr) { struct spdk_nvme_transport_id *trid; - trid = nvme_bdev_ctrlr->connected_trid; + trid = nvme_ctrlr->connected_trid; spdk_json_write_object_begin(w); spdk_json_write_named_string(w, "method", "bdev_nvme_attach_controller"); 
spdk_json_write_named_object_begin(w, "params"); - spdk_json_write_named_string(w, "name", nvme_bdev_ctrlr->name); + spdk_json_write_named_string(w, "name", nvme_ctrlr->name); nvme_bdev_dump_trid_json(trid, w); spdk_json_write_named_bool(w, "prchk_reftag", - (nvme_bdev_ctrlr->prchk_flags & SPDK_NVME_IO_FLAGS_PRCHK_REFTAG) != 0); + (nvme_ctrlr->prchk_flags & SPDK_NVME_IO_FLAGS_PRCHK_REFTAG) != 0); spdk_json_write_named_bool(w, "prchk_guard", - (nvme_bdev_ctrlr->prchk_flags & SPDK_NVME_IO_FLAGS_PRCHK_GUARD) != 0); + (nvme_ctrlr->prchk_flags & SPDK_NVME_IO_FLAGS_PRCHK_GUARD) != 0); spdk_json_write_object_end(w); @@ -3485,22 +3485,22 @@ bdev_nvme_hotplug_config_json(struct spdk_json_write_ctx *w) static int bdev_nvme_config_json(struct spdk_json_write_ctx *w) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; uint32_t nsid; bdev_nvme_opts_config_json(w); pthread_mutex_lock(&g_bdev_nvme_mutex); - TAILQ_FOREACH(nvme_bdev_ctrlr, &g_nvme_bdev_ctrlrs, tailq) { - nvme_bdev_ctrlr_config_json(w, nvme_bdev_ctrlr); + TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) { + nvme_ctrlr_config_json(w, nvme_ctrlr); - for (nsid = 0; nsid < nvme_bdev_ctrlr->num_ns; ++nsid) { - if (!nvme_bdev_ctrlr->namespaces[nsid]->populated) { + for (nsid = 0; nsid < nvme_ctrlr->num_ns; ++nsid) { + if (!nvme_ctrlr->namespaces[nsid]->populated) { continue; } - nvme_namespace_config_json(w, nvme_bdev_ctrlr->namespaces[nsid]); + nvme_namespace_config_json(w, nvme_ctrlr->namespaces[nsid]); } } diff --git a/module/bdev/nvme/bdev_nvme_cuse_rpc.c b/module/bdev/nvme/bdev_nvme_cuse_rpc.c index 1f4a6948f3..c419115201 100644 --- a/module/bdev/nvme/bdev_nvme_cuse_rpc.c +++ b/module/bdev/nvme/bdev_nvme_cuse_rpc.c @@ -61,7 +61,7 @@ rpc_nvme_cuse_register(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params) { struct rpc_nvme_cuse_register req = {}; - struct nvme_bdev_ctrlr *bdev_ctrlr = NULL; + struct nvme_ctrlr *bdev_ctrlr = NULL; int rc; if (spdk_json_decode_object(params, rpc_nvme_cuse_register_decoders, @@ -73,7 +73,7 @@ rpc_nvme_cuse_register(struct spdk_jsonrpc_request *request, goto cleanup; } - bdev_ctrlr = nvme_bdev_ctrlr_get_by_name(req.name); + bdev_ctrlr = nvme_ctrlr_get_by_name(req.name); if (!bdev_ctrlr) { SPDK_ERRLOG("No such controller\n"); spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); @@ -113,7 +113,7 @@ rpc_nvme_cuse_unregister(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params) { struct rpc_nvme_cuse_unregister req = {}; - struct nvme_bdev_ctrlr *bdev_ctrlr = NULL; + struct nvme_ctrlr *bdev_ctrlr = NULL; int rc; if (spdk_json_decode_object(params, rpc_nvme_cuse_unregister_decoders, @@ -125,7 +125,7 @@ rpc_nvme_cuse_unregister(struct spdk_jsonrpc_request *request, goto cleanup; } - bdev_ctrlr = nvme_bdev_ctrlr_get_by_name(req.name); + bdev_ctrlr = nvme_ctrlr_get_by_name(req.name); if (!bdev_ctrlr) { SPDK_ERRLOG("No such controller\n"); spdk_jsonrpc_send_error_response(request, -ENODEV, spdk_strerror(ENODEV)); diff --git a/module/bdev/nvme/bdev_nvme_rpc.c b/module/bdev/nvme/bdev_nvme_rpc.c index 6000ab1461..2c57fe4a6b 100644 --- a/module/bdev/nvme/bdev_nvme_rpc.c +++ b/module/bdev/nvme/bdev_nvme_rpc.c @@ -268,7 +268,7 @@ rpc_bdev_nvme_attach_controller(struct spdk_jsonrpc_request *request, struct spdk_nvme_transport_id trid = {}; struct spdk_nvme_host_id hostid = {}; uint32_t prchk_flags = 0; - struct nvme_bdev_ctrlr *ctrlr = NULL; + struct nvme_ctrlr *ctrlr = NULL; size_t len, maxlen; int rc; @@ -302,7 +302,7 @@ 
rpc_bdev_nvme_attach_controller(struct spdk_jsonrpc_request *request, rc = spdk_nvme_transport_id_parse_trtype(&trid.trtype, ctx->req.trtype); assert(rc == 0); - ctrlr = nvme_bdev_ctrlr_get_by_name(ctx->req.name); + ctrlr = nvme_ctrlr_get_by_name(ctx->req.name); /* Parse traddr */ maxlen = sizeof(trid.traddr); @@ -413,20 +413,20 @@ SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_nvme_attach_controller, construct_nvme_b static void rpc_dump_nvme_controller_info(struct spdk_json_write_ctx *w, - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr) + struct nvme_ctrlr *nvme_ctrlr) { struct spdk_nvme_transport_id *trid; - trid = nvme_bdev_ctrlr->connected_trid; + trid = nvme_ctrlr->connected_trid; spdk_json_write_object_begin(w); - spdk_json_write_named_string(w, "name", nvme_bdev_ctrlr->name); + spdk_json_write_named_string(w, "name", nvme_ctrlr->name); #ifdef SPDK_CONFIG_NVME_CUSE size_t cuse_name_size = 128; char cuse_name[cuse_name_size]; - int rc = spdk_nvme_cuse_get_ctrlr_name(nvme_bdev_ctrlr->ctrlr, cuse_name, &cuse_name_size); + int rc = spdk_nvme_cuse_get_ctrlr_name(nvme_ctrlr->ctrlr, cuse_name, &cuse_name_size); if (rc == 0) { spdk_json_write_named_string(w, "cuse_device", cuse_name); } @@ -459,7 +459,7 @@ rpc_bdev_nvme_get_controllers(struct spdk_jsonrpc_request *request, { struct rpc_bdev_nvme_get_controllers req = {}; struct spdk_json_write_ctx *w; - struct nvme_bdev_ctrlr *ctrlr = NULL; + struct nvme_ctrlr *ctrlr = NULL; if (params && spdk_json_decode_object(params, rpc_bdev_nvme_get_controllers_decoders, SPDK_COUNTOF(rpc_bdev_nvme_get_controllers_decoders), @@ -471,7 +471,7 @@ rpc_bdev_nvme_get_controllers(struct spdk_jsonrpc_request *request, } if (req.name) { - ctrlr = nvme_bdev_ctrlr_get_by_name(req.name); + ctrlr = nvme_ctrlr_get_by_name(req.name); if (ctrlr == NULL) { SPDK_ERRLOG("ctrlr '%s' does not exist\n", req.name); spdk_jsonrpc_send_error_response_fmt(request, EINVAL, "Controller %s does not exist", req.name); @@ -1085,7 +1085,7 @@ rpc_bdev_nvme_get_transport_statistics(struct spdk_jsonrpc_request *request, spdk_json_write_object_begin(ctx->w); spdk_json_write_named_array_begin(ctx->w, "poll_groups"); - spdk_for_each_channel(&g_nvme_bdev_ctrlrs, + spdk_for_each_channel(&g_nvme_ctrlrs, rpc_bdev_nvme_stats_per_channel, ctx, rpc_bdev_nvme_stats_done); diff --git a/module/bdev/nvme/bdev_ocssd.c b/module/bdev/nvme/bdev_ocssd.c index dceedeef23..41fd21d2b7 100644 --- a/module/bdev/nvme/bdev_ocssd.c +++ b/module/bdev/nvme/bdev_ocssd.c @@ -238,7 +238,7 @@ bdev_ocssd_destruct(void *ctx) if (!nvme_ns->populated) { pthread_mutex_unlock(&nvme_ns->ctrlr->mutex); - nvme_bdev_ctrlr_release(nvme_ns->ctrlr); + nvme_ctrlr_release(nvme_ns->ctrlr); } else { pthread_mutex_unlock(&nvme_ns->ctrlr->mutex); } @@ -1037,14 +1037,14 @@ bdev_ocssd_chunk_notification_cb(void *ctx, const struct spdk_nvme_cpl *cpl) static int bdev_ocssd_poll_mm(void *ctx) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = ctx; + struct nvme_ctrlr *nvme_ctrlr = ctx; struct nvme_ns *nvme_ns; struct bdev_ocssd_ns *ocssd_ns; uint32_t nsid; int rc; - for (nsid = 0; nsid < nvme_bdev_ctrlr->num_ns; ++nsid) { - nvme_ns = nvme_bdev_ctrlr->namespaces[nsid]; + for (nsid = 0; nsid < nvme_ctrlr->num_ns; ++nsid) { + nvme_ns = nvme_ctrlr->namespaces[nsid]; if (nvme_ns == NULL || !nvme_ns->populated) { continue; } @@ -1054,7 +1054,7 @@ bdev_ocssd_poll_mm(void *ctx) ocssd_ns->chunk_notify_pending = false; ocssd_ns->num_outstanding++; - rc = spdk_nvme_ctrlr_cmd_get_log_page(nvme_bdev_ctrlr->ctrlr, + rc = spdk_nvme_ctrlr_cmd_get_log_page(nvme_ctrlr->ctrlr, 
SPDK_OCSSD_LOG_CHUNK_NOTIFICATION, nsid + 1, ocssd_ns->chunk, sizeof(ocssd_ns->chunk[0]) * @@ -1073,14 +1073,14 @@ bdev_ocssd_poll_mm(void *ctx) } void -bdev_ocssd_handle_chunk_notification(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr) +bdev_ocssd_handle_chunk_notification(struct nvme_ctrlr *nvme_ctrlr) { struct bdev_ocssd_ns *ocssd_ns; struct nvme_ns *nvme_ns; uint32_t nsid; - for (nsid = 0; nsid < nvme_bdev_ctrlr->num_ns; ++nsid) { - nvme_ns = nvme_bdev_ctrlr->namespaces[nsid]; + for (nsid = 0; nsid < nvme_ctrlr->num_ns; ++nsid) { + nvme_ns = nvme_ctrlr->namespaces[nsid]; if (nvme_ns == NULL || !nvme_ns->populated) { continue; } @@ -1248,7 +1248,7 @@ void bdev_ocssd_create_bdev(const char *ctrlr_name, const char *bdev_name, uint32_t nsid, bdev_ocssd_create_cb cb_fn, void *cb_arg) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; struct bdev_ocssd_create_ctx *create_ctx = NULL; struct nvme_bdev *nvme_bdev = NULL; struct ocssd_bdev *ocssd_bdev = NULL; @@ -1258,14 +1258,14 @@ bdev_ocssd_create_bdev(const char *ctrlr_name, const char *bdev_name, uint32_t n struct spdk_ocssd_geometry_data *geometry; int rc = 0; - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name(ctrlr_name); - if (!nvme_bdev_ctrlr) { + nvme_ctrlr = nvme_ctrlr_get_by_name(ctrlr_name); + if (!nvme_ctrlr) { SPDK_ERRLOG("Unable to find controller %s\n", ctrlr_name); rc = -ENODEV; goto error; } - ns = spdk_nvme_ctrlr_get_ns(nvme_bdev_ctrlr->ctrlr, nsid); + ns = spdk_nvme_ctrlr_get_ns(nvme_ctrlr->ctrlr, nsid); if (!ns) { SPDK_ERRLOG("Unable to retrieve namespace %"PRIu32"\n", nsid); rc = -ENODEV; @@ -1278,8 +1278,8 @@ bdev_ocssd_create_bdev(const char *ctrlr_name, const char *bdev_name, uint32_t n goto error; } - assert(nsid <= nvme_bdev_ctrlr->num_ns); - nvme_ns = nvme_bdev_ctrlr->namespaces[nsid - 1]; + assert(nsid <= nvme_ctrlr->num_ns); + nvme_ns = nvme_ctrlr->namespaces[nsid - 1]; if (nvme_ns == NULL) { SPDK_ERRLOG("Namespace %"PRIu32" is not initialized\n", nsid); rc = -EINVAL; @@ -1444,7 +1444,7 @@ bdev_ocssd_geometry_cb(void *_ctx, const struct spdk_nvme_cpl *cpl) } void -bdev_ocssd_populate_namespace(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +bdev_ocssd_populate_namespace(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvme_ns, struct nvme_async_probe_ctx *nvme_ctx) { @@ -1453,7 +1453,7 @@ bdev_ocssd_populate_namespace(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, struct spdk_nvme_ns *ns; int rc; - ns = spdk_nvme_ctrlr_get_ns(nvme_bdev_ctrlr->ctrlr, nvme_ns->id); + ns = spdk_nvme_ctrlr_get_ns(nvme_ctrlr->ctrlr, nvme_ns->id); if (ns == NULL) { rc = -EINVAL; goto error; @@ -1478,7 +1478,7 @@ bdev_ocssd_populate_namespace(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, ctx->nvme_ctx = nvme_ctx; ctx->nvme_ns = nvme_ns; - rc = spdk_nvme_ocssd_ctrlr_cmd_geometry(nvme_bdev_ctrlr->ctrlr, nvme_ns->id, + rc = spdk_nvme_ocssd_ctrlr_cmd_geometry(nvme_ctrlr->ctrlr, nvme_ns->id, &ocssd_ns->geometry, sizeof(ocssd_ns->geometry), bdev_ocssd_geometry_cb, ctx); @@ -1550,7 +1550,7 @@ bdev_ocssd_destroy_io_channel(struct nvme_ctrlr_channel *ctrlr_ch) } int -bdev_ocssd_init_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr) +bdev_ocssd_init_ctrlr(struct nvme_ctrlr *nvme_ctrlr) { struct ocssd_bdev_ctrlr *ocssd_ctrlr; @@ -1559,24 +1559,24 @@ bdev_ocssd_init_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr) return -ENOMEM; } - ocssd_ctrlr->mm_poller = SPDK_POLLER_REGISTER(bdev_ocssd_poll_mm, nvme_bdev_ctrlr, + ocssd_ctrlr->mm_poller = SPDK_POLLER_REGISTER(bdev_ocssd_poll_mm, nvme_ctrlr, 10000ULL); if (!ocssd_ctrlr->mm_poller) { free(ocssd_ctrlr); 
return -ENOMEM; } - nvme_bdev_ctrlr->ocssd_ctrlr = ocssd_ctrlr; + nvme_ctrlr->ocssd_ctrlr = ocssd_ctrlr; return 0; } void -bdev_ocssd_fini_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr) +bdev_ocssd_fini_ctrlr(struct nvme_ctrlr *nvme_ctrlr) { - spdk_poller_unregister(&nvme_bdev_ctrlr->ocssd_ctrlr->mm_poller); - free(nvme_bdev_ctrlr->ocssd_ctrlr); - nvme_bdev_ctrlr->ocssd_ctrlr = NULL; + spdk_poller_unregister(&nvme_ctrlr->ocssd_ctrlr->mm_poller); + free(nvme_ctrlr->ocssd_ctrlr); + nvme_ctrlr->ocssd_ctrlr = NULL; } SPDK_LOG_REGISTER_COMPONENT(bdev_ocssd) diff --git a/module/bdev/nvme/bdev_ocssd.h b/module/bdev/nvme/bdev_ocssd.h index 8aba28d70f..fb534be227 100644 --- a/module/bdev/nvme/bdev_ocssd.h +++ b/module/bdev/nvme/bdev_ocssd.h @@ -44,7 +44,7 @@ void bdev_ocssd_create_bdev(const char *ctrlr_name, const char *bdev_name, uint3 bdev_ocssd_create_cb cb_fn, void *cb_arg); void bdev_ocssd_delete_bdev(const char *bdev_name, bdev_ocssd_delete_cb cb_fn, void *cb_arg); -void bdev_ocssd_populate_namespace(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +void bdev_ocssd_populate_namespace(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvme_ns, struct nvme_async_probe_ctx *ctx); void bdev_ocssd_depopulate_namespace(struct nvme_ns *nvme_ns); @@ -53,9 +53,9 @@ void bdev_ocssd_namespace_config_json(struct spdk_json_write_ctx *w, struct nvme int bdev_ocssd_create_io_channel(struct nvme_ctrlr_channel *ioch); void bdev_ocssd_destroy_io_channel(struct nvme_ctrlr_channel *ioch); -int bdev_ocssd_init_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr); -void bdev_ocssd_fini_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr); +int bdev_ocssd_init_ctrlr(struct nvme_ctrlr *nvme_ctrlr); +void bdev_ocssd_fini_ctrlr(struct nvme_ctrlr *nvme_ctrlr); -void bdev_ocssd_handle_chunk_notification(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr); +void bdev_ocssd_handle_chunk_notification(struct nvme_ctrlr *nvme_ctrlr); #endif /* SPDK_BDEV_OCSSD_H */ diff --git a/module/bdev/nvme/common.c b/module/bdev/nvme/common.c index 0b904adc73..4e753bc786 100644 --- a/module/bdev/nvme/common.c +++ b/module/bdev/nvme/common.c @@ -35,50 +35,50 @@ #include "bdev_ocssd.h" #include "common.h" -struct nvme_bdev_ctrlrs g_nvme_bdev_ctrlrs = TAILQ_HEAD_INITIALIZER(g_nvme_bdev_ctrlrs); +struct nvme_ctrlrs g_nvme_ctrlrs = TAILQ_HEAD_INITIALIZER(g_nvme_ctrlrs); pthread_mutex_t g_bdev_nvme_mutex = PTHREAD_MUTEX_INITIALIZER; bool g_bdev_nvme_module_finish; -struct nvme_bdev_ctrlr * -nvme_bdev_ctrlr_get(const struct spdk_nvme_transport_id *trid) +struct nvme_ctrlr * +nvme_ctrlr_get(const struct spdk_nvme_transport_id *trid) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; - TAILQ_FOREACH(nvme_bdev_ctrlr, &g_nvme_bdev_ctrlrs, tailq) { - if (spdk_nvme_transport_id_compare(trid, nvme_bdev_ctrlr->connected_trid) == 0) { - return nvme_bdev_ctrlr; + TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) { + if (spdk_nvme_transport_id_compare(trid, nvme_ctrlr->connected_trid) == 0) { + return nvme_ctrlr; } } return NULL; } -struct nvme_bdev_ctrlr * -nvme_bdev_ctrlr_get_by_name(const char *name) +struct nvme_ctrlr * +nvme_ctrlr_get_by_name(const char *name) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; if (name == NULL) { return NULL; } - TAILQ_FOREACH(nvme_bdev_ctrlr, &g_nvme_bdev_ctrlrs, tailq) { - if (strcmp(name, nvme_bdev_ctrlr->name) == 0) { - return nvme_bdev_ctrlr; + TAILQ_FOREACH(nvme_ctrlr, &g_nvme_ctrlrs, tailq) { + if (strcmp(name, nvme_ctrlr->name) == 0) { + return nvme_ctrlr; } } return NULL; } -struct 
nvme_bdev_ctrlr * +struct nvme_ctrlr * nvme_bdev_first_ctrlr(void) { - return TAILQ_FIRST(&g_nvme_bdev_ctrlrs); + return TAILQ_FIRST(&g_nvme_ctrlrs); } -struct nvme_bdev_ctrlr * -nvme_bdev_next_ctrlr(struct nvme_bdev_ctrlr *prev) +struct nvme_ctrlr * +nvme_bdev_next_ctrlr(struct nvme_ctrlr *prev) { return TAILQ_NEXT(prev, tailq); } @@ -113,52 +113,52 @@ nvme_bdev_dump_trid_json(const struct spdk_nvme_transport_id *trid, struct spdk_ } void -nvme_bdev_ctrlr_delete(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr) +nvme_ctrlr_delete(struct nvme_ctrlr *nvme_ctrlr) { struct nvme_ctrlr_trid *trid, *tmp_trid; uint32_t i; - if (nvme_bdev_ctrlr->opal_dev) { - spdk_opal_dev_destruct(nvme_bdev_ctrlr->opal_dev); - nvme_bdev_ctrlr->opal_dev = NULL; + if (nvme_ctrlr->opal_dev) { + spdk_opal_dev_destruct(nvme_ctrlr->opal_dev); + nvme_ctrlr->opal_dev = NULL; } - if (nvme_bdev_ctrlr->ocssd_ctrlr) { - bdev_ocssd_fini_ctrlr(nvme_bdev_ctrlr); + if (nvme_ctrlr->ocssd_ctrlr) { + bdev_ocssd_fini_ctrlr(nvme_ctrlr); } pthread_mutex_lock(&g_bdev_nvme_mutex); - TAILQ_REMOVE(&g_nvme_bdev_ctrlrs, nvme_bdev_ctrlr, tailq); + TAILQ_REMOVE(&g_nvme_ctrlrs, nvme_ctrlr, tailq); pthread_mutex_unlock(&g_bdev_nvme_mutex); - spdk_nvme_detach(nvme_bdev_ctrlr->ctrlr); - spdk_poller_unregister(&nvme_bdev_ctrlr->adminq_timer_poller); - free(nvme_bdev_ctrlr->name); - for (i = 0; i < nvme_bdev_ctrlr->num_ns; i++) { - free(nvme_bdev_ctrlr->namespaces[i]); + spdk_nvme_detach(nvme_ctrlr->ctrlr); + spdk_poller_unregister(&nvme_ctrlr->adminq_timer_poller); + free(nvme_ctrlr->name); + for (i = 0; i < nvme_ctrlr->num_ns; i++) { + free(nvme_ctrlr->namespaces[i]); } - TAILQ_FOREACH_SAFE(trid, &nvme_bdev_ctrlr->trids, link, tmp_trid) { - TAILQ_REMOVE(&nvme_bdev_ctrlr->trids, trid, link); + TAILQ_FOREACH_SAFE(trid, &nvme_ctrlr->trids, link, tmp_trid) { + TAILQ_REMOVE(&nvme_ctrlr->trids, trid, link); free(trid); } - pthread_mutex_destroy(&nvme_bdev_ctrlr->mutex); + pthread_mutex_destroy(&nvme_ctrlr->mutex); - free(nvme_bdev_ctrlr->namespaces); - free(nvme_bdev_ctrlr); + free(nvme_ctrlr->namespaces); + free(nvme_ctrlr); } static void -nvme_bdev_ctrlr_unregister_cb(void *io_device) +nvme_ctrlr_unregister_cb(void *io_device) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = io_device; + struct nvme_ctrlr *nvme_ctrlr = io_device; - nvme_bdev_ctrlr_delete(nvme_bdev_ctrlr); + nvme_ctrlr_delete(nvme_ctrlr); pthread_mutex_lock(&g_bdev_nvme_mutex); - if (g_bdev_nvme_module_finish && TAILQ_EMPTY(&g_nvme_bdev_ctrlrs)) { + if (g_bdev_nvme_module_finish && TAILQ_EMPTY(&g_nvme_ctrlrs)) { pthread_mutex_unlock(&g_bdev_nvme_mutex); - spdk_io_device_unregister(&g_nvme_bdev_ctrlrs, NULL); + spdk_io_device_unregister(&g_nvme_ctrlrs, NULL); spdk_bdev_module_finish_done(); return; } @@ -167,48 +167,48 @@ nvme_bdev_ctrlr_unregister_cb(void *io_device) } void -nvme_bdev_ctrlr_unregister(void *ctx) +nvme_ctrlr_unregister(void *ctx) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = ctx; + struct nvme_ctrlr *nvme_ctrlr = ctx; - spdk_io_device_unregister(nvme_bdev_ctrlr, nvme_bdev_ctrlr_unregister_cb); + spdk_io_device_unregister(nvme_ctrlr, nvme_ctrlr_unregister_cb); } void -nvme_bdev_ctrlr_release(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr) +nvme_ctrlr_release(struct nvme_ctrlr *nvme_ctrlr) { - pthread_mutex_lock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_lock(&nvme_ctrlr->mutex); - assert(nvme_bdev_ctrlr->ref > 0); - nvme_bdev_ctrlr->ref--; + assert(nvme_ctrlr->ref > 0); + nvme_ctrlr->ref--; - if (nvme_bdev_ctrlr->ref > 0 || !nvme_bdev_ctrlr->destruct || - nvme_bdev_ctrlr->resetting) { - 
pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + if (nvme_ctrlr->ref > 0 || !nvme_ctrlr->destruct || + nvme_ctrlr->resetting) { + pthread_mutex_unlock(&nvme_ctrlr->mutex); return; } - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_unlock(&nvme_ctrlr->mutex); - nvme_bdev_ctrlr_unregister(nvme_bdev_ctrlr); + nvme_ctrlr_unregister(nvme_ctrlr); } void nvme_ctrlr_depopulate_namespace_done(struct nvme_ns *nvme_ns) { - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = nvme_ns->ctrlr; + struct nvme_ctrlr *nvme_ctrlr = nvme_ns->ctrlr; - assert(nvme_bdev_ctrlr != NULL); + assert(nvme_ctrlr != NULL); - pthread_mutex_lock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_lock(&nvme_ctrlr->mutex); nvme_ns->populated = false; if (nvme_ns->bdev != NULL) { - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_unlock(&nvme_ctrlr->mutex); return; } - pthread_mutex_unlock(&nvme_bdev_ctrlr->mutex); + pthread_mutex_unlock(&nvme_ctrlr->mutex); - nvme_bdev_ctrlr_release(nvme_bdev_ctrlr); + nvme_ctrlr_release(nvme_ctrlr); } diff --git a/module/bdev/nvme/common.h b/module/bdev/nvme/common.h index e9e12a6b75..103991953f 100644 --- a/module/bdev/nvme/common.h +++ b/module/bdev/nvme/common.h @@ -39,8 +39,8 @@ #include "spdk/bdev_module.h" #include "spdk/opal.h" -TAILQ_HEAD(nvme_bdev_ctrlrs, nvme_bdev_ctrlr); -extern struct nvme_bdev_ctrlrs g_nvme_bdev_ctrlrs; +TAILQ_HEAD(nvme_ctrlrs, nvme_ctrlr); +extern struct nvme_ctrlrs g_nvme_ctrlrs; extern pthread_mutex_t g_bdev_nvme_mutex; extern bool g_bdev_nvme_module_finish; @@ -63,7 +63,7 @@ struct nvme_ns { */ bool populated; struct spdk_nvme_ns *ns; - struct nvme_bdev_ctrlr *ctrlr; + struct nvme_ctrlr *ctrlr; struct nvme_bdev *bdev; void *type_ctx; }; @@ -76,7 +76,7 @@ struct nvme_ctrlr_trid { bool is_failed; }; -struct nvme_bdev_ctrlr { +struct nvme_ctrlr { /** * points to pinned, physically contiguous memory region; * contains 4KB IDENTIFY structure for controller which is @@ -110,7 +110,7 @@ struct nvme_bdev_ctrlr { struct spdk_bdev_io *reset_bdev_io; /** linked list pointer for device list */ - TAILQ_ENTRY(nvme_bdev_ctrlr) tailq; + TAILQ_ENTRY(nvme_ctrlr) tailq; TAILQ_HEAD(, nvme_ctrlr_trid) trids; @@ -155,7 +155,7 @@ struct nvme_async_probe_ctx { struct ocssd_io_channel; struct nvme_ctrlr_channel { - struct nvme_bdev_ctrlr *ctrlr; + struct nvme_ctrlr *ctrlr; struct spdk_nvme_qpair *qpair; struct nvme_bdev_poll_group *group; TAILQ_HEAD(, spdk_bdev_io) pending_resets; @@ -166,16 +166,16 @@ void nvme_ctrlr_populate_namespace_done(struct nvme_async_probe_ctx *ctx, struct nvme_ns *nvme_ns, int rc); void nvme_ctrlr_depopulate_namespace_done(struct nvme_ns *nvme_ns); -struct nvme_bdev_ctrlr *nvme_bdev_ctrlr_get(const struct spdk_nvme_transport_id *trid); -struct nvme_bdev_ctrlr *nvme_bdev_ctrlr_get_by_name(const char *name); -struct nvme_bdev_ctrlr *nvme_bdev_first_ctrlr(void); -struct nvme_bdev_ctrlr *nvme_bdev_next_ctrlr(struct nvme_bdev_ctrlr *prev); +struct nvme_ctrlr *nvme_ctrlr_get(const struct spdk_nvme_transport_id *trid); +struct nvme_ctrlr *nvme_ctrlr_get_by_name(const char *name); +struct nvme_ctrlr *nvme_bdev_first_ctrlr(void); +struct nvme_ctrlr *nvme_bdev_next_ctrlr(struct nvme_ctrlr *prev); void nvme_bdev_dump_trid_json(const struct spdk_nvme_transport_id *trid, struct spdk_json_write_ctx *w); -void nvme_bdev_ctrlr_release(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr); -void nvme_bdev_ctrlr_unregister(void *ctx); -void nvme_bdev_ctrlr_delete(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr); +void nvme_ctrlr_release(struct nvme_ctrlr *nvme_ctrlr); +void 
nvme_ctrlr_unregister(void *ctx); +void nvme_ctrlr_delete(struct nvme_ctrlr *nvme_ctrlr); #endif /* SPDK_COMMON_BDEV_NVME_H */ diff --git a/module/bdev/nvme/nvme_rpc.c b/module/bdev/nvme/nvme_rpc.c index f212e9e706..f098b325ec 100644 --- a/module/bdev/nvme/nvme_rpc.c +++ b/module/bdev/nvme/nvme_rpc.c @@ -70,7 +70,7 @@ struct rpc_bdev_nvme_send_cmd_ctx { struct spdk_jsonrpc_request *jsonrpc_request; struct rpc_bdev_nvme_send_cmd_req req; struct rpc_bdev_nvme_send_cmd_resp resp; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; struct spdk_io_channel *ctrlr_io_ch; }; @@ -172,7 +172,7 @@ static int nvme_rpc_admin_cmd_bdev_nvme(struct rpc_bdev_nvme_send_cmd_ctx *ctx, struct spdk_nvme_cmd *cmd, void *buf, uint32_t nbytes, uint32_t timeout_ms) { - struct nvme_bdev_ctrlr *_nvme_ctrlr = ctx->nvme_bdev_ctrlr; + struct nvme_ctrlr *_nvme_ctrlr = ctx->nvme_ctrlr; int ret; ret = spdk_nvme_ctrlr_cmd_admin_raw(_nvme_ctrlr->ctrlr, cmd, buf, @@ -186,7 +186,7 @@ nvme_rpc_io_cmd_bdev_nvme(struct rpc_bdev_nvme_send_cmd_ctx *ctx, struct spdk_nv void *buf, uint32_t nbytes, void *md_buf, uint32_t md_len, uint32_t timeout_ms) { - struct nvme_bdev_ctrlr *_nvme_ctrlr = ctx->nvme_bdev_ctrlr; + struct nvme_ctrlr *_nvme_ctrlr = ctx->nvme_ctrlr; struct spdk_nvme_qpair *io_qpair; int ret; @@ -464,8 +464,8 @@ rpc_bdev_nvme_send_cmd(struct spdk_jsonrpc_request *request, goto invalid; } - ctx->nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name(ctx->req.name); - if (ctx->nvme_bdev_ctrlr == NULL) { + ctx->nvme_ctrlr = nvme_ctrlr_get_by_name(ctx->req.name); + if (ctx->nvme_ctrlr == NULL) { SPDK_ERRLOG("Failed at device lookup\n"); error_code = SPDK_JSONRPC_ERROR_INVALID_PARAMS; ret = -EINVAL; diff --git a/module/bdev/nvme/vbdev_opal.c b/module/bdev/nvme/vbdev_opal.c index a4d1288c3c..5449f891d8 100644 --- a/module/bdev/nvme/vbdev_opal.c +++ b/module/bdev/nvme/vbdev_opal.c @@ -42,7 +42,7 @@ struct opal_vbdev { char *name; - struct nvme_bdev_ctrlr *nvme_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; struct spdk_opal_dev *opal_dev; struct spdk_bdev_part *bdev_part; @@ -227,7 +227,7 @@ struct spdk_opal_locking_range_info * vbdev_opal_get_info_from_bdev(const char *opal_bdev_name, const char *password) { struct opal_vbdev *vbdev; - struct nvme_bdev_ctrlr *nvme_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; int locking_range_id; int rc; @@ -321,7 +321,7 @@ vbdev_opal_create(const char *nvme_ctrlr_name, uint32_t nsid, uint8_t locking_ra int rc; char *opal_vbdev_name; char *base_bdev_name; - struct nvme_bdev_ctrlr *nvme_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; struct opal_vbdev *opal_bdev; struct vbdev_opal_part_base *opal_part_base = NULL; struct spdk_bdev_part *part_bdev; @@ -332,7 +332,7 @@ vbdev_opal_create(const char *nvme_ctrlr_name, uint32_t nsid, uint8_t locking_ra return -EINVAL; } - nvme_ctrlr = nvme_bdev_ctrlr_get_by_name(nvme_ctrlr_name); + nvme_ctrlr = nvme_ctrlr_get_by_name(nvme_ctrlr_name); if (!nvme_ctrlr) { SPDK_ERRLOG("get nvme ctrlr failed\n"); return -ENODEV; @@ -471,7 +471,7 @@ vbdev_opal_destruct_bdev(struct opal_vbdev *opal_bdev) int vbdev_opal_destruct(const char *bdev_name, const char *password) { - struct nvme_bdev_ctrlr *nvme_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; int locking_range_id; int rc; struct opal_vbdev *opal_bdev; @@ -531,7 +531,7 @@ int vbdev_opal_set_lock_state(const char *bdev_name, uint16_t user_id, const char *password, const char *lock_state) { - struct nvme_bdev_ctrlr *nvme_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; int locking_range_id; int rc; enum spdk_opal_lock_state state_flag; @@ 
-579,7 +579,7 @@ int vbdev_opal_enable_new_user(const char *bdev_name, const char *admin_password, uint16_t user_id, const char *user_password) { - struct nvme_bdev_ctrlr *nvme_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; int locking_range_id; int rc; struct opal_vbdev *opal_bdev; diff --git a/module/bdev/nvme/vbdev_opal_rpc.c b/module/bdev/nvme/vbdev_opal_rpc.c index b400edab3b..73332b8960 100644 --- a/module/bdev/nvme/vbdev_opal_rpc.c +++ b/module/bdev/nvme/vbdev_opal_rpc.c @@ -60,7 +60,7 @@ rpc_bdev_nvme_opal_init(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params) { struct rpc_bdev_nvme_opal_init req = {}; - struct nvme_bdev_ctrlr *nvme_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; int rc; if (spdk_json_decode_object(params, rpc_bdev_nvme_opal_init_decoders, @@ -72,7 +72,7 @@ rpc_bdev_nvme_opal_init(struct spdk_jsonrpc_request *request, } /* check if opal supported */ - nvme_ctrlr = nvme_bdev_ctrlr_get_by_name(req.nvme_ctrlr_name); + nvme_ctrlr = nvme_ctrlr_get_by_name(req.nvme_ctrlr_name); if (nvme_ctrlr == NULL || nvme_ctrlr->opal_dev == NULL) { SPDK_ERRLOG("%s not support opal\n", req.nvme_ctrlr_name); spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters"); @@ -135,7 +135,7 @@ rpc_bdev_nvme_opal_revert(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params) { struct rpc_bdev_nvme_opal_revert req = {}; - struct nvme_bdev_ctrlr *nvme_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; int rc; if (spdk_json_decode_object(params, rpc_bdev_nvme_opal_revert_decoders, @@ -147,7 +147,7 @@ rpc_bdev_nvme_opal_revert(struct spdk_jsonrpc_request *request, } /* check if opal supported */ - nvme_ctrlr = nvme_bdev_ctrlr_get_by_name(req.nvme_ctrlr_name); + nvme_ctrlr = nvme_ctrlr_get_by_name(req.nvme_ctrlr_name); if (nvme_ctrlr == NULL || nvme_ctrlr->opal_dev == NULL) { SPDK_ERRLOG("%s not support opal\n", req.nvme_ctrlr_name); spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters"); diff --git a/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c b/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c index 509c2bc400..32c1a6424a 100644 --- a/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c +++ b/test/unit/lib/bdev/nvme/bdev_nvme.c/bdev_nvme_ut.c @@ -200,7 +200,7 @@ DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_c DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev)); -DEFINE_STUB_V(bdev_ocssd_populate_namespace, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, +DEFINE_STUB_V(bdev_ocssd_populate_namespace, (struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvme_ns, struct nvme_async_probe_ctx *ctx)); DEFINE_STUB_V(bdev_ocssd_depopulate_namespace, (struct nvme_ns *nvme_ns)); @@ -212,11 +212,11 @@ DEFINE_STUB(bdev_ocssd_create_io_channel, int, (struct nvme_ctrlr_channel *ioch) DEFINE_STUB_V(bdev_ocssd_destroy_io_channel, (struct nvme_ctrlr_channel *ioch)); -DEFINE_STUB(bdev_ocssd_init_ctrlr, int, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr), 0); +DEFINE_STUB(bdev_ocssd_init_ctrlr, int, (struct nvme_ctrlr *nvme_ctrlr), 0); -DEFINE_STUB_V(bdev_ocssd_fini_ctrlr, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)); +DEFINE_STUB_V(bdev_ocssd_fini_ctrlr, (struct nvme_ctrlr *nvme_ctrlr)); -DEFINE_STUB_V(bdev_ocssd_handle_chunk_notification, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)); +DEFINE_STUB_V(bdev_ocssd_handle_chunk_notification, (struct nvme_ctrlr *nvme_ctrlr)); DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iov, @@ 
-1007,19 +1007,19 @@ test_create_ctrlr(void) ut_init_trid(&trid); - rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); + rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); CU_ASSERT(rc == 0); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") != NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); rc = bdev_nvme_delete("nvme0", NULL); CU_ASSERT(rc == 0); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") != NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); } static void @@ -1027,7 +1027,7 @@ test_reset_ctrlr(void) { struct spdk_nvme_transport_id trid = {}; struct spdk_nvme_ctrlr ctrlr = {}; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL; + struct nvme_ctrlr *nvme_ctrlr = NULL; struct nvme_ctrlr_trid *curr_trid; struct spdk_io_channel *ch1, *ch2; struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; @@ -1038,16 +1038,16 @@ test_reset_ctrlr(void) set_thread(0); - rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); + rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); CU_ASSERT(rc == 0); - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); - curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids); + curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); SPDK_CU_ASSERT_FATAL(curr_trid != NULL); - ch1 = spdk_get_io_channel(nvme_bdev_ctrlr); + ch1 = spdk_get_io_channel(nvme_ctrlr); SPDK_CU_ASSERT_FATAL(ch1 != NULL); ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); @@ -1055,7 +1055,7 @@ test_reset_ctrlr(void) set_thread(1); - ch2 = spdk_get_io_channel(nvme_bdev_ctrlr); + ch2 = spdk_get_io_channel(nvme_ctrlr); SPDK_CU_ASSERT_FATAL(ch2 != NULL); ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); @@ -1065,26 +1065,26 @@ test_reset_ctrlr(void) set_thread(1); /* Case 1: ctrlr is already being destructed. */ - nvme_bdev_ctrlr->destruct = true; + nvme_ctrlr->destruct = true; - rc = _bdev_nvme_reset(nvme_bdev_ctrlr); + rc = _bdev_nvme_reset(nvme_ctrlr); CU_ASSERT(rc == -EBUSY); /* Case 2: reset is in progress. */ - nvme_bdev_ctrlr->destruct = false; - nvme_bdev_ctrlr->resetting = true; + nvme_ctrlr->destruct = false; + nvme_ctrlr->resetting = true; - rc = _bdev_nvme_reset(nvme_bdev_ctrlr); + rc = _bdev_nvme_reset(nvme_ctrlr); CU_ASSERT(rc == -EAGAIN); /* Case 3: reset completes successfully. 
*/ - nvme_bdev_ctrlr->resetting = false; + nvme_ctrlr->resetting = false; curr_trid->is_failed = true; ctrlr.is_failed = true; - rc = _bdev_nvme_reset(nvme_bdev_ctrlr); + rc = _bdev_nvme_reset(nvme_ctrlr); CU_ASSERT(rc == 0); - CU_ASSERT(nvme_bdev_ctrlr->resetting == true); + CU_ASSERT(nvme_ctrlr->resetting == true); CU_ASSERT(ctrlr_ch1->qpair != NULL); CU_ASSERT(ctrlr_ch2->qpair != NULL); @@ -1107,11 +1107,11 @@ test_reset_ctrlr(void) poll_thread_times(1, 1); CU_ASSERT(ctrlr_ch1->qpair != NULL); CU_ASSERT(ctrlr_ch2->qpair != NULL); - CU_ASSERT(nvme_bdev_ctrlr->resetting == true); + CU_ASSERT(nvme_ctrlr->resetting == true); CU_ASSERT(curr_trid->is_failed == true); poll_thread_times(1, 1); - CU_ASSERT(nvme_bdev_ctrlr->resetting == false); + CU_ASSERT(nvme_ctrlr->resetting == false); CU_ASSERT(curr_trid->is_failed == false); spdk_put_io_channel(ch2); @@ -1127,7 +1127,7 @@ test_reset_ctrlr(void) poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); } static void @@ -1135,7 +1135,7 @@ test_race_between_reset_and_destruct_ctrlr(void) { struct spdk_nvme_transport_id trid = {}; struct spdk_nvme_ctrlr ctrlr = {}; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; struct spdk_io_channel *ch1, *ch2; int rc; @@ -1144,45 +1144,45 @@ test_race_between_reset_and_destruct_ctrlr(void) set_thread(0); - rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); + rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); CU_ASSERT(rc == 0); - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); - ch1 = spdk_get_io_channel(nvme_bdev_ctrlr); + ch1 = spdk_get_io_channel(nvme_ctrlr); SPDK_CU_ASSERT_FATAL(ch1 != NULL); set_thread(1); - ch2 = spdk_get_io_channel(nvme_bdev_ctrlr); + ch2 = spdk_get_io_channel(nvme_ctrlr); SPDK_CU_ASSERT_FATAL(ch2 != NULL); /* Reset starts from thread 1. */ set_thread(1); - rc = _bdev_nvme_reset(nvme_bdev_ctrlr); + rc = _bdev_nvme_reset(nvme_ctrlr); CU_ASSERT(rc == 0); - CU_ASSERT(nvme_bdev_ctrlr->resetting == true); + CU_ASSERT(nvme_ctrlr->resetting == true); /* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */ set_thread(0); rc = bdev_nvme_delete("nvme0", NULL); CU_ASSERT(rc == 0); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr); - CU_ASSERT(nvme_bdev_ctrlr->destruct == true); - CU_ASSERT(nvme_bdev_ctrlr->resetting == true); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); + CU_ASSERT(nvme_ctrlr->destruct == true); + CU_ASSERT(nvme_ctrlr->resetting == true); poll_threads(); /* Reset completed but ctrlr is not still destructed yet. */ - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr); - CU_ASSERT(nvme_bdev_ctrlr->destruct == true); - CU_ASSERT(nvme_bdev_ctrlr->resetting == false); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); + CU_ASSERT(nvme_ctrlr->destruct == true); + CU_ASSERT(nvme_ctrlr->resetting == false); /* New reset request is rejected. 
*/ - rc = _bdev_nvme_reset(nvme_bdev_ctrlr); + rc = _bdev_nvme_reset(nvme_ctrlr); CU_ASSERT(rc == -EBUSY); /* Additional polling called spdk_io_device_unregister() to ctrlr, @@ -1190,7 +1190,7 @@ test_race_between_reset_and_destruct_ctrlr(void) */ poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); set_thread(0); @@ -1202,7 +1202,7 @@ test_race_between_reset_and_destruct_ctrlr(void) poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); } static void @@ -1210,7 +1210,7 @@ test_failover_ctrlr(void) { struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; struct spdk_nvme_ctrlr ctrlr = {}; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL; + struct nvme_ctrlr *nvme_ctrlr = NULL; struct nvme_ctrlr_trid *curr_trid, *next_trid; struct spdk_io_channel *ch1, *ch2; int rc; @@ -1221,112 +1221,112 @@ test_failover_ctrlr(void) set_thread(0); - rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL); + rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL); CU_ASSERT(rc == 0); - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); - ch1 = spdk_get_io_channel(nvme_bdev_ctrlr); + ch1 = spdk_get_io_channel(nvme_ctrlr); SPDK_CU_ASSERT_FATAL(ch1 != NULL); set_thread(1); - ch2 = spdk_get_io_channel(nvme_bdev_ctrlr); + ch2 = spdk_get_io_channel(nvme_ctrlr); SPDK_CU_ASSERT_FATAL(ch2 != NULL); /* First, test one trid case. */ - curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids); + curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); SPDK_CU_ASSERT_FATAL(curr_trid != NULL); /* Failover starts from thread 1. */ set_thread(1); /* Case 1: ctrlr is already being destructed. */ - nvme_bdev_ctrlr->destruct = true; + nvme_ctrlr->destruct = true; - rc = bdev_nvme_failover(nvme_bdev_ctrlr, false); + rc = bdev_nvme_failover(nvme_ctrlr, false); CU_ASSERT(rc == 0); CU_ASSERT(curr_trid->is_failed == false); /* Case 2: reset is in progress. */ - nvme_bdev_ctrlr->destruct = false; - nvme_bdev_ctrlr->resetting = true; + nvme_ctrlr->destruct = false; + nvme_ctrlr->resetting = true; - rc = bdev_nvme_failover(nvme_bdev_ctrlr, false); + rc = bdev_nvme_failover(nvme_ctrlr, false); CU_ASSERT(rc == 0); /* Case 3: failover is in progress. */ - nvme_bdev_ctrlr->failover_in_progress = true; + nvme_ctrlr->failover_in_progress = true; - rc = bdev_nvme_failover(nvme_bdev_ctrlr, false); + rc = bdev_nvme_failover(nvme_ctrlr, false); CU_ASSERT(rc == 0); CU_ASSERT(curr_trid->is_failed == false); /* Case 4: reset completes successfully. */ - nvme_bdev_ctrlr->resetting = false; - nvme_bdev_ctrlr->failover_in_progress = false; + nvme_ctrlr->resetting = false; + nvme_ctrlr->failover_in_progress = false; - rc = bdev_nvme_failover(nvme_bdev_ctrlr, false); + rc = bdev_nvme_failover(nvme_ctrlr, false); CU_ASSERT(rc == 0); - CU_ASSERT(nvme_bdev_ctrlr->resetting == true); + CU_ASSERT(nvme_ctrlr->resetting == true); CU_ASSERT(curr_trid->is_failed == true); poll_threads(); - curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids); + curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); SPDK_CU_ASSERT_FATAL(curr_trid != NULL); - CU_ASSERT(nvme_bdev_ctrlr->resetting == false); + CU_ASSERT(nvme_ctrlr->resetting == false); CU_ASSERT(curr_trid->is_failed == false); set_thread(0); /* Second, test two trids case. 
*/ - rc = bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid2); + rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); CU_ASSERT(rc == 0); - curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids); + curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); SPDK_CU_ASSERT_FATAL(curr_trid != NULL); - CU_ASSERT(&curr_trid->trid == nvme_bdev_ctrlr->connected_trid); + CU_ASSERT(&curr_trid->trid == nvme_ctrlr->connected_trid); CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0); /* Failover starts from thread 1. */ set_thread(1); /* Case 5: reset is in progress. */ - nvme_bdev_ctrlr->resetting = true; + nvme_ctrlr->resetting = true; - rc = bdev_nvme_failover(nvme_bdev_ctrlr, false); + rc = bdev_nvme_failover(nvme_ctrlr, false); CU_ASSERT(rc == -EAGAIN); /* Case 5: failover is in progress. */ - nvme_bdev_ctrlr->failover_in_progress = true; + nvme_ctrlr->failover_in_progress = true; - rc = bdev_nvme_failover(nvme_bdev_ctrlr, false); + rc = bdev_nvme_failover(nvme_ctrlr, false); CU_ASSERT(rc == 0); /* Case 6: failover completes successfully. */ - nvme_bdev_ctrlr->resetting = false; - nvme_bdev_ctrlr->failover_in_progress = false; + nvme_ctrlr->resetting = false; + nvme_ctrlr->failover_in_progress = false; - rc = bdev_nvme_failover(nvme_bdev_ctrlr, false); + rc = bdev_nvme_failover(nvme_ctrlr, false); CU_ASSERT(rc == 0); - CU_ASSERT(nvme_bdev_ctrlr->resetting == true); - CU_ASSERT(nvme_bdev_ctrlr->failover_in_progress == true); + CU_ASSERT(nvme_ctrlr->resetting == true); + CU_ASSERT(nvme_ctrlr->failover_in_progress == true); - next_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids); + next_trid = TAILQ_FIRST(&nvme_ctrlr->trids); SPDK_CU_ASSERT_FATAL(next_trid != NULL); CU_ASSERT(next_trid != curr_trid); - CU_ASSERT(&next_trid->trid == nvme_bdev_ctrlr->connected_trid); + CU_ASSERT(&next_trid->trid == nvme_ctrlr->connected_trid); CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0); poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr->resetting == false); - CU_ASSERT(nvme_bdev_ctrlr->failover_in_progress == false); + CU_ASSERT(nvme_ctrlr->resetting == false); + CU_ASSERT(nvme_ctrlr->failover_in_progress == false); spdk_put_io_channel(ch2); @@ -1341,7 +1341,7 @@ test_failover_ctrlr(void) poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); } static void @@ -1357,7 +1357,7 @@ test_pending_reset(void) struct spdk_nvme_transport_id trid = {}; struct spdk_nvme_host_id hostid = {}; struct spdk_nvme_ctrlr *ctrlr; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL; + struct nvme_ctrlr *nvme_ctrlr = NULL; const int STRING_SIZE = 32; const char *attached_names[STRING_SIZE]; struct spdk_bdev_io *first_bdev_io, *second_bdev_io; @@ -1391,17 +1391,17 @@ test_pending_reset(void) spdk_delay_us(1000); poll_threads(); - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); - ch1 = spdk_get_io_channel(nvme_bdev_ctrlr); + ch1 = spdk_get_io_channel(nvme_ctrlr); SPDK_CU_ASSERT_FATAL(ch1 != NULL); ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); set_thread(1); - ch2 = spdk_get_io_channel(nvme_bdev_ctrlr); + ch2 = spdk_get_io_channel(nvme_ctrlr); SPDK_CU_ASSERT_FATAL(ch2 != NULL); ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); @@ -1411,7 +1411,7 @@ test_pending_reset(void) */ rc = bdev_nvme_reset(ctrlr_ch2, first_bdev_io); CU_ASSERT(rc == 0); - CU_ASSERT(nvme_bdev_ctrlr->resetting == true); + 
CU_ASSERT(nvme_ctrlr->resetting == true); CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); set_thread(0); @@ -1422,7 +1422,7 @@ test_pending_reset(void) poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr->resetting == false); + CU_ASSERT(nvme_ctrlr->resetting == false); CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); @@ -1436,7 +1436,7 @@ test_pending_reset(void) rc = bdev_nvme_reset(ctrlr_ch2, first_bdev_io); CU_ASSERT(rc == 0); - CU_ASSERT(nvme_bdev_ctrlr->resetting == true); + CU_ASSERT(nvme_ctrlr->resetting == true); CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); set_thread(0); @@ -1449,7 +1449,7 @@ test_pending_reset(void) poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr->resetting == false); + CU_ASSERT(nvme_ctrlr->resetting == false); CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); @@ -1468,7 +1468,7 @@ test_pending_reset(void) poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); free(first_bdev_io); free(second_bdev_io); @@ -1480,7 +1480,7 @@ test_attach_ctrlr(void) struct spdk_nvme_transport_id trid = {}; struct spdk_nvme_host_id hostid = {}; struct spdk_nvme_ctrlr *ctrlr; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; const int STRING_SIZE = 32; const char *attached_names[STRING_SIZE]; struct nvme_bdev *nbdev; @@ -1491,7 +1491,7 @@ test_attach_ctrlr(void) memset(attached_names, 0, sizeof(char *) * STRING_SIZE); ut_init_trid(&trid); - /* If ctrlr fails, no nvme_bdev_ctrlr is created. Failed ctrlr is removed + /* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed * by probe polling. */ ctrlr = ut_attach_ctrlr(&trid, 0); @@ -1508,9 +1508,9 @@ test_attach_ctrlr(void) spdk_delay_us(1000); poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); - /* If ctrlr has no namespace, one nvme_bdev_ctrlr with no namespace is created */ + /* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */ ctrlr = ut_attach_ctrlr(&trid, 0); SPDK_CU_ASSERT_FATAL(ctrlr != NULL); @@ -1523,19 +1523,19 @@ test_attach_ctrlr(void) spdk_delay_us(1000); poll_threads(); - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); - CU_ASSERT(nvme_bdev_ctrlr->ctrlr == ctrlr); - CU_ASSERT(nvme_bdev_ctrlr->num_ns == 0); + nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); + CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); + CU_ASSERT(nvme_ctrlr->num_ns == 0); rc = bdev_nvme_delete("nvme0", NULL); CU_ASSERT(rc == 0); poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); - /* If ctrlr has one namespace, one nvme_bdev_ctrlr with one namespace and + /* If ctrlr has one namespace, one nvme_ctrlr with one namespace and * one nvme_bdev is created. 
*/ ctrlr = ut_attach_ctrlr(&trid, 1); @@ -1550,15 +1550,15 @@ test_attach_ctrlr(void) spdk_delay_us(1000); poll_threads(); - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); - CU_ASSERT(nvme_bdev_ctrlr->ctrlr == ctrlr); - CU_ASSERT(nvme_bdev_ctrlr->num_ns == 1); + nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); + CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); + CU_ASSERT(nvme_ctrlr->num_ns == 1); CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0); attached_names[0] = NULL; - nbdev = nvme_bdev_ctrlr->namespaces[0]->bdev; + nbdev = nvme_ctrlr->namespaces[0]->bdev; SPDK_CU_ASSERT_FATAL(nbdev != NULL); CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr); @@ -1567,9 +1567,9 @@ test_attach_ctrlr(void) poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); - /* Ctrlr has one namespace but one nvme_bdev_ctrlr with no namespace is + /* Ctrlr has one namespace but one nvme_ctrlr with no namespace is * created because creating one nvme_bdev failed. */ ctrlr = ut_attach_ctrlr(&trid, 1); @@ -1585,10 +1585,10 @@ test_attach_ctrlr(void) spdk_delay_us(1000); poll_threads(); - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); - CU_ASSERT(nvme_bdev_ctrlr->ctrlr == ctrlr); - CU_ASSERT(nvme_bdev_ctrlr->num_ns == 1); + nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); + CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); + CU_ASSERT(nvme_ctrlr->num_ns == 1); CU_ASSERT(attached_names[0] == NULL); @@ -1597,7 +1597,7 @@ test_attach_ctrlr(void) poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); g_ut_register_bdev_status = 0; } @@ -1607,7 +1607,7 @@ test_reconnect_qpair(void) { struct spdk_nvme_transport_id trid = {}; struct spdk_nvme_ctrlr ctrlr = {}; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL; + struct nvme_ctrlr *nvme_ctrlr = NULL; struct spdk_io_channel *ch; struct nvme_ctrlr_channel *ctrlr_ch; int rc; @@ -1617,13 +1617,13 @@ test_reconnect_qpair(void) ut_init_trid(&trid); TAILQ_INIT(&ctrlr.active_io_qpairs); - rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); + rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); CU_ASSERT(rc == 0); - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); - ch = spdk_get_io_channel(nvme_bdev_ctrlr); + ch = spdk_get_io_channel(nvme_ctrlr); SPDK_CU_ASSERT_FATAL(ch != NULL); ctrlr_ch = spdk_io_channel_get_ctx(ch); @@ -1656,7 +1656,7 @@ test_reconnect_qpair(void) poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); } static void @@ -1665,7 +1665,7 @@ test_aer_cb(void) struct spdk_nvme_transport_id trid = {}; struct spdk_nvme_host_id hostid = {}; struct spdk_nvme_ctrlr *ctrlr; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; struct nvme_bdev *bdev; const int STRING_SIZE = 32; const char *attached_names[STRING_SIZE]; @@ -1696,16 +1696,16 @@ test_aer_cb(void) spdk_delay_us(1000); poll_threads(); - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); + 
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); - CU_ASSERT(nvme_bdev_ctrlr->num_ns == 4); - CU_ASSERT(nvme_bdev_ctrlr->namespaces[0]->populated == false); - CU_ASSERT(nvme_bdev_ctrlr->namespaces[1]->populated == true); - CU_ASSERT(nvme_bdev_ctrlr->namespaces[2]->populated == true); - CU_ASSERT(nvme_bdev_ctrlr->namespaces[3]->populated == true); + CU_ASSERT(nvme_ctrlr->num_ns == 4); + CU_ASSERT(nvme_ctrlr->namespaces[0]->populated == false); + CU_ASSERT(nvme_ctrlr->namespaces[1]->populated == true); + CU_ASSERT(nvme_ctrlr->namespaces[2]->populated == true); + CU_ASSERT(nvme_ctrlr->namespaces[3]->populated == true); - bdev = nvme_bdev_ctrlr->namespaces[3]->bdev; + bdev = nvme_ctrlr->namespaces[3]->bdev; SPDK_CU_ASSERT_FATAL(bdev != NULL); CU_ASSERT(bdev->disk.blockcnt == 1024); @@ -1720,12 +1720,12 @@ test_aer_cb(void) event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; cpl.cdw0 = event.raw; - aer_cb(nvme_bdev_ctrlr, &cpl); + aer_cb(nvme_ctrlr, &cpl); - CU_ASSERT(nvme_bdev_ctrlr->namespaces[0]->populated == true); - CU_ASSERT(nvme_bdev_ctrlr->namespaces[1]->populated == true); - CU_ASSERT(nvme_bdev_ctrlr->namespaces[2]->populated == false); - CU_ASSERT(nvme_bdev_ctrlr->namespaces[3]->populated == true); + CU_ASSERT(nvme_ctrlr->namespaces[0]->populated == true); + CU_ASSERT(nvme_ctrlr->namespaces[1]->populated == true); + CU_ASSERT(nvme_ctrlr->namespaces[2]->populated == false); + CU_ASSERT(nvme_ctrlr->namespaces[3]->populated == true); CU_ASSERT(bdev->disk.blockcnt == 2048); rc = bdev_nvme_delete("nvme0", NULL); @@ -1733,7 +1733,7 @@ test_aer_cb(void) poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); } static void @@ -1848,7 +1848,7 @@ test_submit_nvme_cmd(void) struct spdk_nvme_transport_id trid = {}; struct spdk_nvme_host_id hostid = {}; struct spdk_nvme_ctrlr *ctrlr; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; const int STRING_SIZE = 32; const char *attached_names[STRING_SIZE]; struct nvme_bdev *bdev; @@ -1874,15 +1874,15 @@ test_submit_nvme_cmd(void) spdk_delay_us(1000); poll_threads(); - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); - bdev = nvme_bdev_ctrlr->namespaces[0]->bdev; + bdev = nvme_ctrlr->namespaces[0]->bdev; SPDK_CU_ASSERT_FATAL(bdev != NULL); set_thread(0); - ch = spdk_get_io_channel(nvme_bdev_ctrlr); + ch = spdk_get_io_channel(nvme_ctrlr); SPDK_CU_ASSERT_FATAL(ch != NULL); bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); @@ -1920,7 +1920,7 @@ test_submit_nvme_cmd(void) poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); } static void @@ -1928,7 +1928,7 @@ test_remove_trid(void) { struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; struct spdk_nvme_ctrlr ctrlr = {}; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL; + struct nvme_ctrlr *nvme_ctrlr = NULL; struct nvme_ctrlr_trid *ctrid; int rc; @@ -1938,13 +1938,13 @@ test_remove_trid(void) set_thread(0); - rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL); + rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL); CU_ASSERT(rc == 0); - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != 
NULL); - rc = bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid2); + rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); CU_ASSERT(rc == 0); /* trid3 is not in the registered list. */ @@ -1954,12 +1954,12 @@ test_remove_trid(void) /* trid2 is not used, and simply removed. */ rc = bdev_nvme_delete("nvme0", &trid2); CU_ASSERT(rc == 0); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr); - TAILQ_FOREACH(ctrid, &nvme_bdev_ctrlr->trids, link) { + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); + TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) != 0); } - rc = bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid3); + rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); CU_ASSERT(rc == 0); /* trid1 is currently used and trid3 is an alternative path. @@ -1967,45 +1967,45 @@ test_remove_trid(void) */ rc = bdev_nvme_delete("nvme0", &trid1); CU_ASSERT(rc == 0); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr); - CU_ASSERT(nvme_bdev_ctrlr->resetting == true); - TAILQ_FOREACH(ctrid, &nvme_bdev_ctrlr->trids, link) { + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); + CU_ASSERT(nvme_ctrlr->resetting == true); + TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid1) != 0); } - CU_ASSERT(spdk_nvme_transport_id_compare(nvme_bdev_ctrlr->connected_trid, &trid3) == 0); + CU_ASSERT(spdk_nvme_transport_id_compare(nvme_ctrlr->connected_trid, &trid3) == 0); poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr->resetting == false); + CU_ASSERT(nvme_ctrlr->resetting == false); /* trid3 is the current and only path. If we remove trid3, the corresponding - * nvme_bdev_ctrlr is removed. + * nvme_ctrlr is removed. */ rc = bdev_nvme_delete("nvme0", &trid3); CU_ASSERT(rc == 0); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); - rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL); + rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL); CU_ASSERT(rc == 0); - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); - rc = bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid2); + rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); CU_ASSERT(rc == 0); - /* If trid is not specified, nvme_bdev_ctrlr itself is removed. */ + /* If trid is not specified, nvme_ctrlr itself is removed. 
*/ rc = bdev_nvme_delete("nvme0", NULL); CU_ASSERT(rc == 0); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); } static void @@ -2014,7 +2014,7 @@ test_abort(void) struct spdk_nvme_transport_id trid = {}; struct spdk_nvme_host_id hostid = {}; struct spdk_nvme_ctrlr *ctrlr; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; const int STRING_SIZE = 32; const char *attached_names[STRING_SIZE]; struct nvme_bdev *bdev; @@ -2045,10 +2045,10 @@ test_abort(void) spdk_delay_us(1000); poll_threads(); - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); - bdev = nvme_bdev_ctrlr->namespaces[0]->bdev; + bdev = nvme_ctrlr->namespaces[0]->bdev; SPDK_CU_ASSERT_FATAL(bdev != NULL); write_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); @@ -2070,13 +2070,13 @@ test_abort(void) set_thread(0); - ch1 = spdk_get_io_channel(nvme_bdev_ctrlr); + ch1 = spdk_get_io_channel(nvme_ctrlr); SPDK_CU_ASSERT_FATAL(ch1 != NULL); ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); set_thread(1); - ch2 = spdk_get_io_channel(nvme_bdev_ctrlr); + ch2 = spdk_get_io_channel(nvme_ctrlr); SPDK_CU_ASSERT_FATAL(ch2 != NULL); write_io->internal.ch = (struct spdk_bdev_channel *)ch1; @@ -2188,7 +2188,7 @@ test_abort(void) poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); } static void @@ -2196,7 +2196,7 @@ test_get_io_qpair(void) { struct spdk_nvme_transport_id trid = {}; struct spdk_nvme_ctrlr ctrlr = {}; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL; + struct nvme_ctrlr *nvme_ctrlr = NULL; struct spdk_io_channel *ch; struct nvme_ctrlr_channel *ctrlr_ch; struct spdk_nvme_qpair *qpair; @@ -2207,13 +2207,13 @@ test_get_io_qpair(void) set_thread(0); - rc = nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); + rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); CU_ASSERT(rc == 0); - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); - ch = spdk_get_io_channel(nvme_bdev_ctrlr); + ch = spdk_get_io_channel(nvme_ctrlr); SPDK_CU_ASSERT_FATAL(ch != NULL); ctrlr_ch = spdk_io_channel_get_ctx(ch); CU_ASSERT(ctrlr_ch->qpair != NULL); @@ -2228,7 +2228,7 @@ test_get_io_qpair(void) poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); } /* Test a scenario that the bdev subsystem starts shutdown when there still exists @@ -2242,7 +2242,7 @@ test_bdev_unregister(void) struct spdk_nvme_transport_id trid = {}; struct spdk_nvme_host_id hostid = {}; struct spdk_nvme_ctrlr *ctrlr; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; struct nvme_ns *nvme_ns1, *nvme_ns2; const int STRING_SIZE = 32; const char *attached_names[STRING_SIZE]; @@ -2265,16 +2265,16 @@ test_bdev_unregister(void) spdk_delay_us(1000); poll_threads(); - nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); - nvme_ns1 = 
nvme_bdev_ctrlr->namespaces[0]; + nvme_ns1 = nvme_ctrlr->namespaces[0]; SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); bdev1 = nvme_ns1->bdev; SPDK_CU_ASSERT_FATAL(bdev1 != NULL); - nvme_ns2 = nvme_bdev_ctrlr->namespaces[1]; + nvme_ns2 = nvme_ctrlr->namespaces[1]; SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); bdev2 = nvme_ns2->bdev; @@ -2288,12 +2288,12 @@ test_bdev_unregister(void) CU_ASSERT(nvme_ns1->bdev == NULL); CU_ASSERT(nvme_ns2->bdev == NULL); - nvme_bdev_ctrlr->destruct = true; - _nvme_bdev_ctrlr_destruct(nvme_bdev_ctrlr); + nvme_ctrlr->destruct = true; + _nvme_ctrlr_destruct(nvme_ctrlr); poll_threads(); - CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); + CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); } static void diff --git a/test/unit/lib/bdev/nvme/bdev_ocssd.c/bdev_ocssd_ut.c b/test/unit/lib/bdev/nvme/bdev_ocssd.c/bdev_ocssd_ut.c index aa1ef81c24..9b4fe794f2 100644 --- a/test/unit/lib/bdev/nvme/bdev_ocssd.c/bdev_ocssd_ut.c +++ b/test/unit/lib/bdev/nvme/bdev_ocssd.c/bdev_ocssd_ut.c @@ -110,7 +110,7 @@ find_controller(const struct spdk_nvme_transport_id *trid) static void free_controller(struct spdk_nvme_ctrlr *ctrlr) { - CU_ASSERT(!nvme_bdev_ctrlr_get(&ctrlr->trid)); + CU_ASSERT(!nvme_ctrlr_get(&ctrlr->trid)); LIST_REMOVE(ctrlr, list); spdk_nvme_ctrlr_free_io_qpair(ctrlr->admin_qpair); free(ctrlr->chunk_info); @@ -197,11 +197,11 @@ nvme_ctrlr_populate_namespace_done(struct nvme_async_probe_ctx *ctx, ns->ctrlr->ref++; } -static struct nvme_bdev_ctrlr * +static struct nvme_ctrlr * create_nvme_bdev_controller(const struct spdk_nvme_transport_id *trid, const char *name) { struct spdk_nvme_ctrlr *ctrlr; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; struct nvme_ctrlr_trid *trid_entry; uint32_t nsid; int rc; @@ -209,48 +209,48 @@ create_nvme_bdev_controller(const struct spdk_nvme_transport_id *trid, const cha ctrlr = find_controller(trid); SPDK_CU_ASSERT_FATAL(ctrlr != NULL); - SPDK_CU_ASSERT_FATAL(!nvme_bdev_ctrlr_get(trid)); + SPDK_CU_ASSERT_FATAL(!nvme_ctrlr_get(trid)); - nvme_bdev_ctrlr = calloc(1, sizeof(*nvme_bdev_ctrlr)); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + nvme_ctrlr = calloc(1, sizeof(*nvme_ctrlr)); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); - rc = pthread_mutex_init(&nvme_bdev_ctrlr->mutex, NULL); + rc = pthread_mutex_init(&nvme_ctrlr->mutex, NULL); SPDK_CU_ASSERT_FATAL(rc == 0); - nvme_bdev_ctrlr->namespaces = calloc(ctrlr->ns_count, sizeof(struct nvme_ns *)); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->namespaces != NULL); + nvme_ctrlr->namespaces = calloc(ctrlr->ns_count, sizeof(struct nvme_ns *)); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr->namespaces != NULL); trid_entry = calloc(1, sizeof(struct nvme_ctrlr_trid)); SPDK_CU_ASSERT_FATAL(trid_entry != NULL); trid_entry->trid = *trid; - nvme_bdev_ctrlr->ctrlr = ctrlr; - nvme_bdev_ctrlr->num_ns = ctrlr->ns_count; - nvme_bdev_ctrlr->ref = 1; - nvme_bdev_ctrlr->connected_trid = &trid_entry->trid; - nvme_bdev_ctrlr->name = strdup(name); + nvme_ctrlr->ctrlr = ctrlr; + nvme_ctrlr->num_ns = ctrlr->ns_count; + nvme_ctrlr->ref = 1; + nvme_ctrlr->connected_trid = &trid_entry->trid; + nvme_ctrlr->name = strdup(name); for (nsid = 0; nsid < ctrlr->ns_count; ++nsid) { - nvme_bdev_ctrlr->namespaces[nsid] = calloc(1, sizeof(struct nvme_ns)); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->namespaces[nsid] != NULL); + nvme_ctrlr->namespaces[nsid] = calloc(1, sizeof(struct nvme_ns)); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr->namespaces[nsid] != NULL); - nvme_bdev_ctrlr->namespaces[nsid]->id = nsid + 1; - 
nvme_bdev_ctrlr->namespaces[nsid]->ctrlr = nvme_bdev_ctrlr; - nvme_bdev_ctrlr->namespaces[nsid]->type = NVME_NS_OCSSD; + nvme_ctrlr->namespaces[nsid]->id = nsid + 1; + nvme_ctrlr->namespaces[nsid]->ctrlr = nvme_ctrlr; + nvme_ctrlr->namespaces[nsid]->type = NVME_NS_OCSSD; - bdev_ocssd_populate_namespace(nvme_bdev_ctrlr, nvme_bdev_ctrlr->namespaces[nsid], NULL); + bdev_ocssd_populate_namespace(nvme_ctrlr, nvme_ctrlr->namespaces[nsid], NULL); } while (spdk_thread_poll(g_thread, 0, 0) > 0) {} - spdk_io_device_register(nvme_bdev_ctrlr, io_channel_create_cb, + spdk_io_device_register(nvme_ctrlr, io_channel_create_cb, io_channel_destroy_cb, 0, name); - TAILQ_INSERT_TAIL(&g_nvme_bdev_ctrlrs, nvme_bdev_ctrlr, tailq); + TAILQ_INSERT_TAIL(&g_nvme_ctrlrs, nvme_ctrlr, tailq); - TAILQ_INIT(&nvme_bdev_ctrlr->trids); - TAILQ_INSERT_HEAD(&nvme_bdev_ctrlr->trids, trid_entry, link); + TAILQ_INIT(&nvme_ctrlr->trids); + TAILQ_INSERT_HEAD(&nvme_ctrlr->trids, trid_entry, link); - return nvme_bdev_ctrlr; + return nvme_ctrlr; } static struct nvme_request * @@ -530,29 +530,29 @@ create_bdev(const char *ctrlr_name, const char *bdev_name, uint32_t nsid) } static void -delete_nvme_bdev_controller(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr) +delete_nvme_bdev_controller(struct nvme_ctrlr *nvme_ctrlr) { uint32_t nsid; - nvme_bdev_ctrlr->destruct = true; + nvme_ctrlr->destruct = true; - for (nsid = 0; nsid < nvme_bdev_ctrlr->num_ns; ++nsid) { - bdev_ocssd_depopulate_namespace(nvme_bdev_ctrlr->namespaces[nsid]); + for (nsid = 0; nsid < nvme_ctrlr->num_ns; ++nsid) { + bdev_ocssd_depopulate_namespace(nvme_ctrlr->namespaces[nsid]); } - nvme_bdev_ctrlr_release(nvme_bdev_ctrlr); + nvme_ctrlr_release(nvme_ctrlr); spdk_delay_us(1000); while (spdk_thread_poll(g_thread, 0, 0) > 0) {} - CU_ASSERT(TAILQ_EMPTY(&g_nvme_bdev_ctrlrs)); + CU_ASSERT(TAILQ_EMPTY(&g_nvme_ctrlrs)); } static void test_create_controller(void) { struct spdk_nvme_ctrlr *ctrlr; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" }; struct spdk_ocssd_geometry_data geometry = {}; struct spdk_bdev *bdev; @@ -579,7 +579,7 @@ test_create_controller(void) }; ctrlr = create_controller(&trid, ns_count, &geometry); - nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name); + nvme_ctrlr = create_nvme_bdev_controller(&trid, controller_name); for (nsid = 1; nsid <= ns_count; ++nsid) { snprintf(namebuf, sizeof(namebuf), "%sn%"PRIu32, controller_name, nsid); @@ -591,10 +591,10 @@ test_create_controller(void) CU_ASSERT_TRUE(bdev->zoned); } - delete_nvme_bdev_controller(nvme_bdev_ctrlr); + delete_nvme_bdev_controller(nvme_ctrlr); /* Verify that after deletion the bdevs can still be created */ - nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name); + nvme_ctrlr = create_nvme_bdev_controller(&trid, controller_name); for (nsid = 1; nsid <= ns_count; ++nsid) { snprintf(namebuf, sizeof(namebuf), "%sn%"PRIu32, controller_name, nsid); @@ -606,7 +606,7 @@ test_create_controller(void) CU_ASSERT_TRUE(bdev->zoned); } - delete_nvme_bdev_controller(nvme_bdev_ctrlr); + delete_nvme_bdev_controller(nvme_ctrlr); free_controller(ctrlr); } @@ -615,7 +615,7 @@ static void test_device_geometry(void) { struct spdk_nvme_ctrlr *ctrlr; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" }; const char *controller_name = "nvme0"; const char *bdev_name = "nvme0n1"; @@ -640,7 +640,7 @@ test_device_geometry(void) }; 
ctrlr = create_controller(&trid, 1, &geometry); - nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name); + nvme_ctrlr = create_nvme_bdev_controller(&trid, controller_name); rc = create_bdev(controller_name, bdev_name, 1); CU_ASSERT_EQUAL(rc, 0); @@ -655,7 +655,7 @@ test_device_geometry(void) CU_ASSERT_EQUAL(bdev->max_open_zones, geometry.maxocpu); CU_ASSERT_EQUAL(bdev->write_unit_size, geometry.ws_opt); - delete_nvme_bdev_controller(nvme_bdev_ctrlr); + delete_nvme_bdev_controller(nvme_ctrlr); free_controller(ctrlr); } @@ -687,7 +687,7 @@ static void test_lba_translation(void) { struct spdk_nvme_ctrlr *ctrlr; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" }; const char *controller_name = "nvme0"; const char *bdev_name = "nvme0n1"; @@ -711,11 +711,11 @@ test_lba_translation(void) }; ctrlr = create_controller(&trid, 1, &geometry); - nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + nvme_ctrlr = create_nvme_bdev_controller(&trid, controller_name); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->namespaces[0] != NULL); - ocssd_ns = bdev_ocssd_get_ns_from_nvme(nvme_bdev_ctrlr->namespaces[0]); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr->namespaces[0] != NULL); + ocssd_ns = bdev_ocssd_get_ns_from_nvme(nvme_ctrlr->namespaces[0]); rc = create_bdev(controller_name, bdev_name, 1); CU_ASSERT_EQUAL(rc, 0); @@ -749,7 +749,7 @@ test_lba_translation(void) CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 0, 1, 0)); CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_ns, lba), bdev->zone_size + 68); - delete_nvme_bdev_controller(nvme_bdev_ctrlr); + delete_nvme_bdev_controller(nvme_ctrlr); free_controller(ctrlr); geometry = (struct spdk_ocssd_geometry_data) { @@ -766,11 +766,11 @@ test_lba_translation(void) }; ctrlr = create_controller(&trid, 1, &geometry); - nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); + nvme_ctrlr = create_nvme_bdev_controller(&trid, controller_name); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); - SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr->namespaces[0] != NULL); - ocssd_ns = bdev_ocssd_get_ns_from_nvme(nvme_bdev_ctrlr->namespaces[0]); + SPDK_CU_ASSERT_FATAL(nvme_ctrlr->namespaces[0] != NULL); + ocssd_ns = bdev_ocssd_get_ns_from_nvme(nvme_ctrlr->namespaces[0]); rc = create_bdev(controller_name, bdev_name, 1); CU_ASSERT_EQUAL(rc, 0); @@ -807,7 +807,7 @@ test_lba_translation(void) CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_ns, lba), bdev->zone_size * geometry.num_pu * geometry.num_grp + 68); - delete_nvme_bdev_controller(nvme_bdev_ctrlr); + delete_nvme_bdev_controller(nvme_ctrlr); free_controller(ctrlr); } @@ -878,7 +878,7 @@ static void test_get_zone_info(void) { struct spdk_nvme_ctrlr *ctrlr; - struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; + struct nvme_ctrlr *nvme_ctrlr; struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" }; const char *controller_name = "nvme0"; const char *bdev_name = "nvme0n1"; @@ -907,7 +907,7 @@ test_get_zone_info(void) }; ctrlr = create_controller(&trid, 1, &geometry); - nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name); + nvme_ctrlr = create_nvme_bdev_controller(&trid, controller_name); rc = create_bdev(controller_name, bdev_name, 1); CU_ASSERT_EQUAL(rc, 0); @@ -919,7 +919,7 @@ test_get_zone_info(void) SPDK_CU_ASSERT_FATAL(ch != NULL); ctrlr_ch = spdk_io_channel_get_ctx(ch); - 
ctrlr_ch->ctrlr = nvme_bdev_ctrlr; + ctrlr_ch->ctrlr = nvme_ctrlr; ctrlr_ch->qpair = (struct spdk_nvme_qpair *)0x1; bdev_io = alloc_ocssd_io(); @@ -1033,7 +1033,7 @@ test_get_zone_info(void) g_chunk_info_cpl = (struct spdk_nvme_cpl) {}; g_zone_info_status = true; - delete_nvme_bdev_controller(nvme_bdev_ctrlr); + delete_nvme_bdev_controller(nvme_ctrlr); free(bdev_io); free(ch);