bdev/nvme: Rename nvme_io_channel to nvme_io_path
In the next patch, nvme_io_channel will be used as an I/O channel to a single nvme_bdev. This channel is created for a single nvme_bdev_ctrlr and has a corresponding I/O qpair. nvme_io_path will be a better name, especially when we support multipath. Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com> Change-Id: Ic34162f3c383676c5249396a09173329fc6febce Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8095 Reviewed-by: Ziye Yang <ziye.yang@intel.com> Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com> Reviewed-by: <dongx.yi@intel.com> Reviewed-by: Ben Walker <benjamin.walker@intel.com> Community-CI: Mellanox Build Bot Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
parent
5863f95ae4
commit
fdc4c13e9e
@ -176,7 +176,7 @@ static int bdev_nvme_get_zone_info(struct spdk_nvme_ns *ns, struct spdk_nvme_qpa
|
||||
static int bdev_nvme_zone_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
|
||||
struct nvme_bdev_io *bio, uint64_t zone_id,
|
||||
enum spdk_bdev_zone_action action);
|
||||
static int bdev_nvme_admin_passthru(struct nvme_io_channel *nvme_ch,
|
||||
static int bdev_nvme_admin_passthru(struct nvme_io_path *io_path,
|
||||
struct nvme_bdev_io *bio,
|
||||
struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes);
|
||||
static int bdev_nvme_io_passthru(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
|
||||
@ -185,9 +185,9 @@ static int bdev_nvme_io_passthru(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
|
||||
static int bdev_nvme_io_passthru_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
|
||||
struct nvme_bdev_io *bio,
|
||||
struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes, void *md_buf, size_t md_len);
|
||||
static int bdev_nvme_abort(struct nvme_io_channel *nvme_ch,
|
||||
static int bdev_nvme_abort(struct nvme_io_path *io_path,
|
||||
struct nvme_bdev_io *bio, struct nvme_bdev_io *bio_to_abort);
|
||||
static int bdev_nvme_reset(struct nvme_io_channel *nvme_ch, struct spdk_bdev_io *bdev_io);
|
||||
static int bdev_nvme_reset(struct nvme_io_path *io_path, struct spdk_bdev_io *bdev_io);
|
||||
static int bdev_nvme_failover(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, bool remove);
|
||||
static void remove_cb(void *cb_ctx, struct spdk_nvme_ctrlr *ctrlr);
|
||||
|
||||
@ -223,15 +223,15 @@ static config_json_namespace_fn g_config_json_namespace_fn[] = {
|
||||
};
|
||||
|
||||
struct spdk_nvme_qpair *
|
||||
bdev_nvme_get_io_qpair(struct spdk_io_channel *ctrlr_io_ch)
|
||||
bdev_nvme_get_io_qpair(struct spdk_io_channel *io_path_ch)
|
||||
{
|
||||
struct nvme_io_channel *nvme_ch;
|
||||
struct nvme_io_path *io_path;
|
||||
|
||||
assert(ctrlr_io_ch != NULL);
|
||||
assert(io_path_ch != NULL);
|
||||
|
||||
nvme_ch = spdk_io_channel_get_ctx(ctrlr_io_ch);
|
||||
io_path = spdk_io_channel_get_ctx(io_path_ch);
|
||||
|
||||
return nvme_ch->qpair;
|
||||
return io_path->qpair;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -368,9 +368,9 @@ bdev_nvme_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
|
||||
}
|
||||
|
||||
static int
|
||||
bdev_nvme_create_qpair(struct nvme_io_channel *nvme_ch)
|
||||
bdev_nvme_create_qpair(struct nvme_io_path *io_path)
|
||||
{
|
||||
struct spdk_nvme_ctrlr *ctrlr = nvme_ch->ctrlr->ctrlr;
|
||||
struct spdk_nvme_ctrlr *ctrlr = io_path->ctrlr->ctrlr;
|
||||
struct spdk_nvme_io_qpair_opts opts;
|
||||
struct spdk_nvme_qpair *qpair;
|
||||
int rc;
|
||||
@ -386,9 +386,9 @@ bdev_nvme_create_qpair(struct nvme_io_channel *nvme_ch)
|
||||
return -1;
|
||||
}
|
||||
|
||||
assert(nvme_ch->group != NULL);
|
||||
assert(io_path->group != NULL);
|
||||
|
||||
rc = spdk_nvme_poll_group_add(nvme_ch->group->group, qpair);
|
||||
rc = spdk_nvme_poll_group_add(io_path->group->group, qpair);
|
||||
if (rc != 0) {
|
||||
SPDK_ERRLOG("Unable to begin polling on NVMe Channel.\n");
|
||||
goto err;
|
||||
@ -400,7 +400,7 @@ bdev_nvme_create_qpair(struct nvme_io_channel *nvme_ch)
|
||||
goto err;
|
||||
}
|
||||
|
||||
nvme_ch->qpair = qpair;
|
||||
io_path->qpair = qpair;
|
||||
|
||||
return 0;
|
||||
|
||||
@ -411,17 +411,17 @@ err:
|
||||
}
|
||||
|
||||
static int
|
||||
bdev_nvme_destroy_qpair(struct nvme_io_channel *nvme_ch)
|
||||
bdev_nvme_destroy_qpair(struct nvme_io_path *io_path)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (nvme_ch->qpair == NULL) {
|
||||
if (io_path->qpair == NULL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
rc = spdk_nvme_ctrlr_free_io_qpair(nvme_ch->qpair);
|
||||
rc = spdk_nvme_ctrlr_free_io_qpair(io_path->qpair);
|
||||
if (!rc) {
|
||||
nvme_ch->qpair = NULL;
|
||||
io_path->qpair = NULL;
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
@ -450,14 +450,14 @@ bdev_nvme_check_pending_destruct(struct spdk_io_channel_iter *i, int status)
|
||||
}
|
||||
|
||||
static void
|
||||
_bdev_nvme_complete_pending_resets(struct nvme_io_channel *nvme_ch,
|
||||
_bdev_nvme_complete_pending_resets(struct nvme_io_path *io_path,
|
||||
enum spdk_bdev_io_status status)
|
||||
{
|
||||
struct spdk_bdev_io *bdev_io;
|
||||
|
||||
while (!TAILQ_EMPTY(&nvme_ch->pending_resets)) {
|
||||
bdev_io = TAILQ_FIRST(&nvme_ch->pending_resets);
|
||||
TAILQ_REMOVE(&nvme_ch->pending_resets, bdev_io, module_link);
|
||||
while (!TAILQ_EMPTY(&io_path->pending_resets)) {
|
||||
bdev_io = TAILQ_FIRST(&io_path->pending_resets);
|
||||
TAILQ_REMOVE(&io_path->pending_resets, bdev_io, module_link);
|
||||
spdk_bdev_io_complete(bdev_io, status);
|
||||
}
|
||||
}
|
||||
@ -466,9 +466,9 @@ static void
|
||||
bdev_nvme_complete_pending_resets(struct spdk_io_channel_iter *i)
|
||||
{
|
||||
struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
|
||||
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(_ch);
|
||||
struct nvme_io_path *io_path = spdk_io_channel_get_ctx(_ch);
|
||||
|
||||
_bdev_nvme_complete_pending_resets(nvme_ch, SPDK_BDEV_IO_STATUS_SUCCESS);
|
||||
_bdev_nvme_complete_pending_resets(io_path, SPDK_BDEV_IO_STATUS_SUCCESS);
|
||||
|
||||
spdk_for_each_channel_continue(i, 0);
|
||||
}
|
||||
@ -477,9 +477,9 @@ static void
|
||||
bdev_nvme_abort_pending_resets(struct spdk_io_channel_iter *i)
|
||||
{
|
||||
struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
|
||||
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(_ch);
|
||||
struct nvme_io_path *io_path = spdk_io_channel_get_ctx(_ch);
|
||||
|
||||
_bdev_nvme_complete_pending_resets(nvme_ch, SPDK_BDEV_IO_STATUS_FAILED);
|
||||
_bdev_nvme_complete_pending_resets(io_path, SPDK_BDEV_IO_STATUS_FAILED);
|
||||
|
||||
spdk_for_each_channel_continue(i, 0);
|
||||
}
|
||||
@ -559,10 +559,10 @@ static void
|
||||
_bdev_nvme_reset_create_qpair(struct spdk_io_channel_iter *i)
|
||||
{
|
||||
struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
|
||||
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(_ch);
|
||||
struct nvme_io_path *io_path = spdk_io_channel_get_ctx(_ch);
|
||||
int rc;
|
||||
|
||||
rc = bdev_nvme_create_qpair(nvme_ch);
|
||||
rc = bdev_nvme_create_qpair(io_path);
|
||||
|
||||
spdk_for_each_channel_continue(i, rc);
|
||||
}
|
||||
@ -598,10 +598,10 @@ static void
|
||||
_bdev_nvme_reset_destroy_qpair(struct spdk_io_channel_iter *i)
|
||||
{
|
||||
struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
|
||||
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
|
||||
int rc;
|
||||
|
||||
rc = bdev_nvme_destroy_qpair(nvme_ch);
|
||||
rc = bdev_nvme_destroy_qpair(io_path);
|
||||
|
||||
spdk_for_each_channel_continue(i, rc);
|
||||
}
|
||||
@ -634,21 +634,21 @@ _bdev_nvme_reset(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
|
||||
}
|
||||
|
||||
static int
|
||||
bdev_nvme_reset(struct nvme_io_channel *nvme_ch, struct spdk_bdev_io *bdev_io)
|
||||
bdev_nvme_reset(struct nvme_io_path *io_path, struct spdk_bdev_io *bdev_io)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = _bdev_nvme_reset(nvme_ch->ctrlr);
|
||||
rc = _bdev_nvme_reset(io_path->ctrlr);
|
||||
if (rc == 0) {
|
||||
assert(nvme_ch->ctrlr->reset_bdev_io == NULL);
|
||||
nvme_ch->ctrlr->reset_bdev_io = bdev_io;
|
||||
assert(io_path->ctrlr->reset_bdev_io == NULL);
|
||||
io_path->ctrlr->reset_bdev_io = bdev_io;
|
||||
} else if (rc == -EAGAIN) {
|
||||
/*
|
||||
* Reset call is queued only if it is from the app framework. This is on purpose so that
|
||||
* we don't interfere with the app framework reset strategy. i.e. we are deferring to the
|
||||
* upper level. If they are in the middle of a reset, we won't try to schedule another one.
|
||||
*/
|
||||
TAILQ_INSERT_TAIL(&nvme_ch->pending_resets, bdev_io, module_link);
|
||||
TAILQ_INSERT_TAIL(&io_path->pending_resets, bdev_io, module_link);
|
||||
} else {
|
||||
return rc;
|
||||
}
|
||||
@ -746,7 +746,7 @@ bdev_nvme_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
|
||||
struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
|
||||
struct spdk_bdev *bdev = bdev_io->bdev;
|
||||
struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev->ctxt;
|
||||
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_bdev_ns *nvme_ns;
|
||||
struct spdk_nvme_qpair *qpair;
|
||||
int ret;
|
||||
@ -756,7 +756,7 @@ bdev_nvme_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev, nvme_ch, &nvme_ns, &qpair))) {
|
||||
if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev, io_path, &nvme_ns, &qpair))) {
|
||||
ret = -ENXIO;
|
||||
goto exit;
|
||||
}
|
||||
@ -780,7 +780,7 @@ exit:
|
||||
static void
|
||||
bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
|
||||
{
|
||||
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
|
||||
struct spdk_bdev *bdev = bdev_io->bdev;
|
||||
struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev->ctxt;
|
||||
struct nvme_bdev_io *nbdev_io = (struct nvme_bdev_io *)bdev_io->driver_ctx;
|
||||
@ -789,7 +789,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
|
||||
struct spdk_nvme_qpair *qpair;
|
||||
int rc = 0;
|
||||
|
||||
if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev, nvme_ch, &nvme_ns, &qpair))) {
|
||||
if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev, io_path, &nvme_ns, &qpair))) {
|
||||
rc = -ENXIO;
|
||||
goto exit;
|
||||
}
|
||||
@ -855,7 +855,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
|
||||
bdev_io->u.bdev.num_blocks);
|
||||
break;
|
||||
case SPDK_BDEV_IO_TYPE_RESET:
|
||||
rc = bdev_nvme_reset(nvme_ch, bdev_io);
|
||||
rc = bdev_nvme_reset(io_path, bdev_io);
|
||||
break;
|
||||
case SPDK_BDEV_IO_TYPE_FLUSH:
|
||||
rc = bdev_nvme_flush(nvme_ns->ns,
|
||||
@ -891,7 +891,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
|
||||
bdev_io->u.zone_mgmt.zone_action);
|
||||
break;
|
||||
case SPDK_BDEV_IO_TYPE_NVME_ADMIN:
|
||||
rc = bdev_nvme_admin_passthru(nvme_ch,
|
||||
rc = bdev_nvme_admin_passthru(io_path,
|
||||
nbdev_io,
|
||||
&bdev_io->u.nvme_passthru.cmd,
|
||||
bdev_io->u.nvme_passthru.buf,
|
||||
@ -917,7 +917,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
|
||||
break;
|
||||
case SPDK_BDEV_IO_TYPE_ABORT:
|
||||
nbdev_io_to_abort = (struct nvme_bdev_io *)bdev_io->u.abort.bio_to_abort->driver_ctx;
|
||||
rc = bdev_nvme_abort(nvme_ch,
|
||||
rc = bdev_nvme_abort(io_path,
|
||||
nbdev_io,
|
||||
nbdev_io_to_abort);
|
||||
break;
|
||||
@ -996,10 +996,10 @@ bdev_nvme_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
|
||||
}
|
||||
|
||||
static int
|
||||
bdev_nvme_create_cb(void *io_device, void *ctx_buf)
|
||||
bdev_nvme_create_path_cb(void *io_device, void *ctx_buf)
|
||||
{
|
||||
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = io_device;
|
||||
struct nvme_io_channel *nvme_ch = ctx_buf;
|
||||
struct nvme_io_path *io_path = ctx_buf;
|
||||
struct spdk_io_channel *pg_ch;
|
||||
int rc;
|
||||
|
||||
@ -1008,26 +1008,26 @@ bdev_nvme_create_cb(void *io_device, void *ctx_buf)
|
||||
return -1;
|
||||
}
|
||||
|
||||
nvme_ch->group = spdk_io_channel_get_ctx(pg_ch);
|
||||
io_path->group = spdk_io_channel_get_ctx(pg_ch);
|
||||
|
||||
#ifdef SPDK_CONFIG_VTUNE
|
||||
nvme_ch->group->collect_spin_stat = true;
|
||||
io_path->group->collect_spin_stat = true;
|
||||
#else
|
||||
nvme_ch->group->collect_spin_stat = false;
|
||||
io_path->group->collect_spin_stat = false;
|
||||
#endif
|
||||
|
||||
TAILQ_INIT(&nvme_ch->pending_resets);
|
||||
TAILQ_INIT(&io_path->pending_resets);
|
||||
|
||||
if (spdk_nvme_ctrlr_is_ocssd_supported(nvme_bdev_ctrlr->ctrlr)) {
|
||||
rc = bdev_ocssd_create_io_channel(nvme_ch);
|
||||
rc = bdev_ocssd_create_io_channel(io_path);
|
||||
if (rc != 0) {
|
||||
goto err_ocssd_ch;
|
||||
}
|
||||
}
|
||||
|
||||
nvme_ch->ctrlr = nvme_bdev_ctrlr;
|
||||
io_path->ctrlr = nvme_bdev_ctrlr;
|
||||
|
||||
rc = bdev_nvme_create_qpair(nvme_ch);
|
||||
rc = bdev_nvme_create_qpair(io_path);
|
||||
if (rc != 0) {
|
||||
goto err_qpair;
|
||||
}
|
||||
@ -1035,8 +1035,8 @@ bdev_nvme_create_cb(void *io_device, void *ctx_buf)
|
||||
return 0;
|
||||
|
||||
err_qpair:
|
||||
if (nvme_ch->ocssd_ch) {
|
||||
bdev_ocssd_destroy_io_channel(nvme_ch);
|
||||
if (io_path->ocssd_ch) {
|
||||
bdev_ocssd_destroy_io_channel(io_path);
|
||||
}
|
||||
err_ocssd_ch:
|
||||
spdk_put_io_channel(pg_ch);
|
||||
@ -1045,19 +1045,19 @@ err_ocssd_ch:
|
||||
}
|
||||
|
||||
static void
|
||||
bdev_nvme_destroy_cb(void *io_device, void *ctx_buf)
|
||||
bdev_nvme_destroy_path_cb(void *io_device, void *ctx_buf)
|
||||
{
|
||||
struct nvme_io_channel *nvme_ch = ctx_buf;
|
||||
struct nvme_io_path *io_path = ctx_buf;
|
||||
|
||||
assert(nvme_ch->group != NULL);
|
||||
assert(io_path->group != NULL);
|
||||
|
||||
if (nvme_ch->ocssd_ch != NULL) {
|
||||
bdev_ocssd_destroy_io_channel(nvme_ch);
|
||||
if (io_path->ocssd_ch != NULL) {
|
||||
bdev_ocssd_destroy_io_channel(io_path);
|
||||
}
|
||||
|
||||
bdev_nvme_destroy_qpair(nvme_ch);
|
||||
bdev_nvme_destroy_qpair(io_path);
|
||||
|
||||
spdk_put_io_channel(spdk_io_channel_from_ctx(nvme_ch->group));
|
||||
spdk_put_io_channel(spdk_io_channel_from_ctx(io_path->group));
|
||||
}
|
||||
|
||||
static void
|
||||
@ -1294,8 +1294,8 @@ bdev_nvme_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *
|
||||
static uint64_t
|
||||
bdev_nvme_get_spin_time(struct spdk_io_channel *ch)
|
||||
{
|
||||
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_bdev_poll_group *group = nvme_ch->group;
|
||||
struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_bdev_poll_group *group = io_path->group;
|
||||
uint64_t spin_time;
|
||||
|
||||
if (!group || !group->collect_spin_stat) {
|
||||
@ -1832,8 +1832,10 @@ _nvme_bdev_ctrlr_create(struct spdk_nvme_ctrlr *ctrlr,
|
||||
|
||||
nvme_bdev_ctrlr->prchk_flags = prchk_flags;
|
||||
|
||||
spdk_io_device_register(nvme_bdev_ctrlr, bdev_nvme_create_cb, bdev_nvme_destroy_cb,
|
||||
sizeof(struct nvme_io_channel),
|
||||
spdk_io_device_register(nvme_bdev_ctrlr,
|
||||
bdev_nvme_create_path_cb,
|
||||
bdev_nvme_destroy_path_cb,
|
||||
sizeof(struct nvme_io_path),
|
||||
name);
|
||||
|
||||
nvme_bdev_ctrlr->adminq_timer_poller = SPDK_POLLER_REGISTER(bdev_nvme_poll_adminq, nvme_bdev_ctrlr,
|
||||
@ -2562,7 +2564,7 @@ bdev_nvme_readv_done(void *ref, const struct spdk_nvme_cpl *cpl)
|
||||
struct nvme_bdev_io *bio = ref;
|
||||
struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
|
||||
struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
|
||||
struct nvme_io_channel *nvme_ch;
|
||||
struct nvme_io_path *io_path;
|
||||
struct nvme_bdev_ns *nvme_ns;
|
||||
struct spdk_nvme_qpair *qpair;
|
||||
int ret;
|
||||
@ -2574,9 +2576,9 @@ bdev_nvme_readv_done(void *ref, const struct spdk_nvme_cpl *cpl)
|
||||
/* Save completion status to use after verifying PI error. */
|
||||
bio->cpl = *cpl;
|
||||
|
||||
nvme_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
|
||||
io_path = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
|
||||
|
||||
if (spdk_likely(bdev_nvme_find_io_path(nbdev, nvme_ch, &nvme_ns, &qpair))) {
|
||||
if (spdk_likely(bdev_nvme_find_io_path(nbdev, io_path, &nvme_ns, &qpair))) {
|
||||
/* Read without PI checking to verify PI error. */
|
||||
ret = bdev_nvme_no_pi_readv(nvme_ns->ns,
|
||||
qpair,
|
||||
@ -2725,7 +2727,7 @@ bdev_nvme_get_zone_info_done(void *ref, const struct spdk_nvme_cpl *cpl)
|
||||
struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
|
||||
struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
|
||||
struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
|
||||
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
|
||||
uint64_t zone_id = bdev_io->u.zone_mgmt.zone_id;
|
||||
uint32_t zones_to_copy = bdev_io->u.zone_mgmt.num_zones;
|
||||
struct spdk_bdev_zone_info *info = bdev_io->u.zone_mgmt.buf;
|
||||
@ -2739,7 +2741,7 @@ bdev_nvme_get_zone_info_done(void *ref, const struct spdk_nvme_cpl *cpl)
|
||||
goto out_complete_io_nvme_cpl;
|
||||
}
|
||||
|
||||
if (!bdev_nvme_find_io_path(nbdev, nvme_ch, &nvme_ns, &qpair)) {
|
||||
if (!bdev_nvme_find_io_path(nbdev, io_path, &nvme_ns, &qpair)) {
|
||||
ret = -ENXIO;
|
||||
goto out_complete_io_ret;
|
||||
}
|
||||
@ -3251,13 +3253,13 @@ bdev_nvme_zone_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair
|
||||
}
|
||||
|
||||
static int
|
||||
bdev_nvme_admin_passthru(struct nvme_io_channel *nvme_ch, struct nvme_bdev_io *bio,
|
||||
bdev_nvme_admin_passthru(struct nvme_io_path *io_path, struct nvme_bdev_io *bio,
|
||||
struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes)
|
||||
{
|
||||
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
|
||||
uint32_t max_xfer_size;
|
||||
|
||||
if (!bdev_nvme_find_admin_path(nvme_ch, &nvme_bdev_ctrlr)) {
|
||||
if (!bdev_nvme_find_admin_path(io_path, &nvme_bdev_ctrlr)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -3327,22 +3329,22 @@ bdev_nvme_io_passthru_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
|
||||
}
|
||||
|
||||
static int
|
||||
bdev_nvme_abort(struct nvme_io_channel *nvme_ch, struct nvme_bdev_io *bio,
|
||||
bdev_nvme_abort(struct nvme_io_path *io_path, struct nvme_bdev_io *bio,
|
||||
struct nvme_bdev_io *bio_to_abort)
|
||||
{
|
||||
int rc;
|
||||
|
||||
bio->orig_thread = spdk_get_thread();
|
||||
|
||||
rc = spdk_nvme_ctrlr_cmd_abort_ext(nvme_ch->ctrlr->ctrlr,
|
||||
nvme_ch->qpair,
|
||||
rc = spdk_nvme_ctrlr_cmd_abort_ext(io_path->ctrlr->ctrlr,
|
||||
io_path->qpair,
|
||||
bio_to_abort,
|
||||
bdev_nvme_abort_done, bio);
|
||||
if (rc == -ENOENT) {
|
||||
/* If no command was found in I/O qpair, the target command may be
|
||||
* admin command.
|
||||
*/
|
||||
rc = spdk_nvme_ctrlr_cmd_abort_ext(nvme_ch->ctrlr->ctrlr,
|
||||
rc = spdk_nvme_ctrlr_cmd_abort_ext(io_path->ctrlr->ctrlr,
|
||||
NULL,
|
||||
bio_to_abort,
|
||||
bdev_nvme_abort_done, bio);
|
||||
|
@ -63,7 +63,7 @@ struct spdk_bdev_nvme_opts {
|
||||
bool delay_cmd_submit;
|
||||
};
|
||||
|
||||
struct spdk_nvme_qpair *bdev_nvme_get_io_qpair(struct spdk_io_channel *ctrlr_io_ch);
|
||||
struct spdk_nvme_qpair *bdev_nvme_get_io_qpair(struct spdk_io_channel *io_path_ch);
|
||||
void bdev_nvme_get_opts(struct spdk_bdev_nvme_opts *opts);
|
||||
int bdev_nvme_set_opts(const struct spdk_bdev_nvme_opts *opts);
|
||||
int bdev_nvme_set_hotplug(bool enabled, uint64_t period_us, spdk_msg_fn cb, void *cb_ctx);
|
||||
|
@ -514,7 +514,7 @@ static void
|
||||
bdev_ocssd_io_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
|
||||
{
|
||||
struct ocssd_bdev *ocssd_bdev = (struct ocssd_bdev *)bdev_io->bdev->ctxt;
|
||||
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_bdev_ns *nvme_ns;
|
||||
struct spdk_nvme_qpair *qpair;
|
||||
int rc;
|
||||
@ -524,7 +524,7 @@ bdev_ocssd_io_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
|
||||
return;
|
||||
}
|
||||
|
||||
if (spdk_unlikely(!bdev_nvme_find_io_path(&ocssd_bdev->nvme_bdev, nvme_ch,
|
||||
if (spdk_unlikely(!bdev_nvme_find_io_path(&ocssd_bdev->nvme_bdev, io_path,
|
||||
&nvme_ns, &qpair))) {
|
||||
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
|
||||
return;
|
||||
@ -639,7 +639,7 @@ bdev_ocssd_zone_info_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
|
||||
struct spdk_ocssd_chunk_information_entry *chunk_info = &ocdev_io->zone_info.chunk_info;
|
||||
struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(ctx);
|
||||
struct ocssd_bdev *ocssd_bdev = bdev_io->bdev->ctxt;
|
||||
struct nvme_io_channel *nvme_ch;
|
||||
struct nvme_io_path *io_path;
|
||||
struct nvme_bdev_ns *nvme_ns;
|
||||
struct spdk_nvme_qpair *qpair;
|
||||
struct bdev_ocssd_ns *ocssd_ns;
|
||||
@ -651,9 +651,9 @@ bdev_ocssd_zone_info_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
|
||||
return;
|
||||
}
|
||||
|
||||
nvme_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
|
||||
io_path = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
|
||||
|
||||
if (spdk_unlikely(!bdev_nvme_find_io_path(&ocssd_bdev->nvme_bdev, nvme_ch, &nvme_ns, &qpair))) {
|
||||
if (spdk_unlikely(!bdev_nvme_find_io_path(&ocssd_bdev->nvme_bdev, io_path, &nvme_ns, &qpair))) {
|
||||
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
|
||||
return;
|
||||
}
|
||||
@ -741,14 +741,14 @@ static void bdev_ocssd_submit_request(struct spdk_io_channel *ch, struct spdk_bd
|
||||
static int
|
||||
bdev_ocssd_poll_pending(void *ctx)
|
||||
{
|
||||
struct nvme_io_channel *nvme_ch = ctx;
|
||||
struct nvme_io_path *io_path = ctx;
|
||||
struct ocssd_io_channel *ocssd_ch;
|
||||
struct spdk_bdev_io *bdev_io;
|
||||
struct spdk_io_channel *ch;
|
||||
TAILQ_HEAD(, spdk_bdev_io) pending_requests;
|
||||
int num_requests = 0;
|
||||
|
||||
ocssd_ch = nvme_ch->ocssd_ch;
|
||||
ocssd_ch = io_path->ocssd_ch;
|
||||
|
||||
TAILQ_INIT(&pending_requests);
|
||||
TAILQ_SWAP(&ocssd_ch->pending_requests, &pending_requests, spdk_bdev_io, module_link);
|
||||
@ -770,8 +770,8 @@ bdev_ocssd_poll_pending(void *ctx)
|
||||
static void
|
||||
bdev_ocssd_delay_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
|
||||
{
|
||||
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
|
||||
struct ocssd_io_channel *ocssd_ch = nvme_ch->ocssd_ch;
|
||||
struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
|
||||
struct ocssd_io_channel *ocssd_ch = io_path->ocssd_ch;
|
||||
|
||||
TAILQ_INSERT_TAIL(&ocssd_ch->pending_requests, bdev_io, module_link);
|
||||
spdk_poller_resume(ocssd_ch->pending_poller);
|
||||
@ -780,13 +780,13 @@ bdev_ocssd_delay_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
|
||||
static int
|
||||
_bdev_ocssd_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
|
||||
{
|
||||
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
|
||||
struct ocssd_bdev *ocssd_bdev = (struct ocssd_bdev *)bdev_io->bdev->ctxt;
|
||||
struct bdev_ocssd_io *ocdev_io = (struct bdev_ocssd_io *)bdev_io->driver_ctx;
|
||||
struct nvme_bdev_ns *nvme_ns;
|
||||
struct spdk_nvme_qpair *qpair;
|
||||
|
||||
if (spdk_unlikely(!bdev_nvme_find_io_path(&ocssd_bdev->nvme_bdev, nvme_ch,
|
||||
if (spdk_unlikely(!bdev_nvme_find_io_path(&ocssd_bdev->nvme_bdev, io_path,
|
||||
&nvme_ns, &qpair))) {
|
||||
return -1;
|
||||
}
|
||||
@ -1496,7 +1496,7 @@ bdev_ocssd_depopulate_namespace(struct nvme_bdev_ns *nvme_ns)
|
||||
}
|
||||
|
||||
int
|
||||
bdev_ocssd_create_io_channel(struct nvme_io_channel *nvme_ch)
|
||||
bdev_ocssd_create_io_channel(struct nvme_io_path *io_path)
|
||||
{
|
||||
struct ocssd_io_channel *ocssd_ch;
|
||||
|
||||
@ -1505,7 +1505,7 @@ bdev_ocssd_create_io_channel(struct nvme_io_channel *nvme_ch)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ocssd_ch->pending_poller = SPDK_POLLER_REGISTER(bdev_ocssd_poll_pending, nvme_ch, 0);
|
||||
ocssd_ch->pending_poller = SPDK_POLLER_REGISTER(bdev_ocssd_poll_pending, io_path, 0);
|
||||
if (ocssd_ch->pending_poller == NULL) {
|
||||
SPDK_ERRLOG("Failed to register pending requests poller\n");
|
||||
free(ocssd_ch);
|
||||
@ -1516,16 +1516,16 @@ bdev_ocssd_create_io_channel(struct nvme_io_channel *nvme_ch)
|
||||
spdk_poller_pause(ocssd_ch->pending_poller);
|
||||
|
||||
TAILQ_INIT(&ocssd_ch->pending_requests);
|
||||
nvme_ch->ocssd_ch = ocssd_ch;
|
||||
io_path->ocssd_ch = ocssd_ch;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
bdev_ocssd_destroy_io_channel(struct nvme_io_channel *nvme_ch)
|
||||
bdev_ocssd_destroy_io_channel(struct nvme_io_path *io_path)
|
||||
{
|
||||
spdk_poller_unregister(&nvme_ch->ocssd_ch->pending_poller);
|
||||
free(nvme_ch->ocssd_ch);
|
||||
spdk_poller_unregister(&io_path->ocssd_ch->pending_poller);
|
||||
free(io_path->ocssd_ch);
|
||||
}
|
||||
|
||||
int
|
||||
|
@ -50,8 +50,8 @@ void bdev_ocssd_populate_namespace(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr,
|
||||
void bdev_ocssd_depopulate_namespace(struct nvme_bdev_ns *nvme_ns);
|
||||
void bdev_ocssd_namespace_config_json(struct spdk_json_write_ctx *w, struct nvme_bdev_ns *nvme_ns);
|
||||
|
||||
int bdev_ocssd_create_io_channel(struct nvme_io_channel *ioch);
|
||||
void bdev_ocssd_destroy_io_channel(struct nvme_io_channel *ioch);
|
||||
int bdev_ocssd_create_io_channel(struct nvme_io_path *ioch);
|
||||
void bdev_ocssd_destroy_io_channel(struct nvme_io_path *ioch);
|
||||
|
||||
int bdev_ocssd_init_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);
|
||||
void bdev_ocssd_fini_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);
|
||||
|
@ -154,7 +154,7 @@ struct nvme_async_probe_ctx {
|
||||
|
||||
struct ocssd_io_channel;
|
||||
|
||||
struct nvme_io_channel {
|
||||
struct nvme_io_path {
|
||||
struct nvme_bdev_ctrlr *ctrlr;
|
||||
struct spdk_nvme_qpair *qpair;
|
||||
struct nvme_bdev_poll_group *group;
|
||||
@ -178,24 +178,24 @@ void nvme_bdev_ctrlr_destruct(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);
|
||||
void nvme_bdev_ctrlr_unregister(void *ctx);
|
||||
|
||||
static inline bool
|
||||
bdev_nvme_find_io_path(struct nvme_bdev *nbdev, struct nvme_io_channel *nvme_ch,
|
||||
bdev_nvme_find_io_path(struct nvme_bdev *nbdev, struct nvme_io_path *io_path,
|
||||
struct nvme_bdev_ns **_nvme_ns, struct spdk_nvme_qpair **_qpair)
|
||||
{
|
||||
if (spdk_unlikely(nvme_ch->qpair == NULL)) {
|
||||
if (spdk_unlikely(io_path->qpair == NULL)) {
|
||||
/* The device is currently resetting. */
|
||||
return false;
|
||||
}
|
||||
|
||||
*_nvme_ns = nbdev->nvme_ns;
|
||||
*_qpair = nvme_ch->qpair;
|
||||
*_qpair = io_path->qpair;
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
bdev_nvme_find_admin_path(struct nvme_io_channel *nvme_ch,
|
||||
bdev_nvme_find_admin_path(struct nvme_io_path *io_path,
|
||||
struct nvme_bdev_ctrlr **_nvme_bdev_ctrlr)
|
||||
{
|
||||
*_nvme_bdev_ctrlr = nvme_ch->ctrlr;
|
||||
*_nvme_bdev_ctrlr = io_path->ctrlr;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -204,9 +204,9 @@ DEFINE_STUB_V(bdev_ocssd_depopulate_namespace, (struct nvme_bdev_ns *nvme_ns));
|
||||
DEFINE_STUB_V(bdev_ocssd_namespace_config_json, (struct spdk_json_write_ctx *w,
|
||||
struct nvme_bdev_ns *nvme_ns));
|
||||
|
||||
DEFINE_STUB(bdev_ocssd_create_io_channel, int, (struct nvme_io_channel *ioch), 0);
|
||||
DEFINE_STUB(bdev_ocssd_create_io_channel, int, (struct nvme_io_path *ioch), 0);
|
||||
|
||||
DEFINE_STUB_V(bdev_ocssd_destroy_io_channel, (struct nvme_io_channel *ioch));
|
||||
DEFINE_STUB_V(bdev_ocssd_destroy_io_channel, (struct nvme_io_path *ioch));
|
||||
|
||||
DEFINE_STUB(bdev_ocssd_init_ctrlr, int, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr), 0);
|
||||
|
||||
@ -1016,7 +1016,7 @@ test_reset_ctrlr(void)
|
||||
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
|
||||
struct nvme_bdev_ctrlr_trid *curr_trid;
|
||||
struct spdk_io_channel *ch1, *ch2;
|
||||
struct nvme_io_channel *nvme_ch1, *nvme_ch2;
|
||||
struct nvme_io_path *io_path1, *io_path2;
|
||||
int rc;
|
||||
|
||||
ut_init_trid(&trid);
|
||||
@ -1035,16 +1035,16 @@ test_reset_ctrlr(void)
|
||||
ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
|
||||
SPDK_CU_ASSERT_FATAL(ch1 != NULL);
|
||||
|
||||
nvme_ch1 = spdk_io_channel_get_ctx(ch1);
|
||||
CU_ASSERT(nvme_ch1->qpair != NULL);
|
||||
io_path1 = spdk_io_channel_get_ctx(ch1);
|
||||
CU_ASSERT(io_path1->qpair != NULL);
|
||||
|
||||
set_thread(1);
|
||||
|
||||
ch2 = spdk_get_io_channel(nvme_bdev_ctrlr);
|
||||
SPDK_CU_ASSERT_FATAL(ch2 != NULL);
|
||||
|
||||
nvme_ch2 = spdk_io_channel_get_ctx(ch2);
|
||||
CU_ASSERT(nvme_ch2->qpair != NULL);
|
||||
io_path2 = spdk_io_channel_get_ctx(ch2);
|
||||
CU_ASSERT(io_path2->qpair != NULL);
|
||||
|
||||
/* Reset starts from thread 1. */
|
||||
set_thread(1);
|
||||
@ -1070,28 +1070,28 @@ test_reset_ctrlr(void)
|
||||
rc = _bdev_nvme_reset(nvme_bdev_ctrlr);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
|
||||
CU_ASSERT(nvme_ch1->qpair != NULL);
|
||||
CU_ASSERT(nvme_ch2->qpair != NULL);
|
||||
CU_ASSERT(io_path1->qpair != NULL);
|
||||
CU_ASSERT(io_path2->qpair != NULL);
|
||||
|
||||
poll_thread_times(0, 1);
|
||||
CU_ASSERT(nvme_ch1->qpair == NULL);
|
||||
CU_ASSERT(nvme_ch2->qpair != NULL);
|
||||
CU_ASSERT(io_path1->qpair == NULL);
|
||||
CU_ASSERT(io_path2->qpair != NULL);
|
||||
|
||||
poll_thread_times(1, 1);
|
||||
CU_ASSERT(nvme_ch1->qpair == NULL);
|
||||
CU_ASSERT(nvme_ch2->qpair == NULL);
|
||||
CU_ASSERT(io_path1->qpair == NULL);
|
||||
CU_ASSERT(io_path2->qpair == NULL);
|
||||
CU_ASSERT(ctrlr.is_failed == true);
|
||||
|
||||
poll_thread_times(1, 1);
|
||||
CU_ASSERT(ctrlr.is_failed == false);
|
||||
|
||||
poll_thread_times(0, 1);
|
||||
CU_ASSERT(nvme_ch1->qpair != NULL);
|
||||
CU_ASSERT(nvme_ch2->qpair == NULL);
|
||||
CU_ASSERT(io_path1->qpair != NULL);
|
||||
CU_ASSERT(io_path2->qpair == NULL);
|
||||
|
||||
poll_thread_times(1, 1);
|
||||
CU_ASSERT(nvme_ch1->qpair != NULL);
|
||||
CU_ASSERT(nvme_ch2->qpair != NULL);
|
||||
CU_ASSERT(io_path1->qpair != NULL);
|
||||
CU_ASSERT(io_path2->qpair != NULL);
|
||||
CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
|
||||
CU_ASSERT(curr_trid->is_failed == true);
|
||||
|
||||
@ -1344,7 +1344,7 @@ test_pending_reset(void)
|
||||
const char *attached_names[STRING_SIZE];
|
||||
struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
|
||||
struct spdk_io_channel *ch1, *ch2;
|
||||
struct nvme_io_channel *nvme_ch1, *nvme_ch2;
|
||||
struct nvme_io_path *io_path1, *io_path2;
|
||||
int rc;
|
||||
|
||||
memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
|
||||
@ -1379,28 +1379,28 @@ test_pending_reset(void)
|
||||
ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
|
||||
SPDK_CU_ASSERT_FATAL(ch1 != NULL);
|
||||
|
||||
nvme_ch1 = spdk_io_channel_get_ctx(ch1);
|
||||
io_path1 = spdk_io_channel_get_ctx(ch1);
|
||||
|
||||
set_thread(1);
|
||||
|
||||
ch2 = spdk_get_io_channel(nvme_bdev_ctrlr);
|
||||
SPDK_CU_ASSERT_FATAL(ch2 != NULL);
|
||||
|
||||
nvme_ch2 = spdk_io_channel_get_ctx(ch2);
|
||||
io_path2 = spdk_io_channel_get_ctx(ch2);
|
||||
|
||||
/* The first reset request is submitted on thread 1, and the second reset request
|
||||
* is submitted on thread 0 while processing the first request.
|
||||
*/
|
||||
rc = bdev_nvme_reset(nvme_ch2, first_bdev_io);
|
||||
rc = bdev_nvme_reset(io_path2, first_bdev_io);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
|
||||
CU_ASSERT(TAILQ_EMPTY(&nvme_ch2->pending_resets));
|
||||
CU_ASSERT(TAILQ_EMPTY(&io_path2->pending_resets));
|
||||
|
||||
set_thread(0);
|
||||
|
||||
rc = bdev_nvme_reset(nvme_ch1, second_bdev_io);
|
||||
rc = bdev_nvme_reset(io_path1, second_bdev_io);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(TAILQ_FIRST(&nvme_ch1->pending_resets) == second_bdev_io);
|
||||
CU_ASSERT(TAILQ_FIRST(&io_path1->pending_resets) == second_bdev_io);
|
||||
|
||||
poll_threads();
|
||||
|
||||
@ -1416,16 +1416,16 @@ test_pending_reset(void)
|
||||
*/
|
||||
set_thread(1);
|
||||
|
||||
rc = bdev_nvme_reset(nvme_ch2, first_bdev_io);
|
||||
rc = bdev_nvme_reset(io_path2, first_bdev_io);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
|
||||
CU_ASSERT(TAILQ_EMPTY(&nvme_ch2->pending_resets));
|
||||
CU_ASSERT(TAILQ_EMPTY(&io_path2->pending_resets));
|
||||
|
||||
set_thread(0);
|
||||
|
||||
rc = bdev_nvme_reset(nvme_ch1, second_bdev_io);
|
||||
rc = bdev_nvme_reset(io_path1, second_bdev_io);
|
||||
CU_ASSERT(rc == 0);
|
||||
CU_ASSERT(TAILQ_FIRST(&nvme_ch1->pending_resets) == second_bdev_io);
|
||||
CU_ASSERT(TAILQ_FIRST(&io_path1->pending_resets) == second_bdev_io);
|
||||
|
||||
ctrlr->fail_reset = true;
|
||||
|
||||
@ -1591,7 +1591,7 @@ test_reconnect_qpair(void)
|
||||
struct spdk_nvme_ctrlr ctrlr = {};
|
||||
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
|
||||
struct spdk_io_channel *ch;
|
||||
struct nvme_io_channel *nvme_ch;
|
||||
struct nvme_io_path *io_path;
|
||||
int rc;
|
||||
|
||||
set_thread(0);
|
||||
@ -1607,26 +1607,26 @@ test_reconnect_qpair(void)
|
||||
ch = spdk_get_io_channel(nvme_bdev_ctrlr);
|
||||
SPDK_CU_ASSERT_FATAL(ch != NULL);
|
||||
|
||||
nvme_ch = spdk_io_channel_get_ctx(ch);
|
||||
CU_ASSERT(nvme_ch->qpair != NULL);
|
||||
CU_ASSERT(nvme_ch->group != NULL);
|
||||
CU_ASSERT(nvme_ch->group->group != NULL);
|
||||
CU_ASSERT(nvme_ch->group->poller != NULL);
|
||||
io_path = spdk_io_channel_get_ctx(ch);
|
||||
CU_ASSERT(io_path->qpair != NULL);
|
||||
CU_ASSERT(io_path->group != NULL);
|
||||
CU_ASSERT(io_path->group->group != NULL);
|
||||
CU_ASSERT(io_path->group->poller != NULL);
|
||||
|
||||
/* Test if the disconnected qpair is reconnected. */
|
||||
nvme_ch->qpair->is_connected = false;
|
||||
io_path->qpair->is_connected = false;
|
||||
|
||||
poll_threads();
|
||||
|
||||
CU_ASSERT(nvme_ch->qpair->is_connected == true);
|
||||
CU_ASSERT(io_path->qpair->is_connected == true);
|
||||
|
||||
/* If the ctrlr is failed, reconnecting qpair should fail too. */
|
||||
nvme_ch->qpair->is_connected = false;
|
||||
io_path->qpair->is_connected = false;
|
||||
ctrlr.is_failed = true;
|
||||
|
||||
poll_threads();
|
||||
|
||||
CU_ASSERT(nvme_ch->qpair->is_connected == false);
|
||||
CU_ASSERT(io_path->qpair->is_connected == false);
|
||||
|
||||
spdk_put_io_channel(ch);
|
||||
|
||||
@ -1721,12 +1721,12 @@ static void
|
||||
ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
|
||||
enum spdk_bdev_io_type io_type)
|
||||
{
|
||||
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
|
||||
struct nvme_bdev_ns *nvme_ns = NULL;
|
||||
struct spdk_nvme_qpair *qpair = NULL;
|
||||
|
||||
CU_ASSERT(bdev_nvme_find_io_path(nbdev, nvme_ch, &nvme_ns, &qpair));
|
||||
CU_ASSERT(bdev_nvme_find_io_path(nbdev, io_path, &nvme_ns, &qpair));
|
||||
|
||||
bdev_io->type = io_type;
|
||||
bdev_io->internal.in_submit_request = true;
|
||||
@ -1747,12 +1747,12 @@ static void
|
||||
ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
|
||||
enum spdk_bdev_io_type io_type)
|
||||
{
|
||||
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
|
||||
struct nvme_bdev_ns *nvme_ns = NULL;
|
||||
struct spdk_nvme_qpair *qpair = NULL;
|
||||
|
||||
CU_ASSERT(bdev_nvme_find_io_path(nbdev, nvme_ch, &nvme_ns, &qpair));
|
||||
CU_ASSERT(bdev_nvme_find_io_path(nbdev, io_path, &nvme_ns, &qpair));
|
||||
|
||||
bdev_io->type = io_type;
|
||||
bdev_io->internal.in_submit_request = true;
|
||||
@ -1767,14 +1767,14 @@ ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
|
||||
static void
|
||||
ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
|
||||
{
|
||||
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
|
||||
struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
|
||||
struct ut_nvme_req *req;
|
||||
struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
|
||||
struct nvme_bdev_ns *nvme_ns = NULL;
|
||||
struct spdk_nvme_qpair *qpair = NULL;
|
||||
|
||||
CU_ASSERT(bdev_nvme_find_io_path(nbdev, nvme_ch, &nvme_ns, &qpair));
|
||||
CU_ASSERT(bdev_nvme_find_io_path(nbdev, io_path, &nvme_ns, &qpair));
|
||||
|
||||
/* Only compare and write now. */
|
||||
bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
|
||||
@ -1787,7 +1787,7 @@ ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *b
|
||||
CU_ASSERT(bio->first_fused_submitted == true);
|
||||
|
||||
/* First outstanding request is compare operation. */
|
||||
req = TAILQ_FIRST(&nvme_ch->qpair->outstanding_reqs);
|
||||
req = TAILQ_FIRST(&io_path->qpair->outstanding_reqs);
|
||||
SPDK_CU_ASSERT_FATAL(req != NULL);
|
||||
CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
|
||||
req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
|
||||
@ -1996,7 +1996,7 @@ test_abort(void)
|
||||
struct nvme_bdev *bdev;
|
||||
struct spdk_bdev_io *write_io, *admin_io, *abort_io;
|
||||
struct spdk_io_channel *ch1, *ch2;
|
||||
struct nvme_io_channel *nvme_ch1;
|
||||
struct nvme_io_path *io_path1;
|
||||
int rc;
|
||||
|
||||
/* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on
|
||||
@ -2048,7 +2048,7 @@ test_abort(void)
|
||||
|
||||
ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
|
||||
SPDK_CU_ASSERT_FATAL(ch1 != NULL);
|
||||
nvme_ch1 = spdk_io_channel_get_ctx(ch1);
|
||||
io_path1 = spdk_io_channel_get_ctx(ch1);
|
||||
|
||||
set_thread(1);
|
||||
|
||||
@ -2102,7 +2102,7 @@ test_abort(void)
|
||||
bdev_nvme_submit_request(ch1, write_io);
|
||||
|
||||
CU_ASSERT(write_io->internal.in_submit_request == true);
|
||||
CU_ASSERT(nvme_ch1->qpair->num_outstanding_reqs == 1);
|
||||
CU_ASSERT(io_path1->qpair->num_outstanding_reqs == 1);
|
||||
|
||||
abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
|
||||
abort_io->u.abort.bio_to_abort = write_io;
|
||||
@ -2118,7 +2118,7 @@ test_abort(void)
|
||||
CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
|
||||
CU_ASSERT(write_io->internal.in_submit_request == false);
|
||||
CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
|
||||
CU_ASSERT(nvme_ch1->qpair->num_outstanding_reqs == 0);
|
||||
CU_ASSERT(io_path1->qpair->num_outstanding_reqs == 0);
|
||||
|
||||
/* Aborting the admin request should succeed. */
|
||||
admin_io->internal.in_submit_request = true;
|
||||
@ -2174,7 +2174,7 @@ test_get_io_qpair(void)
|
||||
struct spdk_nvme_ctrlr ctrlr = {};
|
||||
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
|
||||
struct spdk_io_channel *ch;
|
||||
struct nvme_io_channel *nvme_ch;
|
||||
struct nvme_io_path *io_path;
|
||||
struct spdk_nvme_qpair *qpair;
|
||||
int rc;
|
||||
|
||||
@ -2190,11 +2190,11 @@ test_get_io_qpair(void)
|
||||
|
||||
ch = spdk_get_io_channel(nvme_bdev_ctrlr);
|
||||
SPDK_CU_ASSERT_FATAL(ch != NULL);
|
||||
nvme_ch = spdk_io_channel_get_ctx(ch);
|
||||
CU_ASSERT(nvme_ch->qpair != NULL);
|
||||
io_path = spdk_io_channel_get_ctx(ch);
|
||||
CU_ASSERT(io_path->qpair != NULL);
|
||||
|
||||
qpair = bdev_nvme_get_io_qpair(ch);
|
||||
CU_ASSERT(qpair == nvme_ch->qpair);
|
||||
CU_ASSERT(qpair == io_path->qpair);
|
||||
|
||||
spdk_put_io_channel(ch);
|
||||
|
||||
|
@ -885,7 +885,7 @@ test_get_zone_info(void)
|
||||
struct spdk_bdev *bdev;
|
||||
struct spdk_bdev_io *bdev_io;
|
||||
struct spdk_io_channel *ch;
|
||||
struct nvme_io_channel *nvme_ch;
|
||||
struct nvme_io_path *io_path;
|
||||
#define MAX_ZONE_INFO_COUNT 64
|
||||
struct spdk_bdev_zone_info zone_info[MAX_ZONE_INFO_COUNT];
|
||||
struct spdk_ocssd_chunk_information_entry *chunk_info;
|
||||
@ -915,12 +915,12 @@ test_get_zone_info(void)
|
||||
bdev = spdk_bdev_get_by_name(bdev_name);
|
||||
SPDK_CU_ASSERT_FATAL(bdev != NULL);
|
||||
|
||||
ch = calloc(1, sizeof(*ch) + sizeof(*nvme_ch));
|
||||
ch = calloc(1, sizeof(*ch) + sizeof(*io_path));
|
||||
SPDK_CU_ASSERT_FATAL(ch != NULL);
|
||||
|
||||
nvme_ch = spdk_io_channel_get_ctx(ch);
|
||||
nvme_ch->ctrlr = nvme_bdev_ctrlr;
|
||||
nvme_ch->qpair = (struct spdk_nvme_qpair *)0x1;
|
||||
io_path = spdk_io_channel_get_ctx(ch);
|
||||
io_path->ctrlr = nvme_bdev_ctrlr;
|
||||
io_path->qpair = (struct spdk_nvme_qpair *)0x1;
|
||||
|
||||
bdev_io = alloc_ocssd_io();
|
||||
bdev_io->internal.cb = get_zone_info_cb;
|
||||
|
Loading…
Reference in New Issue
Block a user