bdev/nvme: Create nvme_bdev_channel for nvme_bdev

Create a single nvme_bdev_channel for each nvme_bdev on each SPDK
thread. An nvme_bdev_channel holds a pair of nvme_ns and nvme_ctrlr_channel.

This pair of nvme_ns and nvme_ctrlr_channel will be aggregated into an
nvme_ns_channel in the following patches.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I095a2d6afa4ea23a87e4452b2f9d4c7e0087abe0
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/6605
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Ziye Yang <ziye.yang@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Authored by Shuhei Matsumoto on 2021-07-07 02:35:01 +09:00, committed by Tomasz Zawadzki
parent f6b19841da
commit d2d1899b90
6 changed files with 164 additions and 81 deletions
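
For orientation before the hunks, here is a minimal, hedged sketch of the pairing this patch introduces. The struct mirrors the nvme_bdev_channel definition added in the header hunk below; the forward declarations are only stand-ins for types that already exist in the module, so the sketch compiles on its own and is not a copy of the SPDK sources.

/* Sketch only: the real definition is in the header hunk further down. */
struct nvme_ns;                        /* namespace wrapper owned by the nvme_bdev */
struct nvme_ctrlr_channel;             /* existing per-thread controller channel that holds the qpair */

struct nvme_bdev_channel {
    struct nvme_ns            *nvme_ns;    /* namespace this bdev submits I/O to */
    struct nvme_ctrlr_channel *ctrlr_ch;   /* controller channel whose qpair carries the I/O */
};

With this channel in place, bdev_nvme_get_io_channel() returns a channel registered on the nvme_bdev itself, and the new create callback pairs the two members by taking a reference on the controller's io_channel (see the channel-callback hunk below).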


@@ -177,7 +177,7 @@ static int bdev_nvme_get_zone_info(struct spdk_nvme_ns *ns, struct spdk_nvme_qpa
 static int bdev_nvme_zone_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
         struct nvme_bdev_io *bio, uint64_t zone_id,
         enum spdk_bdev_zone_action action);
-static int bdev_nvme_admin_passthru(struct nvme_ctrlr_channel *ctrlr_ch,
+static int bdev_nvme_admin_passthru(struct nvme_bdev_channel *nbdev_ch,
         struct nvme_bdev_io *bio,
         struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes);
 static int bdev_nvme_io_passthru(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
@@ -186,9 +186,9 @@ static int bdev_nvme_io_passthru(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
 static int bdev_nvme_io_passthru_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
         struct nvme_bdev_io *bio,
         struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes, void *md_buf, size_t md_len);
-static int bdev_nvme_abort(struct nvme_ctrlr_channel *ctrlr_ch,
+static int bdev_nvme_abort(struct nvme_bdev_channel *nbdev_ch,
         struct nvme_bdev_io *bio, struct nvme_bdev_io *bio_to_abort);
-static int bdev_nvme_reset_io(struct nvme_ctrlr_channel *ctrlr_ch, struct nvme_bdev_io *bio);
+static int bdev_nvme_reset_io(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio);
 static int bdev_nvme_failover(struct nvme_ctrlr *nvme_ctrlr, bool remove);
 static void remove_cb(void *cb_ctx, struct spdk_nvme_ctrlr *ctrlr);
@@ -253,24 +253,24 @@ static struct spdk_bdev_module nvme_if = {
 SPDK_BDEV_MODULE_REGISTER(nvme, &nvme_if)
 
 static inline bool
-bdev_nvme_find_io_path(struct nvme_bdev *nbdev, struct nvme_ctrlr_channel *ctrlr_ch,
+bdev_nvme_find_io_path(struct nvme_bdev_channel *nbdev_ch,
         struct spdk_nvme_ns **_ns, struct spdk_nvme_qpair **_qpair)
 {
-    if (spdk_unlikely(ctrlr_ch->qpair == NULL)) {
+    if (spdk_unlikely(nbdev_ch->ctrlr_ch->qpair == NULL)) {
         /* The device is currently resetting. */
         return false;
     }
 
-    *_ns = nbdev->nvme_ns->ns;
-    *_qpair = ctrlr_ch->qpair;
+    *_ns = nbdev_ch->nvme_ns->ns;
+    *_qpair = nbdev_ch->ctrlr_ch->qpair;
 
     return true;
 }
 
 static inline bool
-bdev_nvme_find_admin_path(struct nvme_ctrlr_channel *ctrlr_ch,
+bdev_nvme_find_admin_path(struct nvme_bdev_channel *nbdev_ch,
         struct nvme_ctrlr **_nvme_ctrlr)
 {
-    *_nvme_ctrlr = ctrlr_ch->ctrlr;
+    *_nvme_ctrlr = nbdev_ch->ctrlr_ch->ctrlr;
 
     return true;
 }
@@ -357,6 +357,15 @@ bdev_nvme_poll_adminq(void *arg)
     return rc == 0 ? SPDK_POLLER_IDLE : SPDK_POLLER_BUSY;
 }
 
+static void
+_bdev_nvme_unregister_dev_cb(void *io_device)
+{
+    struct nvme_bdev *nvme_disk = io_device;
+
+    free(nvme_disk->disk.name);
+    free(nvme_disk);
+}
+
 static int
 bdev_nvme_destruct(void *ctx)
 {
@@ -375,8 +384,7 @@ bdev_nvme_destruct(void *ctx)
         pthread_mutex_unlock(&nvme_ns->ctrlr->mutex);
     }
 
-    free(nvme_disk->disk.name);
-    free(nvme_disk);
+    spdk_io_device_unregister(nvme_disk, _bdev_nvme_unregister_dev_cb);
 
     return 0;
 }
@@ -638,8 +646,9 @@ bdev_nvme_reset_io_complete(void *cb_arg, int rc)
 }
 
 static int
-bdev_nvme_reset_io(struct nvme_ctrlr_channel *ctrlr_ch, struct nvme_bdev_io *bio)
+bdev_nvme_reset_io(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio)
 {
+    struct nvme_ctrlr_channel *ctrlr_ch = nbdev_ch->ctrlr_ch;
     struct spdk_bdev_io *bdev_io;
     int rc;
@@ -759,8 +768,7 @@ bdev_nvme_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
 {
     struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
     struct spdk_bdev *bdev = bdev_io->bdev;
-    struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev->ctxt;
-    struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
+    struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
     struct spdk_nvme_ns *ns;
     struct spdk_nvme_qpair *qpair;
     int ret;
@@ -770,7 +778,7 @@ bdev_nvme_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
         goto exit;
     }
 
-    if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair))) {
+    if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair))) {
        ret = -ENXIO;
        goto exit;
     }
@@ -794,16 +802,15 @@ exit:
 static void
 bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
-    struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
+    struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
     struct spdk_bdev *bdev = bdev_io->bdev;
-    struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev->ctxt;
     struct nvme_bdev_io *nbdev_io = (struct nvme_bdev_io *)bdev_io->driver_ctx;
     struct nvme_bdev_io *nbdev_io_to_abort;
     struct spdk_nvme_ns *ns;
     struct spdk_nvme_qpair *qpair;
     int rc = 0;
 
-    if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair))) {
+    if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair))) {
        rc = -ENXIO;
        goto exit;
     }
@@ -875,7 +882,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
             bdev_io->u.bdev.num_blocks);
         break;
     case SPDK_BDEV_IO_TYPE_RESET:
-        rc = bdev_nvme_reset_io(ctrlr_ch, nbdev_io);
+        rc = bdev_nvme_reset_io(nbdev_ch, nbdev_io);
         break;
     case SPDK_BDEV_IO_TYPE_FLUSH:
         rc = bdev_nvme_flush(ns,
@@ -911,7 +918,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
             bdev_io->u.zone_mgmt.zone_action);
         break;
     case SPDK_BDEV_IO_TYPE_NVME_ADMIN:
-        rc = bdev_nvme_admin_passthru(ctrlr_ch,
+        rc = bdev_nvme_admin_passthru(nbdev_ch,
             nbdev_io,
             &bdev_io->u.nvme_passthru.cmd,
             bdev_io->u.nvme_passthru.buf,
@@ -937,7 +944,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
         break;
     case SPDK_BDEV_IO_TYPE_ABORT:
         nbdev_io_to_abort = (struct nvme_bdev_io *)bdev_io->u.abort.bio_to_abort->driver_ctx;
-        rc = bdev_nvme_abort(ctrlr_ch,
+        rc = bdev_nvme_abort(nbdev_ch,
             nbdev_io,
             nbdev_io_to_abort);
         break;
@@ -1151,7 +1158,7 @@ bdev_nvme_get_io_channel(void *ctx)
 {
     struct nvme_bdev *nvme_bdev = ctx;
 
-    return spdk_get_io_channel(nvme_bdev->nvme_ns->ctrlr);
+    return spdk_get_io_channel(nvme_bdev);
 }
 
 static void *
@@ -1309,7 +1316,8 @@ bdev_nvme_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *
 static uint64_t
 bdev_nvme_get_spin_time(struct spdk_io_channel *ch)
 {
-    struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
+    struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
+    struct nvme_ctrlr_channel *ctrlr_ch = nbdev_ch->ctrlr_ch;
     struct nvme_poll_group *group = ctrlr_ch->group;
     uint64_t spin_time;
@@ -1405,7 +1413,6 @@ nvme_disk_create(struct spdk_bdev *disk, const char *base_name,
     const uint8_t *nguid;
     const struct spdk_nvme_ctrlr_data *cdata;
     const struct spdk_nvme_ns_data *nsdata;
-    int rc;
     enum spdk_nvme_csi csi;
     uint32_t atomic_bs, phys_bs, bs;
@@ -1495,12 +1502,6 @@ nvme_disk_create(struct spdk_bdev *disk, const char *base_name,
     disk->ctxt = ctx;
     disk->fn_table = &nvmelib_fn_table;
     disk->module = &nvme_if;
 
-    rc = spdk_bdev_register(disk);
-    if (rc) {
-        SPDK_ERRLOG("spdk_bdev_register() failed\n");
-        free(disk->name);
-        return rc;
-    }
     return 0;
 }
@@ -1528,6 +1529,21 @@ nvme_bdev_create(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvme_ns)
         return rc;
     }
 
+    spdk_io_device_register(bdev,
+        bdev_nvme_create_bdev_channel_cb,
+        bdev_nvme_destroy_bdev_channel_cb,
+        sizeof(struct nvme_bdev_channel),
+        bdev->disk.name);
+
+    rc = spdk_bdev_register(&bdev->disk);
+    if (rc != 0) {
+        SPDK_ERRLOG("spdk_bdev_register() failed\n");
+        spdk_io_device_unregister(bdev, NULL);
+        free(bdev->disk.name);
+        free(bdev);
+        return rc;
+    }
+
     nvme_ns->bdev = bdev;
 
     return 0;
@@ -2817,8 +2833,7 @@ bdev_nvme_readv_done(void *ref, const struct spdk_nvme_cpl *cpl)
 {
     struct nvme_bdev_io *bio = ref;
     struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
-    struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
-    struct nvme_ctrlr_channel *ctrlr_ch;
+    struct nvme_bdev_channel *nbdev_ch;
     struct spdk_nvme_ns *ns;
     struct spdk_nvme_qpair *qpair;
     int ret;
@@ -2830,9 +2845,9 @@ bdev_nvme_readv_done(void *ref, const struct spdk_nvme_cpl *cpl)
         /* Save completion status to use after verifying PI error. */
         bio->cpl = *cpl;
 
-        ctrlr_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
+        nbdev_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
 
-        if (spdk_likely(bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair))) {
+        if (spdk_likely(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair))) {
             /* Read without PI checking to verify PI error. */
             ret = bdev_nvme_no_pi_readv(ns,
                 qpair,
@@ -2979,9 +2994,8 @@ bdev_nvme_get_zone_info_done(void *ref, const struct spdk_nvme_cpl *cpl)
 {
     struct nvme_bdev_io *bio = ref;
     struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
-    struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
     struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
-    struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
+    struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
     uint64_t zone_id = bdev_io->u.zone_mgmt.zone_id;
     uint32_t zones_to_copy = bdev_io->u.zone_mgmt.num_zones;
     struct spdk_bdev_zone_info *info = bdev_io->u.zone_mgmt.buf;
@@ -2995,7 +3009,7 @@ bdev_nvme_get_zone_info_done(void *ref, const struct spdk_nvme_cpl *cpl)
         goto out_complete_io_nvme_cpl;
     }
 
-    if (!bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair)) {
+    if (!bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair)) {
         ret = -ENXIO;
         goto out_complete_io_ret;
     }
@@ -3524,13 +3538,13 @@ bdev_nvme_zone_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair
 }
 
 static int
-bdev_nvme_admin_passthru(struct nvme_ctrlr_channel *ctrlr_ch, struct nvme_bdev_io *bio,
+bdev_nvme_admin_passthru(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio,
         struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes)
 {
     struct nvme_ctrlr *nvme_ctrlr;
     uint32_t max_xfer_size;
 
-    if (!bdev_nvme_find_admin_path(ctrlr_ch, &nvme_ctrlr)) {
+    if (!bdev_nvme_find_admin_path(nbdev_ch, &nvme_ctrlr)) {
         return -EINVAL;
     }
@@ -3600,9 +3614,10 @@ bdev_nvme_io_passthru_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
 }
 
 static int
-bdev_nvme_abort(struct nvme_ctrlr_channel *ctrlr_ch, struct nvme_bdev_io *bio,
+bdev_nvme_abort(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio,
         struct nvme_bdev_io *bio_to_abort)
 {
+    struct nvme_ctrlr_channel *ctrlr_ch = nbdev_ch->ctrlr_ch;
     int rc;
 
     bio->orig_thread = spdk_get_thread();


@@ -106,18 +106,18 @@ bdev_ocssd_get_ns_from_nvme(struct nvme_ns *nvme_ns)
 }
 
 static inline bool
-bdev_ocssd_find_io_path(struct nvme_bdev *nbdev, struct nvme_ctrlr_channel *ctrlr_ch,
+bdev_ocssd_find_io_path(struct nvme_bdev_channel *nbdev_ch,
         struct bdev_ocssd_ns **_ocssd_ns,
         struct spdk_nvme_ns **_ns, struct spdk_nvme_qpair **_qpair)
 {
-    if (spdk_unlikely(ctrlr_ch->qpair == NULL)) {
+    if (spdk_unlikely(nbdev_ch->ctrlr_ch->qpair == NULL)) {
         /* The device is currently resetting. */
         return false;
     }
 
-    *_ocssd_ns = bdev_ocssd_get_ns_from_nvme(nbdev->nvme_ns);
-    *_ns = nbdev->nvme_ns->ns;
-    *_qpair = ctrlr_ch->qpair;
+    *_ocssd_ns = bdev_ocssd_get_ns_from_nvme(nbdev_ch->nvme_ns);
+    *_ns = nbdev_ch->nvme_ns->ns;
+    *_qpair = nbdev_ch->ctrlr_ch->qpair;
 
     return true;
 }
@@ -224,6 +224,15 @@ bdev_ocssd_free_bdev(struct ocssd_bdev *ocssd_bdev)
     free(ocssd_bdev);
 }
 
+static void
+_bdev_ocssd_unregister_dev_cb(void *io_device)
+{
+    struct nvme_bdev *nvme_bdev = io_device;
+    struct ocssd_bdev *ocssd_bdev = nvme_bdev->disk.ctxt;
+
+    bdev_ocssd_free_bdev(ocssd_bdev);
+}
+
 static int
 bdev_ocssd_destruct(void *ctx)
 {
@@ -243,7 +252,7 @@ bdev_ocssd_destruct(void *ctx)
         pthread_mutex_unlock(&nvme_ns->ctrlr->mutex);
     }
 
-    bdev_ocssd_free_bdev(ocssd_bdev);
+    spdk_io_device_unregister(nvme_bdev, _bdev_ocssd_unregister_dev_cb);
 
     return 0;
 }
@@ -527,7 +536,7 @@ static void
 bdev_ocssd_io_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
 {
     struct ocssd_bdev *ocssd_bdev = (struct ocssd_bdev *)bdev_io->bdev->ctxt;
-    struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
+    struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
     struct bdev_ocssd_ns *ocssd_ns;
     struct spdk_nvme_ns *ns;
     struct spdk_nvme_qpair *qpair;
@@ -538,8 +547,7 @@ bdev_ocssd_io_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
         return;
     }
 
-    if (spdk_unlikely(!bdev_ocssd_find_io_path(&ocssd_bdev->nvme_bdev, ctrlr_ch,
-            &ocssd_ns, &ns, &qpair))) {
+    if (spdk_unlikely(!bdev_ocssd_find_io_path(nbdev_ch, &ocssd_ns, &ns, &qpair))) {
         spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
         return;
     }
@@ -653,7 +661,7 @@ bdev_ocssd_zone_info_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
     struct spdk_ocssd_chunk_information_entry *chunk_info = &ocdev_io->zone_info.chunk_info;
     struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(ctx);
     struct ocssd_bdev *ocssd_bdev = bdev_io->bdev->ctxt;
-    struct nvme_ctrlr_channel *ctrlr_ch;
+    struct nvme_bdev_channel *nbdev_ch;
     struct spdk_nvme_ns *ns;
     struct spdk_nvme_qpair *qpair;
     struct bdev_ocssd_ns *ocssd_ns;
@@ -665,10 +673,9 @@ bdev_ocssd_zone_info_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
         return;
     }
 
-    ctrlr_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
+    nbdev_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
 
-    if (spdk_unlikely(!bdev_ocssd_find_io_path(&ocssd_bdev->nvme_bdev, ctrlr_ch, &ocssd_ns, &ns,
-            &qpair))) {
+    if (spdk_unlikely(!bdev_ocssd_find_io_path(nbdev_ch, &ocssd_ns, &ns, &qpair))) {
         spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
         return;
     }
@@ -786,7 +793,8 @@ bdev_ocssd_poll_pending(void *ctx)
 static void
 bdev_ocssd_delay_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
-    struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
+    struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
+    struct nvme_ctrlr_channel *ctrlr_ch = nbdev_ch->ctrlr_ch;
     struct ocssd_io_channel *ocssd_ch = ctrlr_ch->ocssd_ch;
 
     TAILQ_INSERT_TAIL(&ocssd_ch->pending_requests, bdev_io, module_link);
@@ -796,15 +804,14 @@ bdev_ocssd_delay_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
 static int
 _bdev_ocssd_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
-    struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
+    struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
     struct ocssd_bdev *ocssd_bdev = (struct ocssd_bdev *)bdev_io->bdev->ctxt;
     struct bdev_ocssd_io *ocdev_io = (struct bdev_ocssd_io *)bdev_io->driver_ctx;
     struct bdev_ocssd_ns *ocssd_ns;
     struct spdk_nvme_ns *ns;
     struct spdk_nvme_qpair *qpair;
 
-    if (spdk_unlikely(!bdev_ocssd_find_io_path(&ocssd_bdev->nvme_bdev, ctrlr_ch,
-            &ocssd_ns, &ns, &qpair))) {
+    if (spdk_unlikely(!bdev_ocssd_find_io_path(nbdev_ch, &ocssd_ns, &ns, &qpair))) {
         return -1;
     }
@@ -909,7 +916,7 @@ bdev_ocssd_get_io_channel(void *ctx)
 {
     struct ocssd_bdev *ocssd_bdev = ctx;
 
-    return spdk_get_io_channel(ocssd_bdev->nvme_bdev.nvme_ns->ctrlr);
+    return spdk_get_io_channel(&ocssd_bdev->nvme_bdev);
 }
 
 static void
@@ -1132,11 +1139,18 @@ bdev_ocssd_register_bdev(void *ctx)
     struct nvme_ns *nvme_ns = create_ctx->nvme_ns;
     int rc;
 
+    spdk_io_device_register(nvme_bdev,
+        bdev_nvme_create_bdev_channel_cb,
+        bdev_nvme_destroy_bdev_channel_cb,
+        sizeof(struct nvme_bdev_channel),
+        nvme_bdev->disk.name);
+
     rc = spdk_bdev_register(&nvme_bdev->disk);
     if (spdk_likely(rc == 0)) {
         nvme_ns->bdev = nvme_bdev;
     } else {
         SPDK_ERRLOG("Failed to register bdev %s\n", nvme_bdev->disk.name);
+        spdk_io_device_unregister(nvme_bdev, NULL);
     }
 
     bdev_ocssd_create_complete(create_ctx, rc);


@@ -219,3 +219,35 @@ nvme_ctrlr_depopulate_namespace_done(struct nvme_ns *nvme_ns)
     nvme_ctrlr_release(nvme_ctrlr);
 }
+
+int
+bdev_nvme_create_bdev_channel_cb(void *io_device, void *ctx_buf)
+{
+    struct nvme_bdev_channel *nbdev_ch = ctx_buf;
+    struct nvme_bdev *nbdev = io_device;
+    struct nvme_ns *nvme_ns;
+    struct spdk_io_channel *ch;
+
+    nvme_ns = nbdev->nvme_ns;
+
+    ch = spdk_get_io_channel(nvme_ns->ctrlr);
+    if (ch == NULL) {
+        SPDK_ERRLOG("Failed to alloc io_channel.\n");
+        return -ENOMEM;
+    }
+
+    nbdev_ch->ctrlr_ch = spdk_io_channel_get_ctx(ch);
+    nbdev_ch->nvme_ns = nvme_ns;
+
+    return 0;
+}
+
+void
+bdev_nvme_destroy_bdev_channel_cb(void *io_device, void *ctx_buf)
+{
+    struct nvme_bdev_channel *nbdev_ch = ctx_buf;
+    struct spdk_io_channel *ch;
+
+    ch = spdk_io_channel_from_ctx(nbdev_ch->ctrlr_ch);
+
+    spdk_put_io_channel(ch);
+}


@@ -174,6 +174,11 @@ struct nvme_ctrlr_channel {
     struct ocssd_io_channel *ocssd_ch;
 };
 
+struct nvme_bdev_channel {
+    struct nvme_ns *nvme_ns;
+    struct nvme_ctrlr_channel *ctrlr_ch;
+};
+
 void nvme_ctrlr_populate_namespace_done(struct nvme_async_probe_ctx *ctx,
         struct nvme_ns *nvme_ns, int rc);
 void nvme_ctrlr_depopulate_namespace_done(struct nvme_ns *nvme_ns);
@@ -192,4 +197,7 @@ void nvme_ctrlr_release(struct nvme_ctrlr *nvme_ctrlr);
 void nvme_ctrlr_unregister(void *ctx);
 void nvme_ctrlr_delete(struct nvme_ctrlr *nvme_ctrlr);
+
+int bdev_nvme_create_bdev_channel_cb(void *io_device, void *ctx_buf);
+void bdev_nvme_destroy_bdev_channel_cb(void *io_device, void *ctx_buf);
 
 #endif /* SPDK_COMMON_BDEV_NVME_H */


@@ -1438,6 +1438,7 @@ test_pending_reset(void)
     struct nvme_bdev *bdev;
     struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
     struct spdk_io_channel *ch1, *ch2;
+    struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
     struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
     int rc;
@@ -1465,20 +1466,24 @@ test_pending_reset(void)
     bdev = nvme_ctrlr->namespaces[0]->bdev;
     SPDK_CU_ASSERT_FATAL(bdev != NULL);
 
-    ch1 = spdk_get_io_channel(nvme_ctrlr);
+    ch1 = spdk_get_io_channel(bdev);
     SPDK_CU_ASSERT_FATAL(ch1 != NULL);
 
-    ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
+    nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
+    ctrlr_ch1 = nbdev_ch1->ctrlr_ch;
+    SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
 
     first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
     first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
 
     set_thread(1);
 
-    ch2 = spdk_get_io_channel(nvme_ctrlr);
+    ch2 = spdk_get_io_channel(bdev);
     SPDK_CU_ASSERT_FATAL(ch2 != NULL);
 
-    ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
+    nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
+    ctrlr_ch2 = nbdev_ch2->ctrlr_ch;
+    SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);
 
     second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
     second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
@@ -1834,12 +1839,11 @@ static void
 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
         enum spdk_bdev_io_type io_type)
 {
-    struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
-    struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
+    struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
     struct spdk_nvme_ns *ns = NULL;
     struct spdk_nvme_qpair *qpair = NULL;
 
-    CU_ASSERT(bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair));
+    CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair));
 
     bdev_io->type = io_type;
     bdev_io->internal.in_submit_request = true;
@@ -1860,12 +1864,11 @@ static void
 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
         enum spdk_bdev_io_type io_type)
 {
-    struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
-    struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
+    struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
     struct spdk_nvme_ns *ns = NULL;
     struct spdk_nvme_qpair *qpair = NULL;
 
-    CU_ASSERT(bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair));
+    CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair));
 
     bdev_io->type = io_type;
     bdev_io->internal.in_submit_request = true;
@@ -1880,14 +1883,13 @@ ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
 static void
 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
 {
-    struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
+    struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
     struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
     struct ut_nvme_req *req;
-    struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
     struct spdk_nvme_ns *ns = NULL;
     struct spdk_nvme_qpair *qpair = NULL;
 
-    CU_ASSERT(bdev_nvme_find_io_path(nbdev, ctrlr_ch, &ns, &qpair));
+    CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair));
 
     /* Only compare and write now. */
     bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
@@ -1900,7 +1902,7 @@ ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *b
     CU_ASSERT(bio->first_fused_submitted == true);
 
     /* First outstanding request is compare operation. */
-    req = TAILQ_FIRST(&ctrlr_ch->qpair->outstanding_reqs);
+    req = TAILQ_FIRST(&qpair->outstanding_reqs);
     SPDK_CU_ASSERT_FATAL(req != NULL);
     CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
     req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;
@@ -1976,7 +1978,7 @@ test_submit_nvme_cmd(void)
 
     set_thread(0);
 
-    ch = spdk_get_io_channel(nvme_ctrlr);
+    ch = spdk_get_io_channel(bdev);
     SPDK_CU_ASSERT_FATAL(ch != NULL);
 
     bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);
@@ -2111,6 +2113,7 @@ test_abort(void)
     struct nvme_bdev *bdev;
     struct spdk_bdev_io *write_io, *admin_io, *abort_io;
     struct spdk_io_channel *ch1, *ch2;
+    struct nvme_bdev_channel *nbdev_ch1;
     struct nvme_ctrlr_channel *ctrlr_ch1;
     int rc;
@@ -2152,13 +2155,15 @@ test_abort(void)
 
     set_thread(0);
 
-    ch1 = spdk_get_io_channel(nvme_ctrlr);
+    ch1 = spdk_get_io_channel(bdev);
     SPDK_CU_ASSERT_FATAL(ch1 != NULL);
 
-    ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
+    nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
+    ctrlr_ch1 = nbdev_ch1->ctrlr_ch;
+    SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);
 
     set_thread(1);
 
-    ch2 = spdk_get_io_channel(nvme_ctrlr);
+    ch2 = spdk_get_io_channel(bdev);
     SPDK_CU_ASSERT_FATAL(ch2 != NULL);
 
     write_io->internal.ch = (struct spdk_bdev_channel *)ch1;


@@ -883,9 +883,12 @@ test_get_zone_info(void)
     const char *controller_name = "nvme0";
     const char *bdev_name = "nvme0n1";
     struct spdk_bdev *bdev;
+    struct nvme_bdev *nvme_bdev;
     struct spdk_bdev_io *bdev_io;
     struct spdk_io_channel *ch;
-    struct nvme_ctrlr_channel *ctrlr_ch;
+    struct nvme_bdev_channel *nbdev_ch;
+    struct nvme_ns *nvme_ns;
+    struct nvme_ctrlr_channel ctrlr_ch = {};
 #define MAX_ZONE_INFO_COUNT 64
     struct spdk_bdev_zone_info zone_info[MAX_ZONE_INFO_COUNT];
     struct spdk_ocssd_chunk_information_entry *chunk_info;
@@ -915,12 +918,18 @@ test_get_zone_info(void)
     bdev = spdk_bdev_get_by_name(bdev_name);
     SPDK_CU_ASSERT_FATAL(bdev != NULL);
 
-    ch = calloc(1, sizeof(*ch) + sizeof(*ctrlr_ch));
+    nvme_bdev = (struct nvme_bdev *)bdev->ctxt;
+    nvme_ns = nvme_bdev->nvme_ns;
+    SPDK_CU_ASSERT_FATAL(nvme_ns != NULL);
+
+    ch = calloc(1, sizeof(*ch) + sizeof(*nbdev_ch));
     SPDK_CU_ASSERT_FATAL(ch != NULL);
 
-    ctrlr_ch = spdk_io_channel_get_ctx(ch);
-    ctrlr_ch->ctrlr = nvme_ctrlr;
-    ctrlr_ch->qpair = (struct spdk_nvme_qpair *)0x1;
+    nbdev_ch = spdk_io_channel_get_ctx(ch);
+    nbdev_ch->nvme_ns = nvme_ns;
+    nbdev_ch->ctrlr_ch = &ctrlr_ch;
+
+    ctrlr_ch.ctrlr = nvme_ctrlr;
+    ctrlr_ch.qpair = (struct spdk_nvme_qpair *)0x1;
 
     bdev_io = alloc_ocssd_io();
     bdev_io->internal.cb = get_zone_info_cb;