bdev/ocssd: find_io_path() returns bdev_ocssd_ns and spdk_nvme_ns pointers directly

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Ifd5a3f5ab2e87d383da1c3b94949fa2ffd916ecc
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8150
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Shuhei Matsumoto 2021-06-02 03:13:58 +09:00 committed by Ben Walker
parent 55e4be3432
commit b7cc001a04


@@ -107,14 +107,16 @@ bdev_ocssd_get_ns_from_nvme(struct nvme_bdev_ns *nvme_ns)
 static inline bool
 bdev_ocssd_find_io_path(struct nvme_bdev *nbdev, struct nvme_io_path *io_path,
-			struct nvme_bdev_ns **_nvme_ns, struct spdk_nvme_qpair **_qpair)
+			struct bdev_ocssd_ns **_ocssd_ns,
+			struct spdk_nvme_ns **_ns, struct spdk_nvme_qpair **_qpair)
 {
 	if (spdk_unlikely(io_path->qpair == NULL)) {
 		/* The device is currently resetting. */
 		return false;
 	}
 
-	*_nvme_ns = nbdev->nvme_ns;
+	*_ocssd_ns = bdev_ocssd_get_ns_from_nvme(nbdev->nvme_ns);
+	*_ns = nbdev->nvme_ns->ns;
 	*_qpair = io_path->qpair;
 	return true;
 }
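
With the new signature, a caller obtains the bdev_ocssd_ns, the spdk_nvme_ns and the qpair in a single lookup instead of fetching the namespace separately afterwards. The fragment below is an illustrative sketch that mirrors the call sites updated in the hunks further down (for example _bdev_ocssd_submit_request); ocssd_bdev and io_path are assumed to be the submit path's bdev context and I/O channel context.

	struct bdev_ocssd_ns *ocssd_ns;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;

	/* Fail the request if no usable path exists, e.g. while the qpair is resetting. */
	if (spdk_unlikely(!bdev_ocssd_find_io_path(&ocssd_bdev->nvme_bdev, io_path,
						   &ocssd_ns, &ns, &qpair))) {
		return -1;
	}
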
@@ -383,12 +385,11 @@ bdev_ocssd_read_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
 }
 
 static int
-bdev_ocssd_read(struct ocssd_bdev *ocssd_bdev, struct nvme_bdev_ns *nvme_ns,
-		struct spdk_nvme_qpair *qpair,
+bdev_ocssd_read(struct ocssd_bdev *ocssd_bdev, struct bdev_ocssd_ns *ocssd_ns,
+		struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
 		struct bdev_ocssd_io *ocdev_io, struct iovec *iov, int iovcnt,
 		void *md, uint64_t lba_count, uint64_t lba)
 {
-	struct bdev_ocssd_ns *ocssd_ns = bdev_ocssd_get_ns_from_nvme(nvme_ns);
 	const size_t zone_size = bdev_ocssd_get_zone_size(ocssd_bdev);
 
 	if ((lba % zone_size) + lba_count > zone_size) {
@@ -403,7 +404,7 @@ bdev_ocssd_read(struct ocssd_bdev *ocssd_bdev, struct nvme_bdev_ns *nvme_ns,
 	lba = bdev_ocssd_to_disk_lba(ocssd_ns, lba);
 
-	return spdk_nvme_ns_cmd_readv_with_md(nvme_ns->ns, qpair, lba,
+	return spdk_nvme_ns_cmd_readv_with_md(ns, qpair, lba,
					      lba_count, bdev_ocssd_read_cb,
					      ocdev_io, 0, bdev_ocssd_reset_sgl,
					      bdev_ocssd_next_sge, md, 0, 0);
@@ -424,12 +425,11 @@ bdev_ocssd_write_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
 }
 
 static int
-bdev_ocssd_write(struct ocssd_bdev *ocssd_bdev, struct nvme_bdev_ns *nvme_ns,
-		 struct spdk_nvme_qpair *qpair,
+bdev_ocssd_write(struct ocssd_bdev *ocssd_bdev, struct bdev_ocssd_ns *ocssd_ns,
+		 struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
 		 struct bdev_ocssd_io *ocdev_io, struct iovec *iov, int iovcnt,
 		 void *md, uint64_t lba_count, uint64_t lba)
 {
-	struct bdev_ocssd_ns *ocssd_ns = bdev_ocssd_get_ns_from_nvme(nvme_ns);
 	const size_t zone_size = bdev_ocssd_get_zone_size(ocssd_bdev);
 	struct bdev_ocssd_zone *zone;
 	int rc;
@@ -452,7 +452,7 @@ bdev_ocssd_write(struct ocssd_bdev *ocssd_bdev, struct nvme_bdev_ns *nvme_ns,
 	lba = bdev_ocssd_to_disk_lba(ocssd_ns, lba);
 
-	rc = spdk_nvme_ns_cmd_writev_with_md(nvme_ns->ns, qpair, lba,
+	rc = spdk_nvme_ns_cmd_writev_with_md(ns, qpair, lba,
					     lba_count, bdev_ocssd_write_cb,
					     ocdev_io, 0, bdev_ocssd_reset_sgl,
					     bdev_ocssd_next_sge, md, 0, 0);
@@ -479,12 +479,11 @@ bdev_ocssd_append_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
 }
 
 static int
-bdev_ocssd_zone_append(struct ocssd_bdev *ocssd_bdev, struct nvme_bdev_ns *nvme_ns,
-		       struct spdk_nvme_qpair *qpair,
+bdev_ocssd_zone_append(struct ocssd_bdev *ocssd_bdev, struct bdev_ocssd_ns *ocssd_ns,
+		       struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
 		       struct bdev_ocssd_io *ocdev_io, struct iovec *iov, int iovcnt,
 		       void *md, uint64_t lba_count, uint64_t lba)
 {
-	struct bdev_ocssd_ns *ocssd_ns = bdev_ocssd_get_ns_from_nvme(nvme_ns);
 	struct bdev_ocssd_zone *zone;
 	int rc = 0;
@@ -512,7 +511,7 @@ bdev_ocssd_zone_append(struct ocssd_bdev *ocssd_bdev, struct nvme_bdev_ns *nvme_
 	lba = bdev_ocssd_to_disk_lba(ocssd_ns, zone->write_pointer);
 
-	rc = spdk_nvme_ns_cmd_writev_with_md(nvme_ns->ns, qpair, lba,
+	rc = spdk_nvme_ns_cmd_writev_with_md(ns, qpair, lba,
					     lba_count, bdev_ocssd_append_cb,
					     ocdev_io, 0, bdev_ocssd_reset_sgl,
					     bdev_ocssd_next_sge, md, 0, 0);
@@ -529,7 +528,8 @@ bdev_ocssd_io_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
 {
 	struct ocssd_bdev *ocssd_bdev = (struct ocssd_bdev *)bdev_io->bdev->ctxt;
 	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
-	struct nvme_bdev_ns *nvme_ns;
+	struct bdev_ocssd_ns *ocssd_ns;
+	struct spdk_nvme_ns *ns;
 	struct spdk_nvme_qpair *qpair;
 	int rc;
@@ -539,13 +539,14 @@ bdev_ocssd_io_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
 	}
 
 	if (spdk_unlikely(!bdev_ocssd_find_io_path(&ocssd_bdev->nvme_bdev, io_path,
-						   &nvme_ns, &qpair))) {
+						   &ocssd_ns, &ns, &qpair))) {
 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
 		return;
 	}
 
 	rc = bdev_ocssd_read(ocssd_bdev,
-			     nvme_ns,
+			     ocssd_ns,
+			     ns,
			     qpair,
			     (struct bdev_ocssd_io *)bdev_io->driver_ctx,
			     bdev_io->u.bdev.iovs,
@@ -575,11 +576,10 @@ bdev_ocssd_reset_zone_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
 }
 
 static int
-bdev_ocssd_reset_zone(struct ocssd_bdev *ocssd_bdev, struct nvme_bdev_ns *nvme_ns,
-		      struct spdk_nvme_qpair *qpair,
+bdev_ocssd_reset_zone(struct ocssd_bdev *ocssd_bdev, struct bdev_ocssd_ns *ocssd_ns,
+		      struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
 		      struct bdev_ocssd_io *ocdev_io, uint64_t slba, size_t num_zones)
 {
-	struct bdev_ocssd_ns *ocssd_ns = bdev_ocssd_get_ns_from_nvme(nvme_ns);
 	uint64_t offset, zone_size = bdev_ocssd_get_zone_size(ocssd_bdev);
 	struct bdev_ocssd_zone *zone;
 	int rc;
@@ -601,7 +601,7 @@ bdev_ocssd_reset_zone(struct ocssd_bdev *ocssd_bdev, struct nvme_bdev_ns *nvme_n
 	ocdev_io->io.zone = zone;
 
-	rc = spdk_nvme_ocssd_ns_cmd_vector_reset(nvme_ns->ns, qpair,
+	rc = spdk_nvme_ocssd_ns_cmd_vector_reset(ns, qpair,
						 ocdev_io->io.lba, num_zones, NULL,
						 bdev_ocssd_reset_zone_cb, ocdev_io);
 	if (spdk_unlikely(rc != 0)) {
@@ -611,8 +611,8 @@ bdev_ocssd_reset_zone(struct ocssd_bdev *ocssd_bdev, struct nvme_bdev_ns *nvme_n
 	return rc;
 }
 
-static int _bdev_ocssd_get_zone_info(struct ocssd_bdev *ocssd_bdev,
-				     struct nvme_bdev_ns *nvme_ns,
+static int _bdev_ocssd_get_zone_info(struct ocssd_bdev *ocssd_bdev, struct bdev_ocssd_ns *ocssd_ns,
+				     struct spdk_nvme_ns *ns,
 				     struct bdev_ocssd_io *ocdev_io, uint64_t zone_id);
 
 static void
@@ -654,7 +654,7 @@ bdev_ocssd_zone_info_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
 	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(ctx);
 	struct ocssd_bdev *ocssd_bdev = bdev_io->bdev->ctxt;
 	struct nvme_io_path *io_path;
-	struct nvme_bdev_ns *nvme_ns;
+	struct spdk_nvme_ns *ns;
 	struct spdk_nvme_qpair *qpair;
 	struct bdev_ocssd_ns *ocssd_ns;
 	struct spdk_bdev_zone_info *zone_info;
@@ -667,13 +667,12 @@ bdev_ocssd_zone_info_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
 	io_path = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
 
-	if (spdk_unlikely(!bdev_ocssd_find_io_path(&ocssd_bdev->nvme_bdev, io_path, &nvme_ns, &qpair))) {
+	if (spdk_unlikely(!bdev_ocssd_find_io_path(&ocssd_bdev->nvme_bdev, io_path, &ocssd_ns, &ns,
+			  &qpair))) {
 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
 		return;
 	}
 
-	ocssd_ns = bdev_ocssd_get_ns_from_nvme(nvme_ns);
 	zone_info = ((struct spdk_bdev_zone_info *)bdev_io->u.zone_mgmt.buf) +
 		    ocdev_io->zone_info.chunk_offset;
 	bdev_ocssd_fill_zone_info(ocssd_bdev, ocssd_ns, zone_info, chunk_info);
@@ -681,7 +680,7 @@ bdev_ocssd_zone_info_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
 	if (++ocdev_io->zone_info.chunk_offset == bdev_io->u.zone_mgmt.num_zones) {
 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
 	} else {
-		rc = _bdev_ocssd_get_zone_info(ocssd_bdev, nvme_ns, ocdev_io,
+		rc = _bdev_ocssd_get_zone_info(ocssd_bdev, ocssd_ns, ns, ocdev_io,
					       bdev_io->u.zone_mgmt.zone_id);
 		if (spdk_unlikely(rc != 0)) {
 			if (rc == -ENOMEM) {
@@ -694,18 +693,18 @@ bdev_ocssd_zone_info_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
 }
 
 static int
-_bdev_ocssd_get_zone_info(struct ocssd_bdev *ocssd_bdev, struct nvme_bdev_ns *nvme_ns,
+_bdev_ocssd_get_zone_info(struct ocssd_bdev *ocssd_bdev, struct bdev_ocssd_ns *ocssd_ns,
+			  struct spdk_nvme_ns *ns,
			  struct bdev_ocssd_io *ocdev_io, uint64_t zone_id)
 {
-	struct bdev_ocssd_ns *ocssd_ns = bdev_ocssd_get_ns_from_nvme(nvme_ns);
 	uint64_t lba, offset, zone_size = bdev_ocssd_get_zone_size(ocssd_bdev);
 
 	lba = zone_id + ocdev_io->zone_info.chunk_offset * zone_size;
 	offset = bdev_ocssd_to_chunk_info_offset(ocssd_ns, lba);
 
-	return spdk_nvme_ctrlr_cmd_get_log_page(spdk_nvme_ns_get_ctrlr(nvme_ns->ns),
+	return spdk_nvme_ctrlr_cmd_get_log_page(spdk_nvme_ns_get_ctrlr(ns),
						SPDK_OCSSD_LOG_CHUNK_INFO,
-						spdk_nvme_ns_get_id(nvme_ns->ns),
+						spdk_nvme_ns_get_id(ns),
						&ocdev_io->zone_info.chunk_info,
						sizeof(ocdev_io->zone_info.chunk_info),
						offset * sizeof(ocdev_io->zone_info.chunk_info),
@@ -713,7 +712,8 @@ _bdev_ocssd_get_zone_info(struct ocssd_bdev *ocssd_bdev, struct nvme_bdev_ns *nv
 }
 
 static int
-bdev_ocssd_get_zone_info(struct ocssd_bdev *ocssd_bdev, struct nvme_bdev_ns *nvme_ns,
+bdev_ocssd_get_zone_info(struct ocssd_bdev *ocssd_bdev, struct bdev_ocssd_ns *ocssd_ns,
+			 struct spdk_nvme_ns *ns,
			 struct bdev_ocssd_io *ocdev_io, uint64_t zone_id, uint32_t num_zones)
 {
 	uint64_t zone_size = bdev_ocssd_get_zone_size(ocssd_bdev);
@@ -730,17 +730,19 @@ bdev_ocssd_get_zone_info(struct ocssd_bdev *ocssd_bdev, struct nvme_bdev_ns *nvm
 	ocdev_io->zone_info.chunk_offset = 0;
 
-	return _bdev_ocssd_get_zone_info(ocssd_bdev, nvme_ns, ocdev_io, zone_id);
+	return _bdev_ocssd_get_zone_info(ocssd_bdev, ocssd_ns, ns, ocdev_io, zone_id);
 }
 
 static int
-bdev_ocssd_zone_management(struct ocssd_bdev *ocssd_bdev, struct nvme_bdev_ns *nvme_ns,
-			   struct spdk_nvme_qpair *qpair, struct spdk_bdev_io *bdev_io)
+bdev_ocssd_zone_management(struct ocssd_bdev *ocssd_bdev, struct bdev_ocssd_ns *ocssd_ns,
+			   struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
+			   struct spdk_bdev_io *bdev_io)
 {
 	switch (bdev_io->u.zone_mgmt.zone_action) {
 	case SPDK_BDEV_ZONE_RESET:
 		return bdev_ocssd_reset_zone(ocssd_bdev,
-					     nvme_ns,
+					     ocssd_ns,
+					     ns,
					     qpair,
					     (struct bdev_ocssd_io *)bdev_io->driver_ctx,
					     bdev_io->u.zone_mgmt.zone_id,
@@ -797,11 +799,12 @@ _bdev_ocssd_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev
 	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
 	struct ocssd_bdev *ocssd_bdev = (struct ocssd_bdev *)bdev_io->bdev->ctxt;
 	struct bdev_ocssd_io *ocdev_io = (struct bdev_ocssd_io *)bdev_io->driver_ctx;
-	struct nvme_bdev_ns *nvme_ns;
+	struct bdev_ocssd_ns *ocssd_ns;
+	struct spdk_nvme_ns *ns;
 	struct spdk_nvme_qpair *qpair;
 
 	if (spdk_unlikely(!bdev_ocssd_find_io_path(&ocssd_bdev->nvme_bdev, io_path,
-						   &nvme_ns, &qpair))) {
+						   &ocssd_ns, &ns, &qpair))) {
 		return -1;
 	}
@@ -809,7 +812,8 @@ _bdev_ocssd_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev
 	case SPDK_BDEV_IO_TYPE_READ:
 		if (bdev_io->u.bdev.iovs && bdev_io->u.bdev.iovs[0].iov_base) {
 			return bdev_ocssd_read(ocssd_bdev,
-					       nvme_ns,
+					       ocssd_ns,
+					       ns,
					       qpair,
					       ocdev_io,
					       bdev_io->u.bdev.iovs,
@@ -825,7 +829,8 @@ _bdev_ocssd_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev
 	case SPDK_BDEV_IO_TYPE_WRITE:
 		return bdev_ocssd_write(ocssd_bdev,
-					nvme_ns,
+					ocssd_ns,
+					ns,
					qpair,
					ocdev_io,
					bdev_io->u.bdev.iovs,
@@ -835,18 +840,20 @@ _bdev_ocssd_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev
					bdev_io->u.bdev.offset_blocks);
 	case SPDK_BDEV_IO_TYPE_ZONE_MANAGEMENT:
-		return bdev_ocssd_zone_management(ocssd_bdev, nvme_ns, qpair, bdev_io);
+		return bdev_ocssd_zone_management(ocssd_bdev, ocssd_ns, ns, qpair, bdev_io);
 	case SPDK_BDEV_IO_TYPE_GET_ZONE_INFO:
 		return bdev_ocssd_get_zone_info(ocssd_bdev,
-						nvme_ns,
+						ocssd_ns,
+						ns,
						ocdev_io,
						bdev_io->u.zone_mgmt.zone_id,
						bdev_io->u.zone_mgmt.num_zones);
 	case SPDK_BDEV_IO_TYPE_ZONE_APPEND:
 		return bdev_ocssd_zone_append(ocssd_bdev,
-					      nvme_ns,
+					      ocssd_ns,
+					      ns,
					      qpair,
					      ocdev_io,
					      bdev_io->u.bdev.iovs,