bdev/ocssd: get zone information command

Added a get-zone-information command that translates Open Channel chunk
information entries into the spdk_bdev_zone_info structure.

Change-Id: Ifdb15f2e0ea2fb8422810fa64f18942fcb6e4582
Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/468213
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Konrad Sztyber 2019-09-12 11:41:06 +02:00 committed by Tomasz Zawadzki
parent 6e640cfcbf
commit 7ac802e848
2 changed files with 419 additions and 18 deletions

View File

@ -52,11 +52,17 @@ struct bdev_ocssd_lba_offsets {
};
struct bdev_ocssd_io {
struct {
size_t iov_pos;
size_t iov_off;
uint64_t lba[SPDK_NVME_OCSSD_MAX_LBAL_ENTRIES];
} io;
union {
struct {
size_t iov_pos;
size_t iov_off;
uint64_t lba[SPDK_NVME_OCSSD_MAX_LBAL_ENTRIES];
} io;
struct {
size_t chunk_offset;
struct spdk_ocssd_chunk_information_entry chunk_info;
} zone_info;
};
};
struct ocssd_bdev {
@ -74,6 +80,12 @@ bdev_ocssd_get_ns_from_nvme(struct nvme_bdev_ns *nvme_ns)
return nvme_ns->type_ctx;
}
static struct bdev_ocssd_ns *
bdev_ocssd_get_ns_from_bdev(struct ocssd_bdev *ocssd_bdev)
{
return bdev_ocssd_get_ns_from_nvme(ocssd_bdev->nvme_bdev.nvme_ns);
}
static int
bdev_ocssd_library_init(void)
{
@ -130,14 +142,13 @@ bdev_ocssd_destruct(void *ctx)
return 0;
}
static uint64_t
bdev_ocssd_to_disk_lba(struct ocssd_bdev *ocssd_bdev, uint64_t lba)
static void
bdev_ocssd_translate_lba(struct ocssd_bdev *ocssd_bdev, uint64_t lba, uint64_t *grp,
uint64_t *pu, uint64_t *chk, uint64_t *lbk)
{
struct nvme_bdev_ns *nvme_ns = ocssd_bdev->nvme_bdev.nvme_ns;
struct bdev_ocssd_ns *ocssd_ns = bdev_ocssd_get_ns_from_nvme(nvme_ns);
struct bdev_ocssd_ns *ocssd_ns = bdev_ocssd_get_ns_from_bdev(ocssd_bdev);
const struct spdk_ocssd_geometry_data *geo = &ocssd_ns->geometry;
const struct bdev_ocssd_lba_offsets *offsets = &ocssd_ns->lba_offsets;
uint64_t addr_shift, lbk, chk, pu, grp;
uint64_t addr_shift;
/* To achieve best performance, we need to make sure that adjacent zones can be accessed
* in parallel. We accomplish this by having the following addressing scheme:
@ -147,16 +158,43 @@ bdev_ocssd_to_disk_lba(struct ocssd_bdev *ocssd_bdev, uint64_t lba)
*
* which means that neighbouring zones are placed in a different group and parallel unit.
*/
lbk = lba % geo->clba;
*lbk = lba % geo->clba;
addr_shift = geo->clba;
pu = (lba / addr_shift) % geo->num_pu;
*pu = (lba / addr_shift) % geo->num_pu;
addr_shift *= geo->num_pu;
grp = (lba / addr_shift) % geo->num_grp;
*grp = (lba / addr_shift) % geo->num_grp;
addr_shift *= geo->num_grp;
chk = (lba / addr_shift) % geo->num_chk;
*chk = (lba / addr_shift) % geo->num_chk;
}
/* Translate a device (physical, group/PU/chunk/logical-block encoded) LBA back
 * into the bdev's flat logical LBA space.  Inverse of bdev_ocssd_to_disk_lba().
 */
static uint64_t
bdev_ocssd_from_disk_lba(struct ocssd_bdev *ocssd_bdev, uint64_t lba)
{
	struct bdev_ocssd_ns *ocssd_ns = bdev_ocssd_get_ns_from_bdev(ocssd_bdev);
	const struct spdk_ocssd_geometry_data *geometry = &ocssd_ns->geometry;
	const struct bdev_ocssd_lba_offsets *offsets = &ocssd_ns->lba_offsets;
	uint64_t lbk, chk, pu, grp;

	/* Build each field mask with a 64-bit one: "(1 << len)" is a plain-int
	 * shift, which truncates (and is undefined) for field lengths >= 31.
	 */
	lbk = (lba >> offsets->lbk) & (((uint64_t)1 << geometry->lbaf.lbk_len) - 1);
	chk = (lba >> offsets->chk) & (((uint64_t)1 << geometry->lbaf.chk_len) - 1);
	pu = (lba >> offsets->pu) & (((uint64_t)1 << geometry->lbaf.pu_len) - 1);
	grp = (lba >> offsets->grp) & (((uint64_t)1 << geometry->lbaf.grp_len) - 1);

	/* Logical layout places neighbouring zones in different groups/PUs, so
	 * lbk varies fastest, then pu, then grp, then chk (see the addressing
	 * scheme comment in bdev_ocssd_translate_lba()).
	 */
	return lbk + pu * geometry->clba + grp * geometry->num_pu * geometry->clba +
	       chk * geometry->num_pu * geometry->num_grp * geometry->clba;
}
static uint64_t
bdev_ocssd_to_disk_lba(struct ocssd_bdev *ocssd_bdev, uint64_t lba)
{
struct bdev_ocssd_ns *ocssd_ns = bdev_ocssd_get_ns_from_bdev(ocssd_bdev);
const struct bdev_ocssd_lba_offsets *offsets = &ocssd_ns->lba_offsets;
uint64_t lbk, chk, pu, grp;
bdev_ocssd_translate_lba(ocssd_bdev, lba, &grp, &pu, &chk, &lbk);
return (lbk << offsets->lbk) |
(chk << offsets->chk) |
@ -333,6 +371,115 @@ bdev_ocssd_reset_zone(struct spdk_io_channel *ioch, struct spdk_bdev_io *bdev_io
ocdev_io->io.lba, num_zones, NULL, bdev_ocssd_reset_zone_cb, bdev_io);
}
static int _bdev_ocssd_get_zone_info(struct spdk_bdev_io *bdev_io);
/* Convert one OCSSD chunk information entry into an spdk_bdev_zone_info.
 * Maps the chunk state bits onto bdev zone states and derives the zone id,
 * write pointer and capacity from the entry.
 */
static void
bdev_ocssd_fill_zone_info(struct ocssd_bdev *ocssd_bdev, struct spdk_bdev_zone_info *zone_info,
const struct spdk_ocssd_chunk_information_entry *chunk_info)
{
struct nvme_bdev *nvme_bdev = &ocssd_bdev->nvme_bdev;
/* slba is a device LBA; convert it back to the bdev's logical LBA space. */
zone_info->zone_id = bdev_ocssd_from_disk_lba(ocssd_bdev, chunk_info->slba);
zone_info->write_pointer = zone_info->zone_id;
if (chunk_info->cs.free) {
zone_info->state = SPDK_BDEV_ZONE_STATE_EMPTY;
} else if (chunk_info->cs.closed) {
/* A closed chunk cannot be written further, so report it as FULL. */
zone_info->state = SPDK_BDEV_ZONE_STATE_FULL;
} else if (chunk_info->cs.open) {
zone_info->state = SPDK_BDEV_ZONE_STATE_OPEN;
/* wp is a device LBA; its offset within the zone is recovered modulo
 * zone_size.  NOTE(review): this assumes the in-zone block bits of the
 * device LBA are the low bits and zone_size is a power of two — confirm
 * against the lba_offsets layout.
 */
zone_info->write_pointer += chunk_info->wp % nvme_bdev->disk.zone_size;
} else if (chunk_info->cs.offline) {
zone_info->state = SPDK_BDEV_ZONE_STATE_OFFLINE;
} else {
/* No state bit set — treat the chunk as unusable rather than guessing. */
SPDK_ERRLOG("Unknown chunk state, assuming offline\n");
zone_info->state = SPDK_BDEV_ZONE_STATE_OFFLINE;
}
/* size_deviate means the chunk exposes fewer usable blocks than clba. */
if (chunk_info->ct.size_deviate) {
zone_info->capacity = chunk_info->cnlb;
} else {
zone_info->capacity = nvme_bdev->disk.zone_size;
}
}
/* Completion callback for a single chunk-information log page read.  Fills in
 * the next spdk_bdev_zone_info slot and either completes the bdev_io (all
 * requested zones done, or error) or issues the request for the next chunk.
 */
static void
bdev_ocssd_zone_info_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
{
struct spdk_bdev_io *bdev_io = ctx;
struct ocssd_bdev *ocssd_bdev = bdev_io->bdev->ctxt;
struct bdev_ocssd_io *ocdev_io = (struct bdev_ocssd_io *)bdev_io->driver_ctx;
struct spdk_ocssd_chunk_information_entry *chunk_info = &ocdev_io->zone_info.chunk_info;
struct spdk_bdev_zone_info *zone_info;
int rc;
/* Forward NVMe status to the bdev layer verbatim on failure. */
if (spdk_unlikely(spdk_nvme_cpl_is_error(cpl))) {
spdk_bdev_io_complete_nvme_status(bdev_io, 0, cpl->status.sct, cpl->status.sc);
return;
}
/* Next free slot in the caller-provided zone_info array. */
zone_info = ((struct spdk_bdev_zone_info *)bdev_io->u.zone_mgmt.buf) +
ocdev_io->zone_info.chunk_offset;
bdev_ocssd_fill_zone_info(ocssd_bdev, zone_info, chunk_info);
/* Chunks are fetched one at a time; chain the next request until all of
 * the requested zones have been filled in.
 */
if (++ocdev_io->zone_info.chunk_offset == bdev_io->u.zone_mgmt.num_zones) {
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
} else {
rc = _bdev_ocssd_get_zone_info(bdev_io);
if (spdk_unlikely(rc != 0)) {
if (rc == -ENOMEM) {
/* NOMEM lets the bdev layer retry the request later. */
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
} else {
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
}
}
}
}
/* Issue a chunk-information log page read for the zone at
 * zone_id + chunk_offset * zone_size.  The entry lands in the per-io
 * zone_info.chunk_info buffer; bdev_ocssd_zone_info_cb() consumes it.
 * Returns the spdk_nvme_ctrlr_cmd_get_log_page() submission result.
 */
static int
_bdev_ocssd_get_zone_info(struct spdk_bdev_io *bdev_io)
{
struct ocssd_bdev *ocssd_bdev = bdev_io->bdev->ctxt;
struct nvme_bdev *nvme_bdev = &ocssd_bdev->nvme_bdev;
struct bdev_ocssd_ns *ocssd_ns = bdev_ocssd_get_ns_from_bdev(ocssd_bdev);
const struct spdk_ocssd_geometry_data *geo = &ocssd_ns->geometry;
struct bdev_ocssd_io *ocdev_io = (struct bdev_ocssd_io *)bdev_io->driver_ctx;
uint64_t lba, grp, pu, chk, lbk, offset;
/* Logical LBA of the zone currently being queried. */
lba = bdev_io->u.zone_mgmt.zone_id + ocdev_io->zone_info.chunk_offset *
nvme_bdev->disk.zone_size;
bdev_ocssd_translate_lba(ocssd_bdev, lba, &grp, &pu, &chk, &lbk);
/* The chunk info log is laid out group-major: grp, then pu, then chk
 * (lbk does not participate — one entry per chunk).
 */
offset = grp * geo->num_pu * geo->num_chk + pu * geo->num_chk + chk;
/* Read exactly one entry at byte offset (entry index * entry size). */
return spdk_nvme_ctrlr_cmd_get_log_page(nvme_bdev->nvme_bdev_ctrlr->ctrlr,
SPDK_OCSSD_LOG_CHUNK_INFO,
spdk_nvme_ns_get_id(nvme_bdev->nvme_ns->ns),
&ocdev_io->zone_info.chunk_info,
sizeof(ocdev_io->zone_info.chunk_info),
offset * sizeof(ocdev_io->zone_info.chunk_info),
bdev_ocssd_zone_info_cb, (void *)bdev_io);
}
/* Entry point for SPDK_BDEV_IO_TYPE_GET_ZONE_INFO.  Validates the request and
 * kicks off the first chunk-information read; subsequent chunks are chained
 * from the completion callback.  Returns 0 on submission, -EINVAL on a bad
 * request.
 */
static int
bdev_ocssd_get_zone_info(struct spdk_io_channel *ioch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ocssd_io *ocdev_io = (struct bdev_ocssd_io *)bdev_io->driver_ctx;
	const uint32_t num_zones = bdev_io->u.zone_mgmt.num_zones;
	const uint64_t zone_id = bdev_io->u.zone_mgmt.zone_id;

	/* At least one zone must be requested. */
	if (num_zones < 1) {
		SPDK_ERRLOG("Invalid number of zones: %"PRIu32"\n", bdev_io->u.zone_mgmt.num_zones);
		return -EINVAL;
	}

	/* The starting LBA must point at the beginning of a zone. */
	if (zone_id % bdev_io->bdev->zone_size != 0) {
		SPDK_ERRLOG("Unaligned zone LBA: %"PRIu64"\n", bdev_io->u.zone_mgmt.zone_id);
		return -EINVAL;
	}

	/* Chunks are fetched one by one; start from the first requested zone. */
	ocdev_io->zone_info.chunk_offset = 0;

	return _bdev_ocssd_get_zone_info(bdev_io);
}
static int
bdev_ocssd_zone_management(struct spdk_io_channel *ioch, struct spdk_bdev_io *bdev_io)
{
@ -364,6 +511,10 @@ bdev_ocssd_submit_request(struct spdk_io_channel *ioch, struct spdk_bdev_io *bde
rc = bdev_ocssd_zone_management(ioch, bdev_io);
break;
case SPDK_BDEV_IO_TYPE_GET_ZONE_INFO:
rc = bdev_ocssd_get_zone_info(ioch, bdev_io);
break;
default:
rc = -EINVAL;
break;

View File

@ -73,6 +73,8 @@ struct spdk_nvme_ctrlr {
struct spdk_nvme_qpair *admin_qpair;
struct spdk_nvme_ns *ns;
uint32_t ns_count;
struct spdk_ocssd_chunk_information_entry *chunk_info;
uint64_t num_chunks;
LIST_ENTRY(spdk_nvme_ctrlr) list;
};
@ -101,6 +103,7 @@ free_controller(struct spdk_nvme_ctrlr *ctrlr)
CU_ASSERT(!nvme_bdev_ctrlr_get(&ctrlr->trid));
LIST_REMOVE(ctrlr, list);
spdk_nvme_ctrlr_free_io_qpair(ctrlr->admin_qpair);
free(ctrlr->chunk_info);
free(ctrlr->ns);
free(ctrlr);
}
@ -120,6 +123,10 @@ create_controller(const struct spdk_nvme_transport_id *trid, uint32_t ns_count,
ctrlr->ns = calloc(ns_count, sizeof(*ctrlr->ns));
SPDK_CU_ASSERT_FATAL(ctrlr->ns != NULL);
ctrlr->num_chunks = geo->num_grp * geo->num_pu * geo->num_chk;
ctrlr->chunk_info = calloc(ctrlr->num_chunks, sizeof(*ctrlr->chunk_info));
SPDK_CU_ASSERT_FATAL(ctrlr->chunk_info != NULL);
for (nsid = 0; nsid < ns_count; ++nsid) {
ctrlr->ns[nsid].nsid = nsid + 1;
}
@ -218,6 +225,12 @@ spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
return ctrlr->ns_count;
}
/* Test stub: return the namespace id assigned when the fake controller's
 * namespaces were created.
 */
uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
return ns->nsid;
}
struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
@ -420,6 +433,24 @@ spdk_nvme_ocssd_ns_cmd_vector_reset(struct spdk_nvme_ns *ns,
return 0;
}
static struct spdk_nvme_cpl g_chunk_info_cpl;
static bool g_zone_info_status = true;
/* Test stub for the chunk-information log page read.  Copies payload_size
 * bytes from the fake controller's chunk_info array at the requested byte
 * offset and invokes the completion callback synchronously with the globally
 * configured g_chunk_info_cpl status (the driver's zone-info chaining relies
 * on this synchronous completion).  log_page and nsid are ignored — the stub
 * assumes every call targets SPDK_OCSSD_LOG_CHUNK_INFO.
 */
int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
uint8_t log_page, uint32_t nsid,
void *payload, uint32_t payload_size,
uint64_t offset,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
/* Reads must stay within the fake chunk information table. */
SPDK_CU_ASSERT_FATAL(offset + payload_size <= sizeof(*ctrlr->chunk_info) * ctrlr->num_chunks);
memcpy(payload, ((char *)ctrlr->chunk_info) + offset, payload_size);
cb_fn(cb_arg, &g_chunk_info_cpl);
return 0;
}
static void
create_bdev_cb(const char *bdev_name, int status, void *ctx)
{
@ -629,21 +660,27 @@ test_lba_translation(void)
lba = bdev_ocssd_to_disk_lba(ocssd_bdev, 0);
CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 0, 0));
CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), 0);
lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size - 1);
CU_ASSERT_EQUAL(lba, generate_lba(&geometry, bdev->zone_size - 1, 0, 0, 0));
CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size - 1);
lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size);
CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 1, 0));
CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size);
lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu);
CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 0, 1));
CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size * geometry.num_pu);
lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu + 68);
CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 0, 0, 1));
CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size * geometry.num_pu + 68);
lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size + 68);
CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 0, 1, 0));
CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size + 68);
delete_nvme_bdev_controller(nvme_bdev_ctrlr);
free_controller(ctrlr);
@ -673,27 +710,239 @@ test_lba_translation(void)
lba = bdev_ocssd_to_disk_lba(ocssd_bdev, 0);
CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 0, 0));
CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), 0);
lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size - 1);
CU_ASSERT_EQUAL(lba, generate_lba(&geometry, bdev->zone_size - 1, 0, 0, 0));
CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size - 1);
lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size);
CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, 1, 0));
CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size);
lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * (geometry.num_pu - 1));
CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 0, geometry.num_pu - 1, 0));
CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba), bdev->zone_size * (geometry.num_pu - 1));
lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * (geometry.num_pu));
lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu * geometry.num_grp);
CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 0, 1, 0, 0));
CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba),
bdev->zone_size * geometry.num_pu * geometry.num_grp);
lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * (geometry.num_pu) + 68);
lba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev->zone_size * geometry.num_pu * geometry.num_grp + 68);
CU_ASSERT_EQUAL(lba, generate_lba(&geometry, 68, 1, 0, 0));
CU_ASSERT_EQUAL(bdev_ocssd_from_disk_lba(ocssd_bdev, lba),
bdev->zone_size * geometry.num_pu * geometry.num_grp + 68);
delete_nvme_bdev_controller(nvme_bdev_ctrlr);
free_controller(ctrlr);
}
/* bdev-layer completion callback: the reported success flag must match the
 * outcome the test expects (g_zone_info_status).
 */
static void
get_zone_info_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
CU_ASSERT_EQUAL(g_zone_info_status, success);
}
/* Compute the index of a chunk inside the chunk-information log, which is laid
 * out group-major: all chunks of a group's parallel units are contiguous.
 */
static uint64_t
generate_chunk_offset(const struct spdk_ocssd_geometry_data *geo, uint64_t chk,
		      uint64_t pu, uint64_t grp)
{
	uint64_t chunks_per_group = (uint64_t)geo->num_pu * geo->num_chk;

	return chk + pu * geo->num_chk + grp * chunks_per_group;
}
/* Allocate a zeroed bdev_io with the OCSSD driver context area appended,
 * mirroring how the bdev layer sizes its io objects.  Aborts the test on OOM.
 */
static struct spdk_bdev_io *
alloc_ocssd_io(void)
{
	struct spdk_bdev_io *io;

	io = calloc(1, sizeof(*io) + sizeof(struct bdev_ocssd_io));
	SPDK_CU_ASSERT_FATAL(io != NULL);

	return io;
}
/* Populate one entry of the fake controller's chunk-information table.
 * The offset is bounds-checked with SPDK_CU_ASSERT_FATAL, which aborts the
 * test on violation; the previous duplicate plain assert() (a no-op under
 * NDEBUG anyway) was redundant and has been removed.
 */
static void
set_chunk_info(struct spdk_nvme_ctrlr *ctrlr, uint64_t offset,
	       const struct spdk_ocssd_chunk_information_entry *chunk_info)
{
	SPDK_CU_ASSERT_FATAL(offset < ctrlr->num_chunks);
	ctrlr->chunk_info[offset] = *chunk_info;
}
/* Reset the fake controller's whole chunk-information table to zeroes so a
 * later test case cannot accidentally observe stale entries.
 */
static void
clear_chunk_info(struct spdk_nvme_ctrlr *ctrlr)
{
	size_t table_size = ctrlr->num_chunks * sizeof(ctrlr->chunk_info[0]);

	memset(ctrlr->chunk_info, 0, table_size);
}
static void
test_get_zone_info(void)
{
struct spdk_nvme_ctrlr *ctrlr;
struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
struct spdk_nvme_transport_id trid = { .traddr = "00:00:00" };
struct ocssd_bdev *ocssd_bdev;
const char *controller_name = "nvme0";
const char *bdev_name = "nvme0n1";
struct spdk_bdev *bdev;
struct spdk_bdev_io *bdev_io;
#define MAX_ZONE_INFO_COUNT 64
struct spdk_bdev_zone_info zone_info[MAX_ZONE_INFO_COUNT];
struct spdk_ocssd_chunk_information_entry chunk_info = {};
struct spdk_ocssd_geometry_data geometry;
uint64_t chunk_offset;
int rc, offset;
geometry = (struct spdk_ocssd_geometry_data) {
.clba = 512,
.num_chk = 64,
.num_pu = 8,
.num_grp = 4,
.lbaf = {
.lbk_len = 9,
.chk_len = 6,
.pu_len = 3,
.grp_len = 2,
}
};
ctrlr = create_controller(&trid, 1, &geometry);
nvme_bdev_ctrlr = create_nvme_bdev_controller(&trid, controller_name);
rc = create_bdev(controller_name, bdev_name, 1);
CU_ASSERT_EQUAL(rc, 0);
bdev = spdk_bdev_get_by_name(bdev_name);
SPDK_CU_ASSERT_FATAL(bdev != NULL);
ocssd_bdev = SPDK_CONTAINEROF(bdev, struct ocssd_bdev, nvme_bdev.disk);
bdev_io = alloc_ocssd_io();
bdev_io->internal.cb = get_zone_info_cb;
bdev_io->bdev = bdev;
/* Verify empty zone */
bdev_io->u.zone_mgmt.zone_id = 0;
bdev_io->u.zone_mgmt.num_zones = 1;
bdev_io->u.zone_mgmt.buf = &zone_info;
chunk_info.cs.free = 1;
chunk_info.wp = 0;
set_chunk_info(ctrlr, 0, &chunk_info);
rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
CU_ASSERT_EQUAL(rc, 0);
clear_chunk_info(ctrlr);
CU_ASSERT_EQUAL(zone_info[0].state, SPDK_BDEV_ZONE_STATE_EMPTY);
CU_ASSERT_EQUAL(zone_info[0].zone_id, 0);
CU_ASSERT_EQUAL(zone_info[0].write_pointer, 0);
CU_ASSERT_EQUAL(zone_info[0].capacity, geometry.clba);
/* Verify open zone */
bdev_io->u.zone_mgmt.zone_id = bdev->zone_size;
bdev_io->u.zone_mgmt.num_zones = 1;
bdev_io->u.zone_mgmt.buf = &zone_info;
memset(&chunk_info, 0, sizeof(chunk_info));
chunk_info.cs.open = 1;
chunk_info.wp = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev_io->u.zone_mgmt.zone_id + 68);
chunk_info.slba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev_io->u.zone_mgmt.zone_id);
chunk_info.cnlb = 511;
chunk_info.ct.size_deviate = 1;
set_chunk_info(ctrlr, generate_chunk_offset(&geometry, 0, 1, 0), &chunk_info);
rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
CU_ASSERT_EQUAL(rc, 0);
clear_chunk_info(ctrlr);
CU_ASSERT_EQUAL(zone_info[0].state, SPDK_BDEV_ZONE_STATE_OPEN);
CU_ASSERT_EQUAL(zone_info[0].zone_id, bdev->zone_size);
CU_ASSERT_EQUAL(zone_info[0].write_pointer, bdev->zone_size + 68);
CU_ASSERT_EQUAL(zone_info[0].capacity, chunk_info.cnlb);
/* Verify offline zone at 2nd chunk */
bdev_io->u.zone_mgmt.zone_id = bdev->zone_size * geometry.num_pu * geometry.num_grp;
bdev_io->u.zone_mgmt.num_zones = 1;
bdev_io->u.zone_mgmt.buf = &zone_info;
memset(&chunk_info, 0, sizeof(chunk_info));
chunk_info.cs.offline = 1;
chunk_info.wp = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev_io->u.zone_mgmt.zone_id);
chunk_info.slba = bdev_ocssd_to_disk_lba(ocssd_bdev, bdev_io->u.zone_mgmt.zone_id);
set_chunk_info(ctrlr, generate_chunk_offset(&geometry, 1, 0, 0), &chunk_info);
rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
CU_ASSERT_EQUAL(rc, 0);
clear_chunk_info(ctrlr);
CU_ASSERT_EQUAL(zone_info[0].state, SPDK_BDEV_ZONE_STATE_OFFLINE);
CU_ASSERT_EQUAL(zone_info[0].zone_id, bdev_io->u.zone_mgmt.zone_id);
CU_ASSERT_EQUAL(zone_info[0].write_pointer, bdev_io->u.zone_mgmt.zone_id);
/* Verify multiple zones at a time */
bdev_io->u.zone_mgmt.zone_id = 0;
bdev_io->u.zone_mgmt.num_zones = MAX_ZONE_INFO_COUNT;
bdev_io->u.zone_mgmt.buf = &zone_info;
for (offset = 0; offset < MAX_ZONE_INFO_COUNT; ++offset) {
chunk_offset = generate_chunk_offset(&geometry,
(offset / (geometry.num_grp * geometry.num_pu)) % geometry.num_chk,
offset % geometry.num_pu,
(offset / geometry.num_pu) % geometry.num_grp);
memset(&chunk_info, 0, sizeof(chunk_info));
chunk_info.cs.open = 1;
chunk_info.wp = bdev_ocssd_to_disk_lba(ocssd_bdev, offset * bdev->zone_size + 68);
chunk_info.slba = bdev_ocssd_to_disk_lba(ocssd_bdev, offset * bdev->zone_size);
set_chunk_info(ctrlr, chunk_offset, &chunk_info);
}
rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
CU_ASSERT_EQUAL(rc, 0);
for (offset = 0; offset < MAX_ZONE_INFO_COUNT; ++offset) {
CU_ASSERT_EQUAL(zone_info[offset].state, SPDK_BDEV_ZONE_STATE_OPEN);
CU_ASSERT_EQUAL(zone_info[offset].zone_id, bdev->zone_size * offset);
CU_ASSERT_EQUAL(zone_info[offset].write_pointer, bdev->zone_size * offset + 68);
CU_ASSERT_EQUAL(zone_info[offset].capacity, geometry.clba);
}
clear_chunk_info(ctrlr);
/* Verify misaligned start zone LBA */
bdev_io->u.zone_mgmt.zone_id = 1;
bdev_io->u.zone_mgmt.num_zones = MAX_ZONE_INFO_COUNT;
bdev_io->u.zone_mgmt.buf = &zone_info;
rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
CU_ASSERT_EQUAL(rc, -EINVAL);
/* Verify correct NVMe error forwarding */
bdev_io->u.zone_mgmt.zone_id = 0;
bdev_io->u.zone_mgmt.num_zones = MAX_ZONE_INFO_COUNT;
bdev_io->u.zone_mgmt.buf = &zone_info;
rc = bdev_ocssd_get_zone_info(NULL, bdev_io);
CU_ASSERT_EQUAL(rc, 0);
g_chunk_info_cpl = (struct spdk_nvme_cpl) {
.status = {
.sct = SPDK_NVME_SCT_GENERIC,
.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR
}
};
g_zone_info_status = false;
g_chunk_info_cpl = (struct spdk_nvme_cpl) {};
g_zone_info_status = true;
delete_nvme_bdev_controller(nvme_bdev_ctrlr);
free(bdev_io);
free_controller(ctrlr);
}
int
main(int argc, const char **argv)
{
@ -713,7 +962,8 @@ main(int argc, const char **argv)
if (
CU_add_test(suite, "test_create_controller", test_create_controller) == NULL ||
CU_add_test(suite, "test_device_geometry", test_device_geometry) == NULL ||
CU_add_test(suite, "test_lba_translation", test_lba_translation) == NULL
CU_add_test(suite, "test_lba_translation", test_lba_translation) == NULL ||
CU_add_test(suite, "test_get_zone_info", test_get_zone_info) == NULL
) {
CU_cleanup_registry();
return CU_get_error();