ftl: Added unit tests for FTL library

This patch implements unit tests for the following modules:
 * band
 * PPA (Physical Page Address) translations
 * write buffer

Change-Id: Ia7292bd3027347e8a3da77dafe71cde2c016bf38
Signed-off-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-on: https://review.gerrithub.io/c/431328
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
Wojciech Malikowski 2018-10-29 10:25:26 -04:00 committed by Ben Walker
parent 05b43152b2
commit 70b86ec995
15 changed files with 1332 additions and 0 deletions

View File

@ -85,6 +85,7 @@ extern "C" {
#include <sys/un.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <regex.h>
/* GNU extension */
#include <getopt.h>

View File

@ -167,7 +167,9 @@ ftl_rwb_batch_init(struct ftl_rwb *rwb, struct ftl_rwb_batch *batch, unsigned in
return 0;
error:
free(batch->entries);
batch->entries = NULL;
spdk_dma_free(batch->buffer);
batch->buffer = NULL;
return -1;
}

View File

@ -40,6 +40,7 @@ DIRS-$(CONFIG_REDUCE) += reduce
ifeq ($(OS),Linux)
DIRS-$(CONFIG_VHOST) += vhost
endif
DIRS-$(CONFIG_FTL) += ftl
.PHONY: all clean $(DIRS-y)

View File

@ -0,0 +1,44 @@
#
# BSD LICENSE
#
# Copyright (c) Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y = ftl_rwb.c ftl_ppa ftl_band.c
.PHONY: all clean $(DIRS-y)
all: $(DIRS-y)
clean: $(DIRS-y)
include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk

View File

@ -0,0 +1,117 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "spdk/ftl.h"
/*
 * Allocate and populate a fake spdk_ftl_dev for the tests: copies the
 * geometry and parallel-unit range into the device and assigns a start
 * PPA to every punit in the range.  Released by test_free_ftl_dev().
 */
static struct spdk_ftl_dev *
test_init_ftl_dev(const struct spdk_ocssd_geometry_data *geo,
		  const struct spdk_ftl_punit_range *range)
{
	struct spdk_ftl_dev *dev = calloc(1, sizeof(*dev));

	SPDK_CU_ASSERT_FATAL(dev != NULL);

	dev->xfer_size = geo->ws_opt;
	dev->geo = *geo;
	dev->range = *range;

	dev->bands = calloc(geo->num_chk, sizeof(*dev->bands));
	SPDK_CU_ASSERT_FATAL(dev->bands != NULL);

	dev->punits = calloc(ftl_dev_num_punits(dev), sizeof(*dev->punits));
	SPDK_CU_ASSERT_FATAL(dev->punits != NULL);

	for (size_t i = 0; i < ftl_dev_num_punits(dev); ++i) {
		/* Flat punit id -> (group, parallel unit) coordinates */
		unsigned int pu_id = range->begin + i;

		dev->punits[i].dev = dev;
		dev->punits[i].start_ppa.grp = pu_id % geo->num_grp;
		dev->punits[i].start_ppa.pu = pu_id / geo->num_grp;
	}

	return dev;
}
/*
 * Initialize band `id` inside the fake device: allocates the valid-lbk
 * bitmap and the chunk buffer, and links one CLOSED chunk per punit into
 * the band's chunk CIRCLEQ.  Released by test_free_ftl_band().
 */
static struct ftl_band *
test_init_ftl_band(struct spdk_ftl_dev *dev, size_t id)
{
struct ftl_band *band;
struct ftl_chunk *chunk;
SPDK_CU_ASSERT_FATAL(dev != NULL);
SPDK_CU_ASSERT_FATAL(id < dev->geo.num_chk);
band = &dev->bands[id];
band->dev = dev;
band->id = id;
CIRCLEQ_INIT(&band->chunks);
/* One bit per logical block in the band marks data validity */
band->md.vld_map = spdk_bit_array_create(ftl_num_band_lbks(dev));
SPDK_CU_ASSERT_FATAL(band->md.vld_map != NULL);
band->chunk_buf = calloc(ftl_dev_num_punits(dev), sizeof(*band->chunk_buf));
SPDK_CU_ASSERT_FATAL(band->chunk_buf != NULL);
for (size_t i = 0; i < ftl_dev_num_punits(dev); ++i) {
chunk = &band->chunk_buf[i];
chunk->pos = i;
chunk->state = FTL_CHUNK_STATE_CLOSED;
chunk->punit = &dev->punits[i];
/* Chunk PPA = punit's start PPA with the chunk index set to the band id */
chunk->start_ppa = dev->punits[i].start_ppa;
chunk->start_ppa.chk = band->id;
CIRCLEQ_INSERT_TAIL(&band->chunks, chunk, circleq);
band->num_chunks++;
}
pthread_spin_init(&band->md.lock, PTHREAD_PROCESS_PRIVATE);
return band;
}
/* Release everything allocated by test_init_ftl_dev(). */
static void
test_free_ftl_dev(struct spdk_ftl_dev *dev)
{
	SPDK_CU_ASSERT_FATAL(dev != NULL);

	free(dev->bands);
	free(dev->punits);
	free(dev);
}
/* Release everything allocated by test_init_ftl_band() and ftl_band_alloc_md(). */
static void
test_free_ftl_band(struct ftl_band *band)
{
	SPDK_CU_ASSERT_FATAL(band != NULL);

	free(band->md.lba_map);
	free(band->chunk_buf);
	spdk_bit_array_free(&band->md.vld_map);
}

View File

@ -0,0 +1 @@
ftl_band_ut

View File

@ -0,0 +1,38 @@
#
# BSD LICENSE
#
# Copyright (c) Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
TEST_FILE = ftl_band_ut.c
include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk

View File

@ -0,0 +1,348 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "ftl/ftl_core.c"
#include "ftl/ftl_band.c"
#include "../common/utils.c"
#define TEST_BAND_IDX 68
#define TEST_LBA 0x68676564
static struct spdk_ocssd_geometry_data g_geo = {
.num_grp = 4,
.num_pu = 3,
.num_chk = 1500,
.clba = 100,
.ws_opt = 16,
.ws_min = 4,
};
static struct spdk_ftl_punit_range g_range = {
.begin = 2,
.end = 9,
};
static struct spdk_ftl_dev *g_dev;
static struct ftl_band *g_band;
/* Per-test setup: create the global device/band pair and allocate band metadata */
static void
setup_band(void)
{
int rc;
g_dev = test_init_ftl_dev(&g_geo, &g_range);
g_band = test_init_ftl_band(g_dev, TEST_BAND_IDX);
rc = ftl_band_alloc_md(g_band);
CU_ASSERT_EQUAL_FATAL(rc, 0);
}
/* Per-test teardown: release the objects created by setup_band() */
static void
cleanup_band(void)
{
test_free_ftl_band(g_band);
test_free_ftl_dev(g_dev);
}
/* Build a PPA whose (grp, pu) coordinates correspond to flat punit id `punit` */
static struct ftl_ppa
ppa_from_punit(uint64_t punit)
{
	struct ftl_ppa ppa = {};

	ppa.pu = punit / g_geo.num_grp;
	ppa.grp = punit % g_geo.num_grp;

	return ppa;
}
/*
 * Reference computation of a band-relative lbk offset: flattened punit
 * index times lbks-per-chunk, plus the lbk within the chunk.  Used to
 * cross-check ftl_band_lbkoff_from_ppa().
 */
static uint64_t
offset_from_ppa(struct ftl_ppa ppa, struct ftl_band *band)
{
struct spdk_ftl_dev *dev = band->dev;
unsigned int punit;
/* TODO: ftl_ppa_flatten_punit should return uint32_t */
punit = ftl_ppa_flatten_punit(dev, ppa);
CU_ASSERT_EQUAL(ppa.chk, band->id);
return punit * ftl_dev_lbks_in_chunk(dev) + ppa.lbk;
}
/*
 * Verify that the lbk offset of each punit's first block equals
 * consecutive multiples of the chunk size.
 */
static void
test_band_lbkoff_from_ppa_base(void)
{
	struct ftl_ppa ppa;
	uint64_t lbkoff, chunk_idx = 0;

	setup_band();

	for (uint64_t punit = g_range.begin; punit < g_range.end; ++punit) {
		ppa = ppa_from_punit(punit);
		ppa.chk = TEST_BAND_IDX;

		lbkoff = ftl_band_lbkoff_from_ppa(g_band, ppa);
		CU_ASSERT_EQUAL(lbkoff, chunk_idx * ftl_dev_lbks_in_chunk(g_dev));
		chunk_idx++;
	}

	cleanup_band();
}
/*
 * Check ftl_band_lbkoff_from_ppa() against the reference
 * offset_from_ppa() for every (punit, lbk) pair in the band.
 */
static void
test_band_lbkoff_from_ppa_lbk(void)
{
struct ftl_ppa ppa;
uint64_t offset, expect, i, j;
setup_band();
for (i = g_range.begin; i < g_range.end; ++i) {
for (j = 0; j < g_geo.clba; ++j) {
ppa = ppa_from_punit(i);
ppa.chk = TEST_BAND_IDX;
ppa.lbk = j;
offset = ftl_band_lbkoff_from_ppa(g_band, ppa);
expect = offset_from_ppa(ppa, g_band);
CU_ASSERT_EQUAL(offset, expect);
}
}
cleanup_band();
}
/*
 * Round-trip test: PPA -> lbk offset -> PPA must reproduce the original
 * address for every (punit, lbk) pair in the band.
 */
static void
test_band_ppa_from_lbkoff(void)
{
struct ftl_ppa ppa, expect;
uint64_t offset, i, j;
setup_band();
for (i = g_range.begin; i < g_range.end; ++i) {
for (j = 0; j < g_geo.clba; ++j) {
expect = ppa_from_punit(i);
expect.chk = TEST_BAND_IDX;
expect.lbk = j;
offset = ftl_band_lbkoff_from_ppa(g_band, expect);
ppa = ftl_band_ppa_from_lbkoff(g_band, offset);
CU_ASSERT_EQUAL(ppa.ppa, expect.ppa);
}
}
cleanup_band();
}
/*
 * Verify ftl_band_set_addr() updates the band metadata: the valid count,
 * the LBA map entry and the valid bitmap at the address's lbk offset.
 */
static void
test_band_set_addr(void)
{
struct ftl_md *md;
struct ftl_ppa ppa;
uint64_t offset = 0;
setup_band();
md = &g_band->md;
ppa = ppa_from_punit(g_range.begin);
ppa.chk = TEST_BAND_IDX;
/* Fresh band has no valid blocks */
CU_ASSERT_EQUAL(md->num_vld, 0);
offset = offset_from_ppa(ppa, g_band);
ftl_band_set_addr(g_band, TEST_LBA, ppa);
CU_ASSERT_EQUAL(md->num_vld, 1);
CU_ASSERT_EQUAL(md->lba_map[offset], TEST_LBA);
CU_ASSERT_TRUE(spdk_bit_array_get(md->vld_map, offset));
/* Second write on the next parallel unit */
ppa.pu++;
offset = offset_from_ppa(ppa, g_band);
ftl_band_set_addr(g_band, TEST_LBA + 1, ppa);
CU_ASSERT_EQUAL(md->num_vld, 2);
CU_ASSERT_EQUAL(md->lba_map[offset], TEST_LBA + 1);
CU_ASSERT_TRUE(spdk_bit_array_get(md->vld_map, offset));
/* The first entry must remain marked valid */
ppa.pu--;
offset = offset_from_ppa(ppa, g_band);
CU_ASSERT_TRUE(spdk_bit_array_get(md->vld_map, offset));
cleanup_band();
}
/*
 * Verify ftl_invalidate_addr() clears exactly the targeted entry's valid
 * bit and decrements the valid count, leaving other entries untouched.
 */
static void
test_invalidate_addr(void)
{
struct ftl_md *md;
struct ftl_ppa ppa;
uint64_t offset[2];
setup_band();
md = &g_band->md;
ppa = ppa_from_punit(g_range.begin);
ppa.chk = TEST_BAND_IDX;
/* Set one address, then invalidate it */
offset[0] = offset_from_ppa(ppa, g_band);
ftl_band_set_addr(g_band, TEST_LBA, ppa);
CU_ASSERT_EQUAL(md->num_vld, 1);
CU_ASSERT_TRUE(spdk_bit_array_get(md->vld_map, offset[0]));
ftl_invalidate_addr(g_band->dev, ppa);
CU_ASSERT_EQUAL(md->num_vld, 0);
CU_ASSERT_FALSE(spdk_bit_array_get(md->vld_map, offset[0]));
/* Set two addresses on different punits, invalidate only the second */
offset[0] = offset_from_ppa(ppa, g_band);
ftl_band_set_addr(g_band, TEST_LBA, ppa);
ppa.pu++;
offset[1] = offset_from_ppa(ppa, g_band);
ftl_band_set_addr(g_band, TEST_LBA + 1, ppa);
CU_ASSERT_EQUAL(md->num_vld, 2);
CU_ASSERT_TRUE(spdk_bit_array_get(md->vld_map, offset[0]));
CU_ASSERT_TRUE(spdk_bit_array_get(md->vld_map, offset[1]));
ftl_invalidate_addr(g_band->dev, ppa);
CU_ASSERT_EQUAL(md->num_vld, 1);
CU_ASSERT_TRUE(spdk_bit_array_get(md->vld_map, offset[0]));
CU_ASSERT_FALSE(spdk_bit_array_get(md->vld_map, offset[1]));
cleanup_band();
}
/*
 * Exercise ftl_band_next_xfer_ppa(): advancing an address by a number of
 * blocks within a chunk, across chunks, wrapping around the band, and
 * skipping chunks marked bad.
 */
static void
test_next_xfer_ppa(void)
{
struct ftl_ppa ppa, result, expect;
setup_band();
/* Verify simple one lbk increment */
ppa = ppa_from_punit(g_range.begin);
ppa.chk = TEST_BAND_IDX;
ppa.lbk = 0;
expect = ppa;
expect.lbk = 1;
result = ftl_band_next_xfer_ppa(g_band, ppa, 1);
CU_ASSERT_EQUAL(result.ppa, expect.ppa);
/* Verify jumping between chunks */
expect = ppa_from_punit(g_range.begin + 1);
expect.chk = TEST_BAND_IDX;
result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size);
CU_ASSERT_EQUAL(result.ppa, expect.ppa);
/* Verify jumping works with unaligned offsets */
expect = ppa_from_punit(g_range.begin + 1);
expect.chk = TEST_BAND_IDX;
expect.lbk = 3;
result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size + 3);
CU_ASSERT_EQUAL(result.ppa, expect.ppa);
/* Verify jumping from last chunk to the first one */
expect = ppa_from_punit(g_range.begin);
expect.chk = TEST_BAND_IDX;
expect.lbk = g_dev->xfer_size;
ppa = ppa_from_punit(g_range.end);
ppa.chk = TEST_BAND_IDX;
result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size);
CU_ASSERT_EQUAL(result.ppa, expect.ppa);
/* Verify jumping from last chunk to the first one with unaligned offset */
expect = ppa_from_punit(g_range.begin);
expect.chk = TEST_BAND_IDX;
expect.lbk = g_dev->xfer_size + 2;
ppa = ppa_from_punit(g_range.end);
ppa.chk = TEST_BAND_IDX;
result = ftl_band_next_xfer_ppa(g_band, ppa, g_dev->xfer_size + 2);
CU_ASSERT_EQUAL(result.ppa, expect.ppa);
/* Verify large offset spanning across the whole band multiple times */
expect = ppa_from_punit(g_range.begin);
expect.chk = TEST_BAND_IDX;
expect.lbk = g_dev->xfer_size * 5 + 4;
ppa = ppa_from_punit(g_range.begin);
ppa.chk = TEST_BAND_IDX;
ppa.lbk = g_dev->xfer_size * 2 + 1;
result = ftl_band_next_xfer_ppa(g_band, ppa, 3 * g_dev->xfer_size *
ftl_dev_num_punits(g_dev) + 3);
CU_ASSERT_EQUAL(result.ppa, expect.ppa);
/* Remove one chunk and verify it's skipped properly */
g_band->chunk_buf[1].state = FTL_CHUNK_STATE_BAD;
CIRCLEQ_REMOVE(&g_band->chunks, &g_band->chunk_buf[1], circleq);
g_band->num_chunks--;
expect = ppa_from_punit(g_range.begin + 2);
expect.chk = TEST_BAND_IDX;
expect.lbk = g_dev->xfer_size * 5 + 4;
ppa = ppa_from_punit(g_range.begin);
ppa.chk = TEST_BAND_IDX;
ppa.lbk = g_dev->xfer_size * 2 + 1;
result = ftl_band_next_xfer_ppa(g_band, ppa, 3 * g_dev->xfer_size *
(ftl_dev_num_punits(g_dev) - 1) + g_dev->xfer_size + 3);
CU_ASSERT_EQUAL(result.ppa, expect.ppa);
cleanup_band();
}
int
main(int argc, char **argv)
{
CU_pSuite suite = NULL;
unsigned int num_failures;
if (CU_initialize_registry() != CUE_SUCCESS) {
return CU_get_error();
}
suite = CU_add_suite("ftl_band_suite", NULL, NULL);
if (!suite) {
CU_cleanup_registry();
return CU_get_error();
}
if (
CU_add_test(suite, "test_band_lbkoff_from_ppa_base",
test_band_lbkoff_from_ppa_base) == NULL
|| CU_add_test(suite, "test_band_lbkoff_from_ppa_lbk",
test_band_lbkoff_from_ppa_lbk) == NULL
|| CU_add_test(suite, "test_band_ppa_from_lbkoff",
test_band_ppa_from_lbkoff) == NULL
|| CU_add_test(suite, "test_band_set_addr",
test_band_set_addr) == NULL
|| CU_add_test(suite, "test_invalidate_addr",
test_invalidate_addr) == NULL
|| CU_add_test(suite, "test_next_xfer_ppa",
test_next_xfer_ppa) == NULL
) {
CU_cleanup_registry();
return CU_get_error();
}
CU_basic_set_mode(CU_BRM_VERBOSE);
CU_basic_run_tests();
num_failures = CU_get_number_of_failures();
CU_cleanup_registry();
return num_failures;
}

1
test/unit/lib/ftl/ftl_ppa/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
ftl_ppa_ut

View File

@ -0,0 +1,38 @@
#
# BSD LICENSE
#
# Copyright (c) Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
TEST_FILE = ftl_ppa_ut.c
include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk

View File

@ -0,0 +1,275 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "ftl/ftl_core.h"
#define L2P_TABLE_SIZE 1024
static struct spdk_ftl_dev *g_dev;
/*
 * Allocate a fake device with an L2P table of L2P_TABLE_SIZE entries of
 * `size` bytes each.  Released by cleanup().
 */
static struct spdk_ftl_dev *
test_alloc_dev(size_t size)
{
	struct spdk_ftl_dev *dev;

	dev = calloc(1, sizeof(*dev));
	/* Fail fast on OOM instead of dereferencing NULL later (CERT MEM32-C);
	 * matches the allocation checks used elsewhere in these tests. */
	SPDK_CU_ASSERT_FATAL(dev != NULL);

	dev->num_lbas = L2P_TABLE_SIZE;
	dev->l2p = calloc(L2P_TABLE_SIZE, size);
	SPDK_CU_ASSERT_FATAL(dev->l2p != NULL);

	return dev;
}
/*
 * Zero the whole L2P table between tests; the element width depends on
 * whether the device uses the packed (32-bit) PPA format.
 */
static void
clean_l2p(void)
{
	size_t elem_size = ftl_ppa_packed(g_dev) ? sizeof(uint32_t) : sizeof(uint64_t);

	memset(g_dev->l2p, 0, g_dev->num_lbas * elem_size);
}
/*
 * Suite setup for the packed (32-bit) PPA format: 8 lbk bits, 4 chk bits,
 * 3 pu bits and 2 grp bits = 17 bits total, so each address fits in a
 * 32-bit L2P entry.
 */
static int
setup_l2p_32bit(void)
{
g_dev = test_alloc_dev(sizeof(uint32_t));
g_dev->ppaf.lbk_offset = 0;
g_dev->ppaf.lbk_mask = (1 << 8) - 1;
g_dev->ppaf.chk_offset = 8;
g_dev->ppaf.chk_mask = (1 << 4) - 1;
g_dev->ppaf.pu_offset = g_dev->ppaf.chk_offset + 4;
g_dev->ppaf.pu_mask = (1 << 3) - 1;
g_dev->ppaf.grp_offset = g_dev->ppaf.pu_offset + 3;
g_dev->ppaf.grp_mask = (1 << 2) - 1;
g_dev->ppa_len = g_dev->ppaf.grp_offset + 2;
return 0;
}
/*
 * Suite setup for the wide PPA format: 31 lbk bits, 4 chk bits, 3 pu bits
 * and 2 grp bits = 40 bits total, requiring 64-bit L2P entries.
 */
static int
setup_l2p_64bit(void)
{
g_dev = test_alloc_dev(sizeof(uint64_t));
g_dev->ppaf.lbk_offset = 0;
g_dev->ppaf.lbk_mask = (1UL << 31) - 1;
g_dev->ppaf.chk_offset = 31;
g_dev->ppaf.chk_mask = (1 << 4) - 1;
g_dev->ppaf.pu_offset = g_dev->ppaf.chk_offset + 4;
g_dev->ppaf.pu_mask = (1 << 3) - 1;
g_dev->ppaf.grp_offset = g_dev->ppaf.pu_offset + 3;
g_dev->ppaf.grp_mask = (1 << 2) - 1;
g_dev->ppa_len = g_dev->ppaf.grp_offset + 2;
return 0;
}
/* Suite teardown: release the fake device created by the setup functions */
static int
cleanup(void)
{
free(g_dev->l2p);
free(g_dev);
g_dev = NULL;
return 0;
}
/*
 * Verify that packing a PPA into 32 bits and unpacking it restores the
 * original address for valid, invalid and cached addresses.
 */
static void
test_ppa_pack(void)
{
struct ftl_ppa orig = {}, ppa;
/* Check valid address transformation */
orig.lbk = 4;
orig.chk = 3;
orig.pu = 2;
orig.grp = 1;
ppa = ftl_ppa_to_packed(g_dev, orig);
CU_ASSERT_TRUE(ppa.ppa <= UINT32_MAX);
CU_ASSERT_FALSE(ppa.pack.cached);
ppa = ftl_ppa_from_packed(g_dev, ppa);
CU_ASSERT_FALSE(ftl_ppa_invalid(ppa));
CU_ASSERT_EQUAL(ppa.ppa, orig.ppa);
/* Check invalid address transformation */
orig = ftl_to_ppa(FTL_PPA_INVALID);
ppa = ftl_ppa_to_packed(g_dev, orig);
CU_ASSERT_TRUE(ppa.ppa <= UINT32_MAX);
ppa = ftl_ppa_from_packed(g_dev, ppa);
CU_ASSERT_TRUE(ftl_ppa_invalid(ppa));
/* Check cached entry offset transformation */
orig.cached = 1;
orig.offset = 1024;
ppa = ftl_ppa_to_packed(g_dev, orig);
CU_ASSERT_TRUE(ppa.ppa <= UINT32_MAX);
CU_ASSERT_TRUE(ppa.pack.cached);
ppa = ftl_ppa_from_packed(g_dev, ppa);
CU_ASSERT_FALSE(ftl_ppa_invalid(ppa));
CU_ASSERT_TRUE(ftl_ppa_cached(ppa));
CU_ASSERT_EQUAL(ppa.ppa, orig.ppa);
clean_l2p();
}
/*
 * Fill the whole L2P with distinct addresses via ftl_l2p_set(), then read
 * each entry back with ftl_l2p_get() and compare against the same
 * recomputed address.
 */
static void
test_ppa_trans(void)
{
struct ftl_ppa ppa = {}, orig = {};
size_t i;
for (i = 0; i < L2P_TABLE_SIZE; ++i) {
/* Derive each field from the index, wrapped to the field's width */
ppa.lbk = i % (g_dev->ppaf.lbk_mask + 1);
ppa.chk = i % (g_dev->ppaf.chk_mask + 1);
ppa.pu = i % (g_dev->ppaf.pu_mask + 1);
ppa.grp = i % (g_dev->ppaf.grp_mask + 1);
ftl_l2p_set(g_dev, i, ppa);
}
for (i = 0; i < L2P_TABLE_SIZE; ++i) {
orig.lbk = i % (g_dev->ppaf.lbk_mask + 1);
orig.chk = i % (g_dev->ppaf.chk_mask + 1);
orig.pu = i % (g_dev->ppaf.pu_mask + 1);
orig.grp = i % (g_dev->ppaf.grp_mask + 1);
ppa = ftl_l2p_get(g_dev, i);
CU_ASSERT_EQUAL(ppa.ppa, orig.ppa);
}
clean_l2p();
}
/*
 * Mark every even LBA as FTL_PPA_INVALID and confirm only those entries
 * read back as invalid.
 */
static void
test_ppa_invalid(void)
{
	struct ftl_ppa ppa;
	size_t lba;

	/* Invalidate every other LBA */
	for (lba = 0; lba < L2P_TABLE_SIZE; lba += 2) {
		ftl_l2p_set(g_dev, lba, ftl_to_ppa(FTL_PPA_INVALID));
	}

	/* Even LBAs must be invalid, odd ones must not */
	for (lba = 0; lba < L2P_TABLE_SIZE; ++lba) {
		ppa = ftl_l2p_get(g_dev, lba);

		if (lba % 2 != 0) {
			CU_ASSERT_FALSE(ftl_ppa_invalid(ppa));
		} else {
			CU_ASSERT_TRUE(ftl_ppa_invalid(ppa));
		}
	}

	clean_l2p();
}
static void
test_ppa_cached(void)
{
struct ftl_ppa ppa;
size_t i;
/* Set every other LBA is cached */
for (i = 0; i < L2P_TABLE_SIZE; i += 2) {
ppa.cached = 1;
ppa.offset = i;
ftl_l2p_set(g_dev, i, ppa);
}
/* Check every even LBA is cached while others are not */
for (i = 0; i < L2P_TABLE_SIZE; ++i) {
ppa = ftl_l2p_get(g_dev, i);
if (i % 2 == 0) {
CU_ASSERT_TRUE(ftl_ppa_cached(ppa));
CU_ASSERT_EQUAL(ppa.offset, i);
} else {
CU_ASSERT_FALSE(ftl_ppa_cached(ppa));
}
}
clean_l2p();
}
int
main(int argc, char **argv)
{
CU_pSuite suite32 = NULL, suite64 = NULL;
unsigned int num_failures;
if (CU_initialize_registry() != CUE_SUCCESS) {
return CU_get_error();
}
suite32 = CU_add_suite("ftl_ppa32_suite", setup_l2p_32bit, cleanup);
if (!suite32) {
CU_cleanup_registry();
return CU_get_error();
}
suite64 = CU_add_suite("ftl_ppa64_suite", setup_l2p_64bit, cleanup);
if (!suite64) {
CU_cleanup_registry();
return CU_get_error();
}
if (
CU_add_test(suite32, "test_ppa_pack",
test_ppa_pack) == NULL
|| CU_add_test(suite32, "test_ppa32_invalid",
test_ppa_invalid) == NULL
|| CU_add_test(suite32, "test_ppa32_trans",
test_ppa_trans) == NULL
|| CU_add_test(suite32, "test_ppa32_cached",
test_ppa_cached) == NULL
|| CU_add_test(suite64, "test_ppa64_invalid",
test_ppa_invalid) == NULL
|| CU_add_test(suite64, "test_ppa64_trans",
test_ppa_trans) == NULL
|| CU_add_test(suite64, "test_ppa64_cached",
test_ppa_cached) == NULL
) {
CU_cleanup_registry();
return CU_get_error();
}
CU_basic_set_mode(CU_BRM_VERBOSE);
CU_basic_run_tests();
num_failures = CU_get_number_of_failures();
CU_cleanup_registry();
return num_failures;
}

View File

@ -0,0 +1 @@
ftl_rwb_ut

View File

@ -0,0 +1,38 @@
#
# BSD LICENSE
#
# Copyright (c) Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
TEST_FILE = ftl_rwb_ut.c
include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk

View File

@ -0,0 +1,421 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "spdk/stdinc.h"

#include <sched.h>

#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "ftl/ftl_rwb.c"
#define RWB_SIZE (1024 * 1024)
#define RWB_ENTRY_COUNT (RWB_SIZE / FTL_BLOCK_SIZE)
#define XFER_SIZE 16
#define METADATA_SIZE 64
static struct ftl_rwb *g_rwb;
/* Allocate a fresh write buffer sized RWB_SIZE with XFER_SIZE-entry batches */
static void
setup_rwb(void)
{
struct spdk_ftl_conf conf = { .rwb_size = RWB_SIZE };
g_rwb = ftl_rwb_init(&conf, XFER_SIZE, METADATA_SIZE);
SPDK_CU_ASSERT_FATAL(g_rwb != NULL);
}
/* Release the write buffer created by setup_rwb() */
static void
cleanup_rwb(void)
{
ftl_rwb_free(g_rwb);
g_rwb = NULL;
}
/*
 * Verify that exactly RWB_ENTRY_COUNT user entries can be acquired and
 * that the next acquisition fails once the buffer is full.
 */
static void
test_rwb_acquire(void)
{
	struct ftl_rwb_entry *entry;
	size_t count;

	setup_rwb();

	/* Verify that it's possible to acquire all of the entries */
	for (count = 0; count < RWB_ENTRY_COUNT; ++count) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
		SPDK_CU_ASSERT_FATAL(entry);
		ftl_rwb_push(entry);
	}

	/* The buffer is now exhausted */
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
	CU_ASSERT_PTR_NULL(entry);

	cleanup_rwb();
}
/*
 * Verify that popped batches return entries in acquisition order (via the
 * LBA tag), that each batch holds XFER_SIZE entries, and that releasing a
 * batch makes its entries available for acquisition again.
 */
static void
test_rwb_pop(void)
{
struct ftl_rwb_entry *entry;
struct ftl_rwb_batch *batch;
size_t entry_count, i;
setup_rwb();
/* Acquire all entries */
for (i = 0; i < RWB_ENTRY_COUNT; ++i) {
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
SPDK_CU_ASSERT_FATAL(entry);
/* Tag each entry with its acquisition index */
entry->lba = i;
ftl_rwb_push(entry);
}
/* Pop all batches and free them */
for (i = 0; i < RWB_ENTRY_COUNT / XFER_SIZE; ++i) {
batch = ftl_rwb_pop(g_rwb);
SPDK_CU_ASSERT_FATAL(batch);
entry_count = 0;
ftl_rwb_foreach(entry, batch) {
CU_ASSERT_EQUAL(entry->lba, i * XFER_SIZE + entry_count);
entry_count++;
}
CU_ASSERT_EQUAL(entry_count, XFER_SIZE);
ftl_rwb_batch_release(batch);
}
/* Acquire all entries once more */
for (i = 0; i < RWB_ENTRY_COUNT; ++i) {
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
SPDK_CU_ASSERT_FATAL(entry);
ftl_rwb_push(entry);
}
/* Pop one batch and check we can acquire XFER_SIZE entries */
batch = ftl_rwb_pop(g_rwb);
SPDK_CU_ASSERT_FATAL(batch);
ftl_rwb_batch_release(batch);
for (i = 0; i < XFER_SIZE; ++i) {
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
SPDK_CU_ASSERT_FATAL(entry);
ftl_rwb_push(entry);
}
/* Only one batch was released, so the buffer is full again */
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
CU_ASSERT_PTR_NULL(entry);
cleanup_rwb();
}
/*
 * Verify that a reverted batch is returned to the queue and can be
 * popped again: after one revert, all batches must still be poppable.
 */
static void
test_rwb_batch_revert(void)
{
struct ftl_rwb_batch *batch;
struct ftl_rwb_entry *entry;
size_t i;
setup_rwb();
for (i = 0; i < RWB_ENTRY_COUNT; ++i) {
entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
SPDK_CU_ASSERT_FATAL(entry);
ftl_rwb_push(entry);
}
/* Pop one batch and revert it */
batch = ftl_rwb_pop(g_rwb);
SPDK_CU_ASSERT_FATAL(batch);
ftl_rwb_batch_revert(batch);
/* Verify all of the batches */
for (i = 0; i < RWB_ENTRY_COUNT / XFER_SIZE; ++i) {
batch = ftl_rwb_pop(g_rwb);
CU_ASSERT_PTR_NOT_NULL_FATAL(batch);
}
cleanup_rwb();
}
/* Verify that an entry looked up by offset reports that same offset as
 * its position within the buffer.
 */
static void
test_rwb_entry_from_offset(void)
{
	struct ftl_rwb_entry *entry;
	struct ftl_ppa ppa = { .cached = 1 };
	size_t offset;

	setup_rwb();

	for (offset = 0; offset < RWB_ENTRY_COUNT; ++offset) {
		ppa.offset = offset;

		entry = ftl_rwb_entry_from_offset(g_rwb, offset);
		CU_ASSERT_EQUAL(ppa.offset, entry->pos);
	}

	cleanup_rwb();
}
/* Worker thread body: push ENTRIES_PER_WORKER entries into the shared
 * write buffer, spinning whenever it is temporarily full, then set the
 * done flag passed in via ctx.
 */
static void *
test_rwb_worker(void *ctx)
{
#define ENTRIES_PER_WORKER (16 * RWB_ENTRY_COUNT)
	struct ftl_rwb_entry *entry;
	bool *done = ctx;
	size_t count;

	for (count = 0; count < ENTRIES_PER_WORKER; ++count) {
		/* Spin until an entry becomes available */
		do {
			entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
			if (!entry) {
				/* Allow other threads to run under valgrind */
				pthread_yield();
			}
		} while (!entry);

		entry->flags = 0;
		ftl_rwb_push(entry);
	}

	/* Signal the consumer that this worker has pushed everything */
	__atomic_store_n(done, true, __ATOMIC_SEQ_CST);
	return NULL;
}
/* Spawn several producer threads pushing entries concurrently and verify
 * the consumer (this thread) pops back exactly the total number pushed.
 */
static void
test_rwb_parallel(void)
{
	struct ftl_rwb_batch *batch;
	struct ftl_rwb_entry *entry;
#define NUM_PARALLEL_WORKERS 4
	pthread_t workers[NUM_PARALLEL_WORKERS];
	bool done[NUM_PARALLEL_WORKERS];
	size_t i, num_entries = 0;
	int rc, num_done;

	setup_rwb();

	for (i = 0; i < NUM_PARALLEL_WORKERS; ++i) {
		done[i] = false;
		rc = pthread_create(&workers[i], NULL, test_rwb_worker, (void *)&done[i]);
		CU_ASSERT_TRUE(rc == 0);
	}

	while (1) {
		batch = ftl_rwb_pop(g_rwb);
		if (batch) {
			ftl_rwb_foreach(entry, batch) {
				num_entries++;
			}

			ftl_rwb_batch_release(batch);
		} else {
			num_done = 0;
			for (i = 0; i < NUM_PARALLEL_WORKERS; ++i) {
				if (__atomic_load_n(&done[i], __ATOMIC_SEQ_CST)) {
					num_done++;
				}
			}

			if (num_done == NUM_PARALLEL_WORKERS) {
				for (i = 0; i < NUM_PARALLEL_WORKERS; ++i) {
					pthread_join(workers[i], NULL);
				}

				/* A worker may have pushed entries after the
				 * last (empty) pop above but before its done
				 * flag was observed.  Drain any such leftover
				 * batches so the final count is exact.
				 */
				while ((batch = ftl_rwb_pop(g_rwb))) {
					ftl_rwb_foreach(entry, batch) {
						num_entries++;
					}

					ftl_rwb_batch_release(batch);
				}

				break;
			}

			/* Allow other threads to run under valgrind */
			pthread_yield();
		}
	}

	CU_ASSERT_TRUE(num_entries == NUM_PARALLEL_WORKERS * ENTRIES_PER_WORKER);

	cleanup_rwb();
}
/* Verify the default limits: both entry types start with a limit equal to
 * the total entry count, so acquisitions of either type succeed.
 */
static void
test_rwb_limits_base(void)
{
	struct ftl_rwb_entry *entry;
	size_t limits[FTL_RWB_TYPE_MAX];

	setup_rwb();

	/* Freshly initialized buffer: no type is throttled */
	ftl_rwb_get_limits(g_rwb, limits);
	CU_ASSERT_TRUE(limits[FTL_RWB_TYPE_INTERNAL] == ftl_rwb_entry_cnt(g_rwb));
	CU_ASSERT_TRUE(limits[FTL_RWB_TYPE_USER] == ftl_rwb_entry_cnt(g_rwb));

	/* Verify it's possible to acquire both type of entries */
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
	CU_ASSERT_PTR_NOT_NULL_FATAL(entry);

	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
	CU_ASSERT_PTR_NOT_NULL_FATAL(entry);

	cleanup_rwb();
}
/* Verify that limits written with ftl_rwb_set_limits() are read back
 * unchanged by ftl_rwb_get_limits(), both for the default values and for
 * an all-zero limit array.
 */
static void
test_rwb_limits_set(void)
{
	size_t limits[FTL_RWB_TYPE_MAX], check[FTL_RWB_TYPE_MAX];
	size_t i;

	setup_rwb();

	/* Check valid limits: setting the current values back is a no-op */
	ftl_rwb_get_limits(g_rwb, limits);
	memcpy(check, limits, sizeof(limits));
	ftl_rwb_set_limits(g_rwb, limits);
	ftl_rwb_get_limits(g_rwb, limits);
	CU_ASSERT(memcmp(check, limits, sizeof(limits)) == 0);

	/* Check that an all-zero limit array round-trips through set/get.
	 * Note: get_limits is intentionally called once, before the loop.
	 * Calling it inside the loop (as the original code did) would
	 * overwrite the zeroes written in earlier iterations, leaving only
	 * the last type's limit at 0.
	 */
	ftl_rwb_get_limits(g_rwb, limits);
	for (i = 0; i < FTL_RWB_TYPE_MAX; ++i) {
		limits[i] = 0;
	}

	memcpy(check, limits, sizeof(limits));
	ftl_rwb_set_limits(g_rwb, limits);
	ftl_rwb_get_limits(g_rwb, limits);
	CU_ASSERT(memcmp(check, limits, sizeof(limits)) == 0);

	cleanup_rwb();
}
/* Verify that per-type limits actually throttle acquisition: a type with a
 * zero limit yields no entries, a type at its limit yields no further
 * entries, and releasing a batch frees up room below the limit again.
 */
static void
test_rwb_limits_applied(void)
{
	struct ftl_rwb_entry *entry;
	struct ftl_rwb_batch *batch;
	size_t limits[FTL_RWB_TYPE_MAX];
	size_t i;

	setup_rwb();

	/* Check that it's impossible to acquire any entries when the limits are */
	/* set to 0 */
	ftl_rwb_get_limits(g_rwb, limits);
	limits[FTL_RWB_TYPE_USER] = 0;
	ftl_rwb_set_limits(g_rwb, limits);
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
	CU_ASSERT_PTR_NULL(entry);

	/* Restore the user limit and zero the internal one instead */
	limits[FTL_RWB_TYPE_USER] = ftl_rwb_entry_cnt(g_rwb);
	limits[FTL_RWB_TYPE_INTERNAL] = 0;
	ftl_rwb_set_limits(g_rwb, limits);
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
	CU_ASSERT_PTR_NULL(entry);

	/* Check positive limits */
#define TEST_LIMIT XFER_SIZE
	limits[FTL_RWB_TYPE_USER] = ftl_rwb_entry_cnt(g_rwb);
	limits[FTL_RWB_TYPE_INTERNAL] = TEST_LIMIT;
	ftl_rwb_set_limits(g_rwb, limits);
	for (i = 0; i < TEST_LIMIT; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
		SPDK_CU_ASSERT_FATAL(entry);
		/* Mark the entry internal so it counts against the internal
		 * limit -- presumably checked by the rwb on acquire/release;
		 * TODO confirm against ftl_rwb.c
		 */
		entry->flags = FTL_IO_INTERNAL;
		ftl_rwb_push(entry);
	}

	/* Now we expect null, since we've reached threshold */
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
	CU_ASSERT_PTR_NULL(entry);

	/* Complete the entries and check we can retrieve the entries once again */
	batch = ftl_rwb_pop(g_rwb);
	SPDK_CU_ASSERT_FATAL(batch);
	ftl_rwb_batch_release(batch);

	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
	CU_ASSERT_PTR_NOT_NULL_FATAL(entry);
	entry->flags = FTL_IO_INTERNAL;

	/* Set the same limit but this time for user entries */
	limits[FTL_RWB_TYPE_USER] = TEST_LIMIT;
	limits[FTL_RWB_TYPE_INTERNAL] = ftl_rwb_entry_cnt(g_rwb);
	ftl_rwb_set_limits(g_rwb, limits);
	for (i = 0; i < TEST_LIMIT; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
		SPDK_CU_ASSERT_FATAL(entry);
		ftl_rwb_push(entry);
	}

	/* Now we expect null, since we've reached threshold */
	entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_USER);
	CU_ASSERT_PTR_NULL(entry);

	/* Check that we're still able to acquire a number of internal entries */
	/* while the user entires are being throttled */
	for (i = 0; i < TEST_LIMIT; ++i) {
		entry = ftl_rwb_acquire(g_rwb, FTL_RWB_TYPE_INTERNAL);
		SPDK_CU_ASSERT_FATAL(entry);
	}

	cleanup_rwb();
}
/* Register and run every write-buffer test case; returns the number of
 * CUnit failures (0 on full success).
 */
int
main(int argc, char **argv)
{
	CU_pSuite suite;
	unsigned int num_failures;
	size_t i;
	/* All test cases, registered in order with the suite below */
	static const struct {
		const char *name;
		void (*func)(void);
	} tests[] = {
		{ "test_rwb_acquire", test_rwb_acquire },
		{ "test_rwb_pop", test_rwb_pop },
		{ "test_rwb_batch_revert", test_rwb_batch_revert },
		{ "test_rwb_entry_from_offset", test_rwb_entry_from_offset },
		{ "test_rwb_parallel", test_rwb_parallel },
		{ "test_rwb_limits_base", test_rwb_limits_base },
		{ "test_rwb_limits_set", test_rwb_limits_set },
		{ "test_rwb_limits_applied", test_rwb_limits_applied },
	};

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("ftl_rwb_suite", NULL, NULL);
	if (!suite) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); ++i) {
		if (CU_add_test(suite, tests[i].name, tests[i].func) == NULL) {
			CU_cleanup_registry();
			return CU_get_error();
		}
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}

View File

@ -144,6 +144,12 @@ if [ $(uname -s) = Linux ]; then
$valgrind $testdir/lib/vhost/vhost.c/vhost_ut
fi
if grep -q '#define SPDK_CONFIG_FTL 1' $rootdir/include/spdk/config.h; then
$valgrind $testdir/lib/ftl/ftl_rwb.c/ftl_rwb_ut
$valgrind $testdir/lib/ftl/ftl_ppa/ftl_ppa_ut
$valgrind $testdir/lib/ftl/ftl_band.c/ftl_band_ut
fi
# local unit test coverage
if [ "$cov_avail" = "yes" ]; then
$LCOV -q -d . -c -t "$(hostname)" -o $UT_COVERAGE/ut_cov_test.info