lib/ftl: IO channel array

The IO channel pointers are now stored inside an array of the device
they belong to.  Once write buffer entries are tied to IO channels,
it'll provide a method for dereferencing an entry from its address.

Change-Id: Iaf401525eb0f5af8dc6047a1dc8bae11b56761d7
Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/901
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
This commit is contained in:
Konrad Sztyber 2020-01-24 12:02:21 +01:00 committed by Tomasz Zawadzki
parent 085bfa0d13
commit bfafd4e472
6 changed files with 179 additions and 20 deletions

View File

@ -96,6 +96,9 @@ struct spdk_ftl_conf {
/* Use append instead of write */
bool use_append;
/* Maximum supported number of IO channels */
uint32_t max_io_channels;
struct {
/* Maximum number of concurrent requests */
size_t max_request_cnt;

View File

@ -861,7 +861,8 @@ ftl_shutdown_complete(struct spdk_ftl_dev *dev)
struct ftl_io_channel *ioch = ftl_io_channel_get_ctx(dev->ioch);
return !__atomic_load_n(&dev->num_inflight, __ATOMIC_SEQ_CST) &&
LIST_EMPTY(&dev->wptr_list) && TAILQ_EMPTY(&ioch->retry_queue);
dev->num_io_channels == 1 && LIST_EMPTY(&dev->wptr_list) &&
TAILQ_EMPTY(&ioch->retry_queue);
}
void

View File

@ -199,6 +199,16 @@ struct spdk_ftl_dev {
/* Poller */
struct spdk_poller *core_poller;
/* IO channel array provides means for retrieving write buffer entries
* from their address stored in L2P. The address is divided into two
* parts - IO channel offset pointing at specific IO channel (within this
* array) and entry offset pointing at specific entry within that IO
* channel.
*/
struct ftl_io_channel **ioch_array;
TAILQ_HEAD(, ftl_io_channel) ioch_queue;
uint64_t num_io_channels;
/* Devices' list */
STAILQ_ENTRY(spdk_ftl_dev) stailq;
};

View File

@ -121,6 +121,7 @@ static const struct spdk_ftl_conf g_default_conf = {
* will result in lost data after recovery.
*/
.allow_open_bands = false,
.max_io_channels = 128,
.nv_cache = {
/* Maximum number of concurrent requests */
.max_request_cnt = 2048,
@ -535,16 +536,6 @@ ftl_dev_init_core_thread(struct spdk_ftl_dev *dev, const struct spdk_ftl_dev_ini
return 0;
}
static void
/* Release the core thread's IO channel and detach the device from its core
 * thread. Caller must have unregistered the core poller first (asserted).
 * NOTE(review): the hunk header (@ -535,16 +536,6) indicates this function is
 * being REMOVED by this commit — channel release moves to ftl_put_io_channel_cb
 * during halt. Verify against the full diff.
 */
ftl_dev_free_thread(struct spdk_ftl_dev *dev)
{
assert(dev->core_poller == NULL);
spdk_put_io_channel(dev->ioch);
dev->core_thread = NULL;
dev->ioch = NULL;
}
static int
ftl_dev_l2p_alloc(struct spdk_ftl_dev *dev)
{
@ -957,15 +948,42 @@ ftl_io_channel_get_ctx(struct spdk_io_channel *ioch)
return _ioch->ioch;
}
static void
/* Message handler run on the device's core thread (sent from
 * ftl_io_channel_create_cb): claims the first free slot in dev->ioch_array,
 * records the slot number in ioch->index, and appends the channel to
 * dev->ioch_queue. Serializing array updates on the core thread avoids
 * locking around ioch_array.
 */
ftl_io_channel_register(void *ctx)
{
struct ftl_io_channel *ioch = ctx;
struct spdk_ftl_dev *dev = ioch->dev;
uint32_t ioch_index;
/* Linear scan for a free (NULL) slot; a slot is guaranteed to exist because
 * create_cb already reserved a count via the num_io_channels atomic before
 * sending this message. */
for (ioch_index = 0; ioch_index < dev->conf.max_io_channels; ++ioch_index) {
if (dev->ioch_array[ioch_index] == NULL) {
dev->ioch_array[ioch_index] = ioch;
ioch->index = ioch_index;
break;
}
}
/* Debug-build check that the reservation above really held. */
assert(ioch_index < dev->conf.max_io_channels);
TAILQ_INSERT_TAIL(&dev->ioch_queue, ioch, tailq);
}
static int
ftl_io_channel_create_cb(void *io_device, void *ctx)
{
struct spdk_ftl_dev *dev = io_device;
struct _ftl_io_channel *_ioch = ctx;
struct ftl_io_channel *ioch;
uint32_t num_io_channels;
char mempool_name[32];
int rc;
num_io_channels = __atomic_fetch_add(&dev->num_io_channels, 1, __ATOMIC_SEQ_CST);
if (num_io_channels >= dev->conf.max_io_channels) {
SPDK_ERRLOG("Reached maximum number of IO channels\n");
__atomic_fetch_sub(&dev->num_io_channels, 1, __ATOMIC_SEQ_CST);
return -1;
}
ioch = calloc(1, sizeof(*ioch));
if (ioch == NULL) {
SPDK_ERRLOG("Failed to allocate IO channel\n");
@ -1016,6 +1034,9 @@ ftl_io_channel_create_cb(void *io_device, void *ctx)
}
_ioch->ioch = ioch;
spdk_thread_send_msg(ftl_get_core_thread(dev), ftl_io_channel_register, ioch);
return 0;
fail_poller:
@ -1032,27 +1053,58 @@ fail_ioch:
}
static void
/* Core-thread counterpart of ftl_io_channel_register (sent from
 * ftl_io_channel_destroy_cb): clears the channel's slot in dev->ioch_array,
 * unlinks it from dev->ioch_queue, drops the atomic channel count, and frees
 * the per-channel resources. Runs on the core thread so array/queue updates
 * stay serialized with registration.
 */
ftl_io_channel_unregister(void *ctx)
{
struct ftl_io_channel *ioch = ctx;
struct spdk_ftl_dev *dev = ioch->dev;
/* Only read inside assert(); unused in release builds. */
uint32_t num_io_channels __attribute__((unused));
assert(ioch->index < dev->conf.max_io_channels);
assert(dev->ioch_array[ioch->index] == ioch);
dev->ioch_array[ioch->index] = NULL;
TAILQ_REMOVE(&dev->ioch_queue, ioch, tailq);
num_io_channels = __atomic_fetch_sub(&dev->num_io_channels, 1, __ATOMIC_SEQ_CST);
assert(num_io_channels > 0);
spdk_mempool_free(ioch->io_pool);
free(ioch);
}
static void
/* IO-channel destroy callback: stops the channel's poller, releases the
 * underlying bdev channels, and hands the ftl_io_channel to the core thread
 * (ftl_io_channel_unregister) for array/queue removal and final free.
 *
 * NOTE(review): as rendered here, ioch is free()d and then written to and
 * passed to spdk_thread_send_msg afterwards — a use-after-free if read
 * literally. This hunk appears to merge the commit's REMOVED lines
 * (spdk_mempool_free / free) with its ADDED lines (NULL-ing the sub-channels
 * and deferring cleanup to the core thread); verify against the upstream diff
 * before treating either path as authoritative.
 */
ftl_io_channel_destroy_cb(void *io_device, void *ctx)
{
struct _ftl_io_channel *_ioch = ctx;
struct ftl_io_channel *ioch = _ioch->ioch;
struct spdk_ftl_dev *dev = ioch->dev;
spdk_poller_unregister(&ioch->poller);
spdk_mempool_free(ioch->io_pool);
spdk_put_io_channel(ioch->base_ioch);
/* cache_ioch only exists when a write-buffer cache bdev is attached. */
if (ioch->cache_ioch) {
spdk_put_io_channel(ioch->cache_ioch);
}
free(ioch);
ioch->base_ioch = NULL;
ioch->cache_ioch = NULL;
spdk_thread_send_msg(ftl_get_core_thread(dev), ftl_io_channel_unregister, ioch);
}
static int
ftl_dev_init_io_channel(struct spdk_ftl_dev *dev)
{
dev->ioch_array = calloc(dev->conf.max_io_channels, sizeof(*dev->ioch_array));
if (!dev->ioch_array) {
SPDK_ERRLOG("Failed to allocate IO channel array\n");
return -1;
}
TAILQ_INIT(&dev->ioch_queue);
dev->num_io_channels = 0;
spdk_io_device_register(dev, ftl_io_channel_create_cb, ftl_io_channel_destroy_cb,
sizeof(struct _ftl_io_channel),
NULL);
@ -1168,10 +1220,6 @@ ftl_dev_free_sync(struct spdk_ftl_dev *dev)
spdk_io_device_unregister(dev, NULL);
if (dev->core_thread) {
ftl_dev_free_thread(dev);
}
if (dev->bands) {
for (i = 0; i < ftl_get_num_bands(dev); ++i) {
free(dev->bands[i].zone_buf);
@ -1196,6 +1244,8 @@ ftl_dev_free_sync(struct spdk_ftl_dev *dev)
ftl_release_bdev(dev->nv_cache.bdev_desc);
ftl_release_bdev(dev->base_bdev_desc);
assert(dev->num_io_channels == 0);
free(dev->ioch_array);
free(dev->name);
free(dev->bands);
free(dev->l2p);
@ -1321,6 +1371,13 @@ static void
ftl_halt_complete_cb(void *ctx)
{
struct ftl_dev_init_ctx *fini_ctx = ctx;
struct spdk_ftl_dev *dev = fini_ctx->dev;
/* Make sure core IO channel has already been released */
if (dev->num_io_channels > 0) {
spdk_thread_send_msg(spdk_get_thread(), ftl_halt_complete_cb, ctx);
return;
}
ftl_dev_free_sync(fini_ctx->dev);
if (fini_ctx->cb_fn != NULL) {
@ -1330,6 +1387,16 @@ ftl_halt_complete_cb(void *ctx)
ftl_dev_free_init_ctx(fini_ctx);
}
static void
/* Halt-path step that releases the device's core IO channel, then re-posts
 * ftl_halt_complete_cb to the same thread. Posting (rather than calling
 * directly) lets the channel-destroy machinery run first; halt_complete_cb
 * then spins via message until num_io_channels reaches zero.
 */
ftl_put_io_channel_cb(void *ctx)
{
struct ftl_dev_init_ctx *fini_ctx = ctx;
struct spdk_ftl_dev *dev = fini_ctx->dev;
spdk_put_io_channel(dev->ioch);
spdk_thread_send_msg(spdk_get_thread(), ftl_halt_complete_cb, ctx);
}
static void
ftl_nv_cache_header_fini_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
@ -1343,7 +1410,7 @@ ftl_nv_cache_header_fini_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb
}
fini_ctx->halt_complete_status = rc;
spdk_thread_send_msg(fini_ctx->thread, ftl_halt_complete_cb, fini_ctx);
spdk_thread_send_msg(fini_ctx->thread, ftl_put_io_channel_cb, fini_ctx);
}
static int
@ -1360,7 +1427,7 @@ ftl_halt_poller(void *ctx)
ftl_nv_cache_header_fini_cb, fini_ctx);
} else {
fini_ctx->halt_complete_status = 0;
spdk_thread_send_msg(fini_ctx->thread, ftl_halt_complete_cb, fini_ctx);
spdk_thread_send_msg(fini_ctx->thread, ftl_put_io_channel_cb, fini_ctx);
}
}

View File

@ -129,6 +129,8 @@ struct ftl_io_channel {
struct spdk_ftl_dev *dev;
/* IO pool element size */
size_t elem_size;
/* Index within the IO channel array */
uint64_t index;
/* IO pool */
struct spdk_mempool *io_pool;
/* Underlying device IO channel */
@ -140,6 +142,7 @@ struct ftl_io_channel {
/* Write completion queue */
TAILQ_HEAD(, ftl_io) write_cmpl_queue;
TAILQ_HEAD(, ftl_io) retry_queue;
TAILQ_ENTRY(ftl_io_channel) tailq;
};
/* General IO descriptor */

View File

@ -75,6 +75,7 @@ setup_device(uint32_t num_threads)
dev = calloc(1, sizeof(*dev));
SPDK_CU_ASSERT_FATAL(dev != NULL);
dev->core_thread = spdk_get_thread();
dev->ioch = calloc(1, sizeof(*_ioch) + sizeof(struct spdk_io_channel));
SPDK_CU_ASSERT_FATAL(dev->ioch != NULL);
@ -110,6 +111,7 @@ free_device(struct spdk_ftl_dev *dev)
spdk_io_device_unregister(dev->base_bdev_desc, NULL);
free_threads();
free(dev->ioch_array);
free(dev->ioch);
free(dev);
}
@ -541,6 +543,77 @@ test_multi_generation(void)
free_device(dev);
}
static void
/*
 * Exercises IO channel bookkeeping on the device:
 *  - a single channel can be created and released (num_io_channels tracks it),
 *  - exactly max_io_channels channels can coexist, each receiving the expected
 *    slot index in the device's channel array,
 *  - creating one channel beyond the limit fails (returns NULL),
 *  - released slots (every other channel) are reused and re-indexed correctly.
 *
 * Fix: the fatal assertion after calloc() previously checked `ioch` — a stale,
 * already-released (but non-NULL) channel pointer — so an allocation failure
 * of `ioch_array` would never be caught. It now checks `ioch_array`.
 */
test_io_channel_create(void)
{
	struct spdk_ftl_dev *dev;
	struct spdk_io_channel *ioch, **ioch_array;
	struct ftl_io_channel *ftl_ioch;
	uint32_t ioch_idx;

	/* One extra thread to attempt creation past the channel limit. */
	dev = setup_device(g_default_conf.max_io_channels + 1);

	/* Single channel create/release round-trip. */
	ioch = spdk_get_io_channel(dev);
	CU_ASSERT(ioch != NULL);
	CU_ASSERT_EQUAL(dev->num_io_channels, 1);
	spdk_put_io_channel(ioch);
	poll_threads();
	CU_ASSERT_EQUAL(dev->num_io_channels, 0);

	ioch_array = calloc(dev->conf.max_io_channels, sizeof(*ioch_array));
	SPDK_CU_ASSERT_FATAL(ioch_array != NULL);

	/* Fill every available slot; indices must be assigned in order. */
	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ++ioch_idx) {
		set_thread(ioch_idx);
		ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
		SPDK_CU_ASSERT_FATAL(ioch != NULL);
		poll_threads();

		ftl_ioch = ftl_io_channel_get_ctx(ioch);
		CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
	}

	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);

	/* One past the limit must fail without disturbing the count. */
	set_thread(dev->conf.max_io_channels);
	ioch = spdk_get_io_channel(dev);
	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels);
	CU_ASSERT_EQUAL(ioch, NULL);

	/* Release every other channel to punch holes in the array. */
	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx += 2) {
		set_thread(ioch_idx);
		spdk_put_io_channel(ioch_array[ioch_idx]);
		ioch_array[ioch_idx] = NULL;
		poll_threads();
	}

	poll_threads();
	CU_ASSERT_EQUAL(dev->num_io_channels, dev->conf.max_io_channels / 2);

	/* Freed slots must be reusable and receive their original indices. */
	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
		set_thread(ioch_idx);

		if (ioch_array[ioch_idx] == NULL) {
			ioch = ioch_array[ioch_idx] = spdk_get_io_channel(dev);
			SPDK_CU_ASSERT_FATAL(ioch != NULL);
			poll_threads();

			ftl_ioch = ftl_io_channel_get_ctx(ioch);
			CU_ASSERT_EQUAL(ftl_ioch->index, ioch_idx);
		}
	}

	/* Tear everything down; count must drain to zero. */
	for (ioch_idx = 0; ioch_idx < dev->conf.max_io_channels; ioch_idx++) {
		set_thread(ioch_idx);
		spdk_put_io_channel(ioch_array[ioch_idx]);
	}

	poll_threads();
	CU_ASSERT_EQUAL(dev->num_io_channels, 0);

	free(ioch_array);

	free_device(dev);
}
int
main(int argc, char **argv)
{
@ -568,6 +641,8 @@ main(int argc, char **argv)
test_child_status) == NULL
|| CU_add_test(suite, "test_multi_generation",
test_multi_generation) == NULL
|| CU_add_test(suite, "test_io_channel_create",
test_io_channel_create) == NULL
) {
CU_cleanup_registry();