lib/ftl: IO completion queue associated with io channel

Instead of completing user write IOs immediately, put them on the IO
channel's completion queue. This allows IO requests to be completed
in batches and reduces unnecessary message sending in the bdev layer.

Change-Id: I1bd1e076ff31e9e5faef70f9d0b3512d8f3a31b7
Signed-off-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/542
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Wojciech Malikowski 2019-11-21 10:20:24 +01:00 committed by Tomasz Zawadzki
parent a7a0d02d8b
commit 68cd46b622
4 changed files with 56 additions and 12 deletions
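
The change below defers write completions to a per-channel queue that is
drained by a poller. As a standalone illustration of that pattern (not the
FTL code itself; all names such as my_io and my_channel are hypothetical,
only the sys/queue.h macros match the real code), a minimal sketch:

/* Deferred-completion sketch: queue completions, drain them in batches. */
#include <stdio.h>
#include <sys/queue.h>

struct my_io {
	int id;
	TAILQ_ENTRY(my_io) tailq_entry;
};

struct my_channel {
	TAILQ_HEAD(, my_io) write_cmpl_queue;
};

/* Instead of completing the write inline, park it on the channel queue. */
static void
defer_completion(struct my_channel *ch, struct my_io *io)
{
	TAILQ_INSERT_TAIL(&ch->write_cmpl_queue, io, tailq_entry);
}

/* A poller later drains the whole batch in one pass; returning nonzero
 * signals that work was done, mirroring the SPDK poller convention. */
static int
channel_poll(struct my_channel *ch)
{
	struct my_io *io;
	int did_work = 0;

	while (!TAILQ_EMPTY(&ch->write_cmpl_queue)) {
		io = TAILQ_FIRST(&ch->write_cmpl_queue);
		TAILQ_REMOVE(&ch->write_cmpl_queue, io, tailq_entry);
		printf("completed io %d\n", io->id);
		did_work = 1;
	}

	return did_work;
}

int
main(void)
{
	struct my_channel ch;
	struct my_io a = { .id = 1 }, b = { .id = 2 };

	TAILQ_INIT(&ch.write_cmpl_queue);
	defer_completion(&ch, &a);
	defer_completion(&ch, &b);
	channel_poll(&ch);	/* both IOs complete in a single batch */
	return 0;
}

Draining the queue in one poller pass is what lets completions be delivered
in batches rather than one message per IO.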

@@ -948,7 +948,7 @@ ftl_add_to_retry_queue(struct ftl_io *io)
 {
 	if (!(io->flags & FTL_IO_RETRY)) {
 		io->flags |= FTL_IO_RETRY;
-		TAILQ_INSERT_TAIL(&io->dev->retry_queue, io, retry_entry);
+		TAILQ_INSERT_TAIL(&io->dev->retry_queue, io, tailq_entry);
 	}
 }
@@ -1641,7 +1641,7 @@ ftl_submit_write(struct ftl_wptr *wptr, struct ftl_io *io)
 		/* There are no guarantees of the order of completion of NVMe IO submission queue */
 		/* so wait until zone is not busy before submitting another write */
 		if (!ftl_is_append_supported(dev) && wptr->zone->busy) {
-			TAILQ_INSERT_TAIL(&wptr->pending_queue, io, retry_entry);
+			TAILQ_INSERT_TAIL(&wptr->pending_queue, io, tailq_entry);
 			rc = -EAGAIN;
 			break;
 		}
@@ -1649,7 +1649,7 @@ ftl_submit_write(struct ftl_wptr *wptr, struct ftl_io *io)
 		rc = ftl_submit_child_write(wptr, io, dev->xfer_size);
 		if (spdk_unlikely(rc)) {
 			if (rc == -EAGAIN) {
-				TAILQ_INSERT_TAIL(&wptr->pending_queue, io, retry_entry);
+				TAILQ_INSERT_TAIL(&wptr->pending_queue, io, tailq_entry);
 			} else {
 				ftl_io_fail(io, rc);
 			}
@@ -1700,7 +1700,7 @@ ftl_wptr_process_writes(struct ftl_wptr *wptr)
 	if (spdk_unlikely(!TAILQ_EMPTY(&wptr->pending_queue))) {
 		io = TAILQ_FIRST(&wptr->pending_queue);
-		TAILQ_REMOVE(&wptr->pending_queue, io, retry_entry);
+		TAILQ_REMOVE(&wptr->pending_queue, io, tailq_entry);

 		if (ftl_submit_write(wptr, io) == -EAGAIN) {
 			return 0;
@@ -1813,6 +1813,7 @@ static int
 ftl_rwb_fill(struct ftl_io *io)
 {
 	struct spdk_ftl_dev *dev = io->dev;
+	struct ftl_io_channel *ioch;
 	struct ftl_rwb_entry *entry;
 	struct ftl_addr addr = { .cached = 1 };
 	int flags = ftl_rwb_flags_from_io(io);
@@ -1846,7 +1847,8 @@ ftl_rwb_fill(struct ftl_io *io)
 		if (ftl_dev_has_nv_cache(dev) && !(io->flags & FTL_IO_BYPASS_CACHE)) {
 			ftl_write_nv_cache(io);
 		} else {
-			ftl_io_complete(io);
+			ioch = spdk_io_channel_get_ctx(io->ioch);
+			TAILQ_INSERT_TAIL(&ioch->write_cmpl_queue, io, tailq_entry);
 		}
 	}
@@ -2283,7 +2285,7 @@ ftl_process_retry_queue(struct spdk_ftl_dev *dev)
 		}

 		io->flags &= ~FTL_IO_RETRY;
-		TAILQ_REMOVE(&dev->retry_queue, io, retry_entry);
+		TAILQ_REMOVE(&dev->retry_queue, io, tailq_entry);

 		if (ftl_io_done(io)) {
 			ftl_io_complete(io);
@@ -2291,6 +2293,25 @@ ftl_process_retry_queue(struct spdk_ftl_dev *dev)
 	}
 }

+int
+ftl_io_channel_poll(void *arg)
+{
+	struct ftl_io_channel *ch = arg;
+	struct ftl_io *io;
+
+	if (TAILQ_EMPTY(&ch->write_cmpl_queue)) {
+		return 0;
+	}
+
+	while (!TAILQ_EMPTY(&ch->write_cmpl_queue)) {
+		io = TAILQ_FIRST(&ch->write_cmpl_queue);
+		TAILQ_REMOVE(&ch->write_cmpl_queue, io, tailq_entry);
+		ftl_io_complete(io);
+	}
+
+	return 1;
+}
+
 int
 ftl_task_core(void *ctx)
 {
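
A side effect of this change is that the retry_entry link is renamed to
tailq_entry: the same embedded TAILQ link now threads an IO onto the device
retry queue, a write pointer's pending queue, or a channel's write completion
queue. This is safe because those memberships are mutually exclusive; a
structure can sit on at most one list per TAILQ_ENTRY field at a time. A
small standalone sketch of that reuse (hypothetical names):

#include <assert.h>
#include <sys/queue.h>

struct io {
	TAILQ_ENTRY(io) tailq_entry;	/* single link shared by all queues */
};

TAILQ_HEAD(io_list, io);

int
main(void)
{
	struct io_list retry_queue, write_cmpl_queue;
	struct io io;

	TAILQ_INIT(&retry_queue);
	TAILQ_INIT(&write_cmpl_queue);

	/* The same entry field can link the IO onto either queue, as long
	 * as it is removed from one before being inserted into the other. */
	TAILQ_INSERT_TAIL(&retry_queue, &io, tailq_entry);
	TAILQ_REMOVE(&retry_queue, &io, tailq_entry);
	TAILQ_INSERT_TAIL(&write_cmpl_queue, &io, tailq_entry);

	assert(TAILQ_EMPTY(&retry_queue));
	assert(TAILQ_FIRST(&write_cmpl_queue) == &io);
	return 0;
}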

@@ -267,6 +267,7 @@ int ftl_nv_cache_write_header(struct ftl_nv_cache *nv_cache, bool shutdown,
 int ftl_nv_cache_scrub(struct ftl_nv_cache *nv_cache, spdk_bdev_io_completion_cb cb_fn,
 		       void *cb_arg);
 void ftl_get_media_events(struct spdk_ftl_dev *dev);
+int ftl_io_channel_poll(void *arg);

 struct spdk_io_channel *
 ftl_get_io_channel(const struct spdk_ftl_dev *dev);

@@ -984,21 +984,36 @@ ftl_io_channel_create_cb(void *io_device, void *ctx)
 	ioch->base_ioch = spdk_bdev_get_io_channel(dev->base_bdev_desc);
 	if (!ioch->base_ioch) {
 		SPDK_ERRLOG("Failed to create base bdev IO channel\n");
-		spdk_mempool_free(ioch->io_pool);
-		return -1;
+		goto fail_ioch;
 	}

 	if (ftl_dev_has_nv_cache(dev)) {
 		ioch->cache_ioch = spdk_bdev_get_io_channel(dev->nv_cache.bdev_desc);
 		if (!ioch->cache_ioch) {
 			SPDK_ERRLOG("Failed to create cache IO channel\n");
-			spdk_mempool_free(ioch->io_pool);
-			spdk_put_io_channel(ioch->base_ioch);
-			return -1;
+			goto fail_cache;
 		}
 	}

+	TAILQ_INIT(&ioch->write_cmpl_queue);
+	ioch->poller = spdk_poller_register(ftl_io_channel_poll, ioch, 0);
+	if (!ioch->poller) {
+		SPDK_ERRLOG("Failed to register IO channel poller\n");
+		goto fail_poller;
+	}
+
 	return 0;
+
+fail_poller:
+	if (ioch->cache_ioch) {
+		spdk_put_io_channel(ioch->cache_ioch);
+	}
+fail_cache:
+	spdk_put_io_channel(ioch->base_ioch);
+fail_ioch:
+	spdk_mempool_free(ioch->io_pool);
+
+	return -1;
 }

 static void
@@ -1006,6 +1021,8 @@ ftl_io_channel_destroy_cb(void *io_device, void *ctx)
 {
 	struct ftl_io_channel *ioch = ctx;

+	spdk_poller_unregister(&ioch->poller);
+
 	spdk_mempool_free(ioch->io_pool);
 	spdk_put_io_channel(ioch->base_ioch);
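
The create callback's error handling is reworked into the conventional C
goto-unwind ladder: acquire resources in order, and on failure jump to a
label that releases everything acquired so far, in reverse. A standalone
sketch of the idiom (malloc/free stand in for the pool, channels, and
poller; all names are hypothetical):

#include <stdio.h>
#include <stdlib.h>

static int
create_channel(int with_cache)
{
	void *pool = NULL, *base = NULL, *cache = NULL, *poller = NULL;

	pool = malloc(16);
	if (!pool) {
		return -1;
	}

	base = malloc(16);
	if (!base) {
		goto fail_base;
	}

	if (with_cache) {
		cache = malloc(16);
		if (!cache) {
			goto fail_cache;
		}
	}

	poller = malloc(16);
	if (!poller) {
		goto fail_poller;
	}

	/* On success the resources are intentionally kept: in the real code
	 * they are owned by the channel until the destroy callback runs. */
	printf("channel ready\n");
	return 0;

	/* Unwind strictly in reverse acquisition order. */
fail_poller:
	free(cache);	/* free(NULL) is a no-op, like the if-guard in the diff */
fail_cache:
	free(base);
fail_base:
	free(pool);
	return -1;
}

int
main(void)
{
	return create_channel(1) ? EXIT_FAILURE : EXIT_SUCCESS;
}

As in the diff, the label taken after the last acquisition must tolerate the
optional resource being absent, hence the NULL-safe release of the cache.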

@@ -134,6 +134,10 @@ struct ftl_io_channel {
 	struct spdk_io_channel *base_ioch;
 	/* Persistent cache IO channel */
 	struct spdk_io_channel *cache_ioch;
+	/* Poller used for completing write requests */
+	struct spdk_poller *poller;
+	/* Write completion queue */
+	TAILQ_HEAD(, ftl_io) write_cmpl_queue;
 };

 /* General IO descriptor */
@@ -218,7 +222,8 @@ struct ftl_io {
 	/* Trace group id */
 	uint64_t trace;

-	TAILQ_ENTRY(ftl_io) retry_entry;
+	/* Used by retry and write completion queues */
+	TAILQ_ENTRY(ftl_io) tailq_entry;
 };

 /* Metadata IO */
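
For reference, a hedged sketch of how the new poller and queue members plug
into the SPDK thread API (assuming spdk/thread.h; this is not the FTL code,
the example_* names are hypothetical). A period of 0 microseconds asks the
framework to run the poller on every iteration of the thread that owns the
channel, so queued writes are completed promptly, in batch, and on the same
thread that submitted them:

#include <sys/queue.h>
#include "spdk/thread.h"

struct example_io {
	TAILQ_ENTRY(example_io) tailq_entry;
};

struct example_channel {
	struct spdk_poller *poller;
	TAILQ_HEAD(, example_io) write_cmpl_queue;
};

static int
example_poll(void *arg)
{
	struct example_channel *ch = arg;

	/* Drain ch->write_cmpl_queue here; return 1 if any IO was
	 * completed, 0 if the poller had nothing to do. */
	return TAILQ_EMPTY(&ch->write_cmpl_queue) ? 0 : 1;
}

static int
example_channel_create(struct example_channel *ch)
{
	TAILQ_INIT(&ch->write_cmpl_queue);

	/* Period 0: poll on every iteration of the owning thread. */
	ch->poller = spdk_poller_register(example_poll, ch, 0);
	return ch->poller ? 0 : -1;
}

static void
example_channel_destroy(struct example_channel *ch)
{
	/* Also clears ch->poller, matching the destroy callback above. */
	spdk_poller_unregister(&ch->poller);
}

Because completion now happens from a poller on the submitting thread, the
completions can be delivered in batches, which is where the commit message's
reduction in bdev-layer message sending comes from.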