bdev: rename bdev_module_channel to bdev_shared_resource

It doesn't represent an io_channel, so let's
rename it to something less confusing.

Change-Id: I730eb0ff0eb7737a59c190812a25b327ccd8fc14
Suggested-by: Ben Walker <benjamin.walker@intel.com>
Signed-off-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/409998
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Dariusz Stojaczyk 2018-05-04 10:10:52 +02:00 committed by Daniel Verkamp
parent 2c200379f2
commit cc8bb51b24
2 changed files with 106 additions and 104 deletions

View File

@ -139,15 +139,15 @@ struct spdk_bdev_mgmt_channel {
bdev_io_stailq_t per_thread_cache; bdev_io_stailq_t per_thread_cache;
uint32_t per_thread_cache_count; uint32_t per_thread_cache_count;
TAILQ_HEAD(, spdk_bdev_module_channel) module_channels; TAILQ_HEAD(, spdk_bdev_shared_resource) shared_resources;
}; };
/* /*
* Per-module (or per-io_device) channel. Multiple bdevs built on the same io_device * Per-module (or per-io_device) data. Multiple bdevs built on the same io_device
* will queue here their IO that awaits retry. It makes it posible to retry sending * will queue here their IO that awaits retry. It makes it posible to retry sending
* IO to one bdev after IO from other bdev completes. * IO to one bdev after IO from other bdev completes.
*/ */
struct spdk_bdev_module_channel { struct spdk_bdev_shared_resource {
/* The bdev management channel */ /* The bdev management channel */
struct spdk_bdev_mgmt_channel *mgmt_ch; struct spdk_bdev_mgmt_channel *mgmt_ch;
@ -169,12 +169,12 @@ struct spdk_bdev_module_channel {
uint64_t nomem_threshold; uint64_t nomem_threshold;
/* I/O channel allocated by a bdev module */ /* I/O channel allocated by a bdev module */
struct spdk_io_channel *module_ch; struct spdk_io_channel *shared_ch;
/* Refcount of bdev channels using this channel */ /* Refcount of bdev channels using this resource */
uint32_t ref; uint32_t ref;
TAILQ_ENTRY(spdk_bdev_module_channel) link; TAILQ_ENTRY(spdk_bdev_shared_resource) link;
}; };
#define BDEV_CH_RESET_IN_PROGRESS (1 << 0) #define BDEV_CH_RESET_IN_PROGRESS (1 << 0)
@ -186,8 +186,8 @@ struct spdk_bdev_channel {
/* The channel for the underlying device */ /* The channel for the underlying device */
struct spdk_io_channel *channel; struct spdk_io_channel *channel;
/* Channel for the bdev module */ /* Per io_device per thread data */
struct spdk_bdev_module_channel *module_ch; struct spdk_bdev_shared_resource *shared_resource;
struct spdk_bdev_io_stat stat; struct spdk_bdev_io_stat stat;
@ -338,7 +338,7 @@ spdk_bdev_io_put_buf(struct spdk_bdev_io *bdev_io)
assert(bdev_io->u.bdev.iovcnt == 1); assert(bdev_io->u.bdev.iovcnt == 1);
buf = bdev_io->buf; buf = bdev_io->buf;
ch = bdev_io->ch->module_ch->mgmt_ch; ch = bdev_io->ch->shared_resource->mgmt_ch;
if (bdev_io->buf_len <= SPDK_BDEV_SMALL_BUF_MAX_SIZE) { if (bdev_io->buf_len <= SPDK_BDEV_SMALL_BUF_MAX_SIZE) {
pool = g_bdev_mgr.buf_small_pool; pool = g_bdev_mgr.buf_small_pool;
@ -375,7 +375,7 @@ spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, u
} }
assert(len <= SPDK_BDEV_LARGE_BUF_MAX_SIZE); assert(len <= SPDK_BDEV_LARGE_BUF_MAX_SIZE);
mgmt_ch = bdev_io->ch->module_ch->mgmt_ch; mgmt_ch = bdev_io->ch->shared_resource->mgmt_ch;
bdev_io->buf_len = len; bdev_io->buf_len = len;
bdev_io->get_buf_cb = cb; bdev_io->get_buf_cb = cb;
@ -457,7 +457,7 @@ spdk_bdev_mgmt_channel_create(void *io_device, void *ctx_buf)
STAILQ_INIT(&ch->per_thread_cache); STAILQ_INIT(&ch->per_thread_cache);
ch->per_thread_cache_count = 0; ch->per_thread_cache_count = 0;
TAILQ_INIT(&ch->module_channels); TAILQ_INIT(&ch->shared_resources);
return 0; return 0;
} }
@ -472,7 +472,7 @@ spdk_bdev_mgmt_channel_destroy(void *io_device, void *ctx_buf)
SPDK_ERRLOG("Pending I/O list wasn't empty on mgmt channel free\n"); SPDK_ERRLOG("Pending I/O list wasn't empty on mgmt channel free\n");
} }
if (!TAILQ_EMPTY(&ch->module_channels)) { if (!TAILQ_EMPTY(&ch->shared_resources)) {
SPDK_ERRLOG("Module channel list wasn't empty on mgmt channel free\n"); SPDK_ERRLOG("Module channel list wasn't empty on mgmt channel free\n");
} }
@ -808,7 +808,7 @@ spdk_bdev_finish(spdk_bdev_fini_cb cb_fn, void *cb_arg)
static struct spdk_bdev_io * static struct spdk_bdev_io *
spdk_bdev_get_io(struct spdk_bdev_channel *channel) spdk_bdev_get_io(struct spdk_bdev_channel *channel)
{ {
struct spdk_bdev_mgmt_channel *ch = channel->module_ch->mgmt_ch; struct spdk_bdev_mgmt_channel *ch = channel->shared_resource->mgmt_ch;
struct spdk_bdev_io *bdev_io; struct spdk_bdev_io *bdev_io;
if (ch->per_thread_cache_count > 0) { if (ch->per_thread_cache_count > 0) {
@ -829,7 +829,7 @@ spdk_bdev_get_io(struct spdk_bdev_channel *channel)
static void static void
spdk_bdev_put_io(struct spdk_bdev_io *bdev_io) spdk_bdev_put_io(struct spdk_bdev_io *bdev_io)
{ {
struct spdk_bdev_mgmt_channel *ch = bdev_io->ch->module_ch->mgmt_ch; struct spdk_bdev_mgmt_channel *ch = bdev_io->ch->shared_resource->mgmt_ch;
if (bdev_io->buf != NULL) { if (bdev_io->buf != NULL) {
spdk_bdev_io_put_buf(bdev_io); spdk_bdev_io_put_buf(bdev_io);
@ -849,7 +849,7 @@ _spdk_bdev_qos_io_submit(struct spdk_bdev_channel *ch)
struct spdk_bdev_io *bdev_io = NULL; struct spdk_bdev_io *bdev_io = NULL;
struct spdk_bdev *bdev = ch->bdev; struct spdk_bdev *bdev = ch->bdev;
struct spdk_bdev_qos *qos = bdev->qos; struct spdk_bdev_qos *qos = bdev->qos;
struct spdk_bdev_module_channel *module_ch = ch->module_ch; struct spdk_bdev_shared_resource *shared_resource = ch->shared_resource;
while (!TAILQ_EMPTY(&qos->queued)) { while (!TAILQ_EMPTY(&qos->queued)) {
if (qos->io_submitted_this_timeslice < qos->max_ios_per_timeslice) { if (qos->io_submitted_this_timeslice < qos->max_ios_per_timeslice) {
@ -857,7 +857,7 @@ _spdk_bdev_qos_io_submit(struct spdk_bdev_channel *ch)
TAILQ_REMOVE(&qos->queued, bdev_io, link); TAILQ_REMOVE(&qos->queued, bdev_io, link);
qos->io_submitted_this_timeslice++; qos->io_submitted_this_timeslice++;
ch->io_outstanding++; ch->io_outstanding++;
module_ch->io_outstanding++; shared_resource->io_outstanding++;
bdev->fn_table->submit_request(ch->channel, bdev_io); bdev->fn_table->submit_request(ch->channel, bdev_io);
} else { } else {
break; break;
@ -872,25 +872,25 @@ _spdk_bdev_io_submit(void *ctx)
struct spdk_bdev *bdev = bdev_io->bdev; struct spdk_bdev *bdev = bdev_io->bdev;
struct spdk_bdev_channel *bdev_ch = bdev_io->ch; struct spdk_bdev_channel *bdev_ch = bdev_io->ch;
struct spdk_io_channel *ch = bdev_ch->channel; struct spdk_io_channel *ch = bdev_ch->channel;
struct spdk_bdev_module_channel *module_ch = bdev_ch->module_ch; struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
bdev_io->submit_tsc = spdk_get_ticks(); bdev_io->submit_tsc = spdk_get_ticks();
bdev_ch->io_outstanding++; bdev_ch->io_outstanding++;
module_ch->io_outstanding++; shared_resource->io_outstanding++;
bdev_io->in_submit_request = true; bdev_io->in_submit_request = true;
if (spdk_likely(bdev_ch->flags == 0)) { if (spdk_likely(bdev_ch->flags == 0)) {
if (spdk_likely(TAILQ_EMPTY(&module_ch->nomem_io))) { if (spdk_likely(TAILQ_EMPTY(&shared_resource->nomem_io))) {
bdev->fn_table->submit_request(ch, bdev_io); bdev->fn_table->submit_request(ch, bdev_io);
} else { } else {
bdev_ch->io_outstanding--; bdev_ch->io_outstanding--;
module_ch->io_outstanding--; shared_resource->io_outstanding--;
TAILQ_INSERT_TAIL(&module_ch->nomem_io, bdev_io, link); TAILQ_INSERT_TAIL(&shared_resource->nomem_io, bdev_io, link);
} }
} else if (bdev_ch->flags & BDEV_CH_RESET_IN_PROGRESS) { } else if (bdev_ch->flags & BDEV_CH_RESET_IN_PROGRESS) {
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED); spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
} else if (bdev_ch->flags & BDEV_CH_QOS_ENABLED) { } else if (bdev_ch->flags & BDEV_CH_QOS_ENABLED) {
bdev_ch->io_outstanding--; bdev_ch->io_outstanding--;
module_ch->io_outstanding--; shared_resource->io_outstanding--;
TAILQ_INSERT_TAIL(&bdev->qos->queued, bdev_io, link); TAILQ_INSERT_TAIL(&bdev->qos->queued, bdev_io, link);
_spdk_bdev_qos_io_submit(bdev_ch); _spdk_bdev_qos_io_submit(bdev_ch);
} else { } else {
@ -1005,7 +1005,7 @@ _spdk_bdev_channel_create(struct spdk_bdev_channel *ch, void *io_device)
struct spdk_bdev *bdev = __bdev_from_io_dev(io_device); struct spdk_bdev *bdev = __bdev_from_io_dev(io_device);
struct spdk_io_channel *mgmt_io_ch; struct spdk_io_channel *mgmt_io_ch;
struct spdk_bdev_mgmt_channel *mgmt_ch; struct spdk_bdev_mgmt_channel *mgmt_ch;
struct spdk_bdev_module_channel *module_ch; struct spdk_bdev_shared_resource *shared_resource;
ch->bdev = bdev; ch->bdev = bdev;
ch->channel = bdev->fn_table->get_io_channel(bdev->ctxt); ch->channel = bdev->fn_table->get_io_channel(bdev->ctxt);
@ -1019,35 +1019,35 @@ _spdk_bdev_channel_create(struct spdk_bdev_channel *ch, void *io_device)
} }
mgmt_ch = spdk_io_channel_get_ctx(mgmt_io_ch); mgmt_ch = spdk_io_channel_get_ctx(mgmt_io_ch);
TAILQ_FOREACH(module_ch, &mgmt_ch->module_channels, link) { TAILQ_FOREACH(shared_resource, &mgmt_ch->shared_resources, link) {
if (module_ch->module_ch == ch->channel) { if (shared_resource->shared_ch == ch->channel) {
spdk_put_io_channel(mgmt_io_ch); spdk_put_io_channel(mgmt_io_ch);
module_ch->ref++; shared_resource->ref++;
break; break;
} }
} }
if (module_ch == NULL) { if (shared_resource == NULL) {
module_ch = calloc(1, sizeof(*module_ch)); shared_resource = calloc(1, sizeof(*shared_resource));
if (module_ch == NULL) { if (shared_resource == NULL) {
spdk_put_io_channel(mgmt_io_ch); spdk_put_io_channel(mgmt_io_ch);
return -1; return -1;
} }
module_ch->mgmt_ch = mgmt_ch; shared_resource->mgmt_ch = mgmt_ch;
module_ch->io_outstanding = 0; shared_resource->io_outstanding = 0;
TAILQ_INIT(&module_ch->nomem_io); TAILQ_INIT(&shared_resource->nomem_io);
module_ch->nomem_threshold = 0; shared_resource->nomem_threshold = 0;
module_ch->module_ch = ch->channel; shared_resource->shared_ch = ch->channel;
module_ch->ref = 1; shared_resource->ref = 1;
TAILQ_INSERT_TAIL(&mgmt_ch->module_channels, module_ch, link); TAILQ_INSERT_TAIL(&mgmt_ch->shared_resources, shared_resource, link);
} }
memset(&ch->stat, 0, sizeof(ch->stat)); memset(&ch->stat, 0, sizeof(ch->stat));
ch->io_outstanding = 0; ch->io_outstanding = 0;
TAILQ_INIT(&ch->queued_resets); TAILQ_INIT(&ch->queued_resets);
ch->flags = 0; ch->flags = 0;
ch->module_ch = module_ch; ch->shared_resource = shared_resource;
return 0; return 0;
} }
@ -1055,7 +1055,7 @@ _spdk_bdev_channel_create(struct spdk_bdev_channel *ch, void *io_device)
static void static void
_spdk_bdev_channel_destroy_resource(struct spdk_bdev_channel *ch) _spdk_bdev_channel_destroy_resource(struct spdk_bdev_channel *ch)
{ {
struct spdk_bdev_module_channel *module_ch; struct spdk_bdev_shared_resource *shared_resource;
if (!ch) { if (!ch) {
return; return;
@ -1067,15 +1067,16 @@ _spdk_bdev_channel_destroy_resource(struct spdk_bdev_channel *ch)
assert(ch->io_outstanding == 0); assert(ch->io_outstanding == 0);
module_ch = ch->module_ch; shared_resource = ch->shared_resource;
if (module_ch) { if (shared_resource) {
assert(module_ch->ref > 0); assert(ch->io_outstanding == 0);
module_ch->ref--; assert(shared_resource->ref > 0);
if (module_ch->ref == 0) { shared_resource->ref--;
assert(module_ch->io_outstanding == 0); if (shared_resource->ref == 0) {
spdk_put_io_channel(spdk_io_channel_from_ctx(module_ch->mgmt_ch)); assert(shared_resource->io_outstanding == 0);
TAILQ_REMOVE(&module_ch->mgmt_ch->module_channels, module_ch, link); spdk_put_io_channel(spdk_io_channel_from_ctx(shared_resource->mgmt_ch));
free(module_ch); TAILQ_REMOVE(&shared_resource->mgmt_ch->shared_resources, shared_resource, link);
free(shared_resource);
} }
} }
} }
@ -1222,7 +1223,7 @@ _spdk_bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_channel *ch)
*/ */
if (bdev_io->type != SPDK_BDEV_IO_TYPE_RESET) { if (bdev_io->type != SPDK_BDEV_IO_TYPE_RESET) {
ch->io_outstanding++; ch->io_outstanding++;
ch->module_ch->io_outstanding++; ch->shared_resource->io_outstanding++;
} }
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED); spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
} }
@ -1233,12 +1234,12 @@ static void
_spdk_bdev_channel_destroy(struct spdk_bdev_channel *ch) _spdk_bdev_channel_destroy(struct spdk_bdev_channel *ch)
{ {
struct spdk_bdev_mgmt_channel *mgmt_ch; struct spdk_bdev_mgmt_channel *mgmt_ch;
struct spdk_bdev_module_channel *module_ch = ch->module_ch; struct spdk_bdev_shared_resource *shared_resource = ch->shared_resource;
mgmt_ch = module_ch->mgmt_ch; mgmt_ch = shared_resource->mgmt_ch;
_spdk_bdev_abort_queued_io(&ch->queued_resets, ch); _spdk_bdev_abort_queued_io(&ch->queued_resets, ch);
_spdk_bdev_abort_queued_io(&module_ch->nomem_io, ch); _spdk_bdev_abort_queued_io(&shared_resource->nomem_io, ch);
_spdk_bdev_abort_buf_io(&mgmt_ch->need_buf_small, ch); _spdk_bdev_abort_buf_io(&mgmt_ch->need_buf_small, ch);
_spdk_bdev_abort_buf_io(&mgmt_ch->need_buf_large, ch); _spdk_bdev_abort_buf_io(&mgmt_ch->need_buf_large, ch);
@ -1907,16 +1908,16 @@ _spdk_bdev_reset_freeze_channel(struct spdk_io_channel_iter *i)
struct spdk_io_channel *ch; struct spdk_io_channel *ch;
struct spdk_bdev_channel *channel; struct spdk_bdev_channel *channel;
struct spdk_bdev_mgmt_channel *mgmt_channel; struct spdk_bdev_mgmt_channel *mgmt_channel;
struct spdk_bdev_module_channel *module_ch; struct spdk_bdev_shared_resource *shared_resource;
ch = spdk_io_channel_iter_get_channel(i); ch = spdk_io_channel_iter_get_channel(i);
channel = spdk_io_channel_get_ctx(ch); channel = spdk_io_channel_get_ctx(ch);
module_ch = channel->module_ch; shared_resource = channel->shared_resource;
mgmt_channel = module_ch->mgmt_ch; mgmt_channel = shared_resource->mgmt_ch;
channel->flags |= BDEV_CH_RESET_IN_PROGRESS; channel->flags |= BDEV_CH_RESET_IN_PROGRESS;
_spdk_bdev_abort_queued_io(&module_ch->nomem_io, channel); _spdk_bdev_abort_queued_io(&shared_resource->nomem_io, channel);
_spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_small, channel); _spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_small, channel);
_spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_large, channel); _spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_large, channel);
@ -1929,15 +1930,15 @@ _spdk_bdev_reset_freeze_qos_channel(void *ctx)
struct spdk_bdev *bdev = ctx; struct spdk_bdev *bdev = ctx;
struct spdk_bdev_mgmt_channel *mgmt_channel = NULL; struct spdk_bdev_mgmt_channel *mgmt_channel = NULL;
struct spdk_bdev_channel *qos_channel = bdev->qos->ch; struct spdk_bdev_channel *qos_channel = bdev->qos->ch;
struct spdk_bdev_module_channel *module_ch = NULL; struct spdk_bdev_shared_resource *shared_resource = NULL;
if (qos_channel) { if (qos_channel) {
module_ch = qos_channel->module_ch; shared_resource = qos_channel->shared_resource;
mgmt_channel = module_ch->mgmt_ch; mgmt_channel = shared_resource->mgmt_ch;
qos_channel->flags |= BDEV_CH_RESET_IN_PROGRESS; qos_channel->flags |= BDEV_CH_RESET_IN_PROGRESS;
_spdk_bdev_abort_queued_io(&module_ch->nomem_io, qos_channel); _spdk_bdev_abort_queued_io(&shared_resource->nomem_io, qos_channel);
_spdk_bdev_abort_queued_io(&bdev->qos->queued, qos_channel); _spdk_bdev_abort_queued_io(&bdev->qos->queued, qos_channel);
_spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_small, qos_channel); _spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_small, qos_channel);
_spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_large, qos_channel); _spdk_bdev_abort_buf_io(&mgmt_channel->need_buf_large, qos_channel);
@ -2159,10 +2160,10 @@ static void
_spdk_bdev_ch_retry_io(struct spdk_bdev_channel *bdev_ch) _spdk_bdev_ch_retry_io(struct spdk_bdev_channel *bdev_ch)
{ {
struct spdk_bdev *bdev = bdev_ch->bdev; struct spdk_bdev *bdev = bdev_ch->bdev;
struct spdk_bdev_module_channel *module_ch = bdev_ch->module_ch; struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
struct spdk_bdev_io *bdev_io; struct spdk_bdev_io *bdev_io;
if (module_ch->io_outstanding > module_ch->nomem_threshold) { if (shared_resource->io_outstanding > shared_resource->nomem_threshold) {
/* /*
* Allow some more I/O to complete before retrying the nomem_io queue. * Allow some more I/O to complete before retrying the nomem_io queue.
* Some drivers (such as nvme) cannot immediately take a new I/O in * Some drivers (such as nvme) cannot immediately take a new I/O in
@ -2174,11 +2175,11 @@ _spdk_bdev_ch_retry_io(struct spdk_bdev_channel *bdev_ch)
return; return;
} }
while (!TAILQ_EMPTY(&module_ch->nomem_io)) { while (!TAILQ_EMPTY(&shared_resource->nomem_io)) {
bdev_io = TAILQ_FIRST(&module_ch->nomem_io); bdev_io = TAILQ_FIRST(&shared_resource->nomem_io);
TAILQ_REMOVE(&module_ch->nomem_io, bdev_io, link); TAILQ_REMOVE(&shared_resource->nomem_io, bdev_io, link);
bdev_io->ch->io_outstanding++; bdev_io->ch->io_outstanding++;
module_ch->io_outstanding++; shared_resource->io_outstanding++;
bdev_io->status = SPDK_BDEV_IO_STATUS_PENDING; bdev_io->status = SPDK_BDEV_IO_STATUS_PENDING;
bdev->fn_table->submit_request(bdev_io->ch->channel, bdev_io); bdev->fn_table->submit_request(bdev_io->ch->channel, bdev_io);
if (bdev_io->status == SPDK_BDEV_IO_STATUS_NOMEM) { if (bdev_io->status == SPDK_BDEV_IO_STATUS_NOMEM) {
@ -2298,7 +2299,7 @@ spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status sta
{ {
struct spdk_bdev *bdev = bdev_io->bdev; struct spdk_bdev *bdev = bdev_io->bdev;
struct spdk_bdev_channel *bdev_ch = bdev_io->ch; struct spdk_bdev_channel *bdev_ch = bdev_io->ch;
struct spdk_bdev_module_channel *module_ch = bdev_ch->module_ch; struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
bdev_io->status = status; bdev_io->status = status;
@ -2328,24 +2329,24 @@ spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status sta
} }
} else { } else {
assert(bdev_ch->io_outstanding > 0); assert(bdev_ch->io_outstanding > 0);
assert(module_ch->io_outstanding > 0); assert(shared_resource->io_outstanding > 0);
bdev_ch->io_outstanding--; bdev_ch->io_outstanding--;
module_ch->io_outstanding--; shared_resource->io_outstanding--;
if (spdk_unlikely(status == SPDK_BDEV_IO_STATUS_NOMEM)) { if (spdk_unlikely(status == SPDK_BDEV_IO_STATUS_NOMEM)) {
TAILQ_INSERT_HEAD(&module_ch->nomem_io, bdev_io, link); TAILQ_INSERT_HEAD(&shared_resource->nomem_io, bdev_io, link);
/* /*
* Wait for some of the outstanding I/O to complete before we * Wait for some of the outstanding I/O to complete before we
* retry any of the nomem_io. Normally we will wait for * retry any of the nomem_io. Normally we will wait for
* NOMEM_THRESHOLD_COUNT I/O to complete but for low queue * NOMEM_THRESHOLD_COUNT I/O to complete but for low queue
* depth channels we will instead wait for half to complete. * depth channels we will instead wait for half to complete.
*/ */
module_ch->nomem_threshold = spdk_max((int64_t)module_ch->io_outstanding / 2, shared_resource->nomem_threshold = spdk_max((int64_t)shared_resource->io_outstanding / 2,
(int64_t)module_ch->io_outstanding - NOMEM_THRESHOLD_COUNT); (int64_t)shared_resource->io_outstanding - NOMEM_THRESHOLD_COUNT);
return; return;
} }
if (spdk_unlikely(!TAILQ_EMPTY(&module_ch->nomem_io))) { if (spdk_unlikely(!TAILQ_EMPTY(&shared_resource->nomem_io))) {
_spdk_bdev_ch_retry_io(bdev_ch); _spdk_bdev_ch_retry_io(bdev_ch);
} }
} }

View File

@ -883,7 +883,7 @@ enomem(void)
{ {
struct spdk_io_channel *io_ch; struct spdk_io_channel *io_ch;
struct spdk_bdev_channel *bdev_ch; struct spdk_bdev_channel *bdev_ch;
struct spdk_bdev_module_channel *module_ch; struct spdk_bdev_shared_resource *shared_resource;
struct ut_bdev_channel *ut_ch; struct ut_bdev_channel *ut_ch;
const uint32_t IO_ARRAY_SIZE = 64; const uint32_t IO_ARRAY_SIZE = 64;
const uint32_t AVAIL = 20; const uint32_t AVAIL = 20;
@ -897,7 +897,7 @@ enomem(void)
set_thread(0); set_thread(0);
io_ch = spdk_bdev_get_io_channel(g_desc); io_ch = spdk_bdev_get_io_channel(g_desc);
bdev_ch = spdk_io_channel_get_ctx(io_ch); bdev_ch = spdk_io_channel_get_ctx(io_ch);
module_ch = bdev_ch->module_ch; shared_resource = bdev_ch->shared_resource;
ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel); ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
ut_ch->avail_cnt = AVAIL; ut_ch->avail_cnt = AVAIL;
@ -907,7 +907,7 @@ enomem(void)
rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]); rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
CU_ASSERT(rc == 0); CU_ASSERT(rc == 0);
} }
CU_ASSERT(TAILQ_EMPTY(&module_ch->nomem_io)); CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
/* /*
* Next, submit one additional I/O. This one should fail with ENOMEM and then go onto * Next, submit one additional I/O. This one should fail with ENOMEM and then go onto
@ -916,8 +916,8 @@ enomem(void)
status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
CU_ASSERT(rc == 0); CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&module_ch->nomem_io)); SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
first_io = TAILQ_FIRST(&module_ch->nomem_io); first_io = TAILQ_FIRST(&shared_resource->nomem_io);
/* /*
* Now submit a bunch more I/O. These should all fail with ENOMEM and get queued behind * Now submit a bunch more I/O. These should all fail with ENOMEM and get queued behind
@ -930,10 +930,10 @@ enomem(void)
} }
/* Assert that first_io is still at the head of the list. */ /* Assert that first_io is still at the head of the list. */
CU_ASSERT(TAILQ_FIRST(&module_ch->nomem_io) == first_io); CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == (IO_ARRAY_SIZE - AVAIL)); CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
nomem_cnt = bdev_io_tailq_cnt(&module_ch->nomem_io); nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
CU_ASSERT(module_ch->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT)); CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));
/* /*
* Complete 1 I/O only. The key check here is bdev_io_tailq_cnt - this should not have * Complete 1 I/O only. The key check here is bdev_io_tailq_cnt - this should not have
@ -941,19 +941,19 @@ enomem(void)
* list. * list.
*/ */
stub_complete_io(g_bdev.io_target, 1); stub_complete_io(g_bdev.io_target, 1);
CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == nomem_cnt); CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
/* /*
* Complete enough I/O to hit the nomem_theshold. This should trigger retrying nomem_io, * Complete enough I/O to hit the nomem_theshold. This should trigger retrying nomem_io,
* and we should see I/O get resubmitted to the test bdev module. * and we should see I/O get resubmitted to the test bdev module.
*/ */
stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1); stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) < nomem_cnt); CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
nomem_cnt = bdev_io_tailq_cnt(&module_ch->nomem_io); nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
/* Complete 1 I/O only. This should not trigger retrying the queued nomem_io. */ /* Complete 1 I/O only. This should not trigger retrying the queued nomem_io. */
stub_complete_io(g_bdev.io_target, 1); stub_complete_io(g_bdev.io_target, 1);
CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == nomem_cnt); CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
/* /*
* Send a reset and confirm that all I/O are completed, including the ones that * Send a reset and confirm that all I/O are completed, including the ones that
@ -966,8 +966,8 @@ enomem(void)
/* This will complete the reset. */ /* This will complete the reset. */
stub_complete_io(g_bdev.io_target, 0); stub_complete_io(g_bdev.io_target, 0);
CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == 0); CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
CU_ASSERT(module_ch->io_outstanding == 0); CU_ASSERT(shared_resource->io_outstanding == 0);
spdk_put_io_channel(io_ch); spdk_put_io_channel(io_ch);
poll_threads(); poll_threads();
@ -979,7 +979,7 @@ enomem_multi_bdev(void)
{ {
struct spdk_io_channel *io_ch; struct spdk_io_channel *io_ch;
struct spdk_bdev_channel *bdev_ch; struct spdk_bdev_channel *bdev_ch;
struct spdk_bdev_module_channel *module_ch; struct spdk_bdev_shared_resource *shared_resource;
struct ut_bdev_channel *ut_ch; struct ut_bdev_channel *ut_ch;
const uint32_t IO_ARRAY_SIZE = 64; const uint32_t IO_ARRAY_SIZE = 64;
const uint32_t AVAIL = 20; const uint32_t AVAIL = 20;
@ -1002,13 +1002,13 @@ enomem_multi_bdev(void)
set_thread(0); set_thread(0);
io_ch = spdk_bdev_get_io_channel(g_desc); io_ch = spdk_bdev_get_io_channel(g_desc);
bdev_ch = spdk_io_channel_get_ctx(io_ch); bdev_ch = spdk_io_channel_get_ctx(io_ch);
module_ch = bdev_ch->module_ch; shared_resource = bdev_ch->shared_resource;
ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel); ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
ut_ch->avail_cnt = AVAIL; ut_ch->avail_cnt = AVAIL;
second_ch = spdk_bdev_get_io_channel(second_desc); second_ch = spdk_bdev_get_io_channel(second_desc);
second_bdev_ch = spdk_io_channel_get_ctx(second_ch); second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
SPDK_CU_ASSERT_FATAL(module_ch == second_bdev_ch->module_ch); SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);
/* Saturate io_target through bdev A. */ /* Saturate io_target through bdev A. */
for (i = 0; i < AVAIL; i++) { for (i = 0; i < AVAIL; i++) {
@ -1016,7 +1016,7 @@ enomem_multi_bdev(void)
rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]); rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
CU_ASSERT(rc == 0); CU_ASSERT(rc == 0);
} }
CU_ASSERT(TAILQ_EMPTY(&module_ch->nomem_io)); CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
/* /*
* Now submit I/O through the second bdev. This should fail with ENOMEM * Now submit I/O through the second bdev. This should fail with ENOMEM
@ -1025,17 +1025,17 @@ enomem_multi_bdev(void)
status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
CU_ASSERT(rc == 0); CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&module_ch->nomem_io)); SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */ /* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
stub_complete_io(g_bdev.io_target, AVAIL); stub_complete_io(g_bdev.io_target, AVAIL);
SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&module_ch->nomem_io)); SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
CU_ASSERT(module_ch->io_outstanding == 1); CU_ASSERT(shared_resource->io_outstanding == 1);
/* Now complete our retried I/O */ /* Now complete our retried I/O */
stub_complete_io(g_bdev.io_target, 1); stub_complete_io(g_bdev.io_target, 1);
SPDK_CU_ASSERT_FATAL(module_ch->io_outstanding == 0); SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
spdk_put_io_channel(io_ch); spdk_put_io_channel(io_ch);
spdk_put_io_channel(second_ch); spdk_put_io_channel(second_ch);
@ -1046,6 +1046,7 @@ enomem_multi_bdev(void)
teardown_test(); teardown_test();
} }
static void static void
enomem_multi_io_target(void) enomem_multi_io_target(void)
{ {
@ -1079,10 +1080,10 @@ enomem_multi_io_target(void)
ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel); ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
ut_ch->avail_cnt = AVAIL; ut_ch->avail_cnt = AVAIL;
/* Different io_target should imply a different module_ch */ /* Different io_target should imply a different shared_resource */
second_ch = spdk_bdev_get_io_channel(second_desc); second_ch = spdk_bdev_get_io_channel(second_desc);
second_bdev_ch = spdk_io_channel_get_ctx(second_ch); second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
SPDK_CU_ASSERT_FATAL(bdev_ch->module_ch != second_bdev_ch->module_ch); SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);
/* Saturate io_target through bdev A. */ /* Saturate io_target through bdev A. */
for (i = 0; i < AVAIL; i++) { for (i = 0; i < AVAIL; i++) {
@ -1090,13 +1091,13 @@ enomem_multi_io_target(void)
rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]); rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
CU_ASSERT(rc == 0); CU_ASSERT(rc == 0);
} }
CU_ASSERT(TAILQ_EMPTY(&bdev_ch->module_ch->nomem_io)); CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
/* Issue one more I/O to fill ENOMEM list. */ /* Issue one more I/O to fill ENOMEM list. */
status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
CU_ASSERT(rc == 0); CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->module_ch->nomem_io)); SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
/* /*
* Now submit I/O through the second bdev. This should go through and complete * Now submit I/O through the second bdev. This should go through and complete
@ -1105,18 +1106,18 @@ enomem_multi_io_target(void)
status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING; status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]); rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
CU_ASSERT(rc == 0); CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->module_ch->nomem_io)); SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
stub_complete_io(second_bdev->io_target, 1); stub_complete_io(second_bdev->io_target, 1);
/* Cleanup; Complete outstanding I/O. */ /* Cleanup; Complete outstanding I/O. */
stub_complete_io(g_bdev.io_target, AVAIL); stub_complete_io(g_bdev.io_target, AVAIL);
SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->module_ch->nomem_io)); SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
/* Complete the ENOMEM I/O */ /* Complete the ENOMEM I/O */
stub_complete_io(g_bdev.io_target, 1); stub_complete_io(g_bdev.io_target, 1);
CU_ASSERT(bdev_ch->module_ch->io_outstanding == 0); CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->module_ch->nomem_io)); SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
CU_ASSERT(bdev_ch->module_ch->io_outstanding == 0); CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
spdk_put_io_channel(io_ch); spdk_put_io_channel(io_ch);
spdk_put_io_channel(second_ch); spdk_put_io_channel(second_ch);
spdk_bdev_close(second_desc); spdk_bdev_close(second_desc);