bdev: Factor out spdk_bdev_io_complete
Split part of I/O completion into two helper functions. This reduces the
amount of change in the next patch, which will reuse part of
spdk_bdev_io_complete.

Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Change-Id: Iaeac81aa5208b4ca303f60410b6a54f8df13b069
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11519
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent 52d1cb5003
commit 9cd85928e5
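For context on the logic being factored out: the comment moved into _bdev_io_handle_no_mem describes the retry threshold used when an I/O completes with SPDK_BDEV_IO_STATUS_NOMEM. Normally the bdev layer waits for NOMEM_THRESHOLD_COUNT completions before retrying the queued I/O, but on low-queue-depth channels it waits for half of the outstanding I/O instead. Below is a minimal, self-contained sketch of that arithmetic in plain C; the constant value is a stand-in for illustration, and the real code uses spdk_max() with the shared_resource fields shown in the diff.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in value for illustration; the real NOMEM_THRESHOLD_COUNT is defined in bdev.c. */
#define NOMEM_THRESHOLD_COUNT 8

/*
 * Mirror of the threshold formula: retries of the queued nomem_io are deferred
 * until io_outstanding falls back to this value, i.e. after NOMEM_THRESHOLD_COUNT
 * completions or, on shallow queues, half of the outstanding I/O, whichever
 * requires fewer completions.
 */
static int64_t
nomem_threshold(int64_t io_outstanding)
{
    int64_t half = io_outstanding / 2;
    int64_t after_fixed_count = io_outstanding - NOMEM_THRESHOLD_COUNT;

    /* spdk_max() in the real code: take the larger of the two candidates. */
    return half > after_fixed_count ? half : after_fixed_count;
}

int
main(void)
{
    /* Deep queue: threshold 56, i.e. wait for NOMEM_THRESHOLD_COUNT (8) completions. */
    printf("outstanding=64 -> threshold=%" PRId64 "\n", nomem_threshold(64));
    /* Shallow queue: threshold 2, i.e. wait for half of the outstanding I/O. */
    printf("outstanding=4  -> threshold=%" PRId64 "\n", nomem_threshold(4));
    return 0;
}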
@@ -1016,6 +1016,42 @@ bdev_ch_retry_io(struct spdk_bdev_channel *bdev_ch)
     }
 }
 
+static inline void
+_bdev_io_decrement_outstanding(struct spdk_bdev_channel *bdev_ch,
+                               struct spdk_bdev_shared_resource *shared_resource)
+{
+    assert(bdev_ch->io_outstanding > 0);
+    assert(shared_resource->io_outstanding > 0);
+    bdev_ch->io_outstanding--;
+    shared_resource->io_outstanding--;
+}
+
+static inline bool
+_bdev_io_handle_no_mem(struct spdk_bdev_io *bdev_io)
+{
+    struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
+    struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
+
+    if (spdk_unlikely(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM)) {
+        TAILQ_INSERT_HEAD(&shared_resource->nomem_io, bdev_io, internal.link);
+        /*
+         * Wait for some of the outstanding I/O to complete before we
+         * retry any of the nomem_io. Normally we will wait for
+         * NOMEM_THRESHOLD_COUNT I/O to complete but for low queue
+         * depth channels we will instead wait for half to complete.
+         */
+        shared_resource->nomem_threshold = spdk_max((int64_t)shared_resource->io_outstanding / 2,
+                                                    (int64_t)shared_resource->io_outstanding - NOMEM_THRESHOLD_COUNT);
+        return true;
+    }
+
+    if (spdk_unlikely(!TAILQ_EMPTY(&shared_resource->nomem_io))) {
+        bdev_ch_retry_io(bdev_ch);
+    }
+
+    return false;
+}
+
 static void
 _bdev_io_unset_bounce_buf(struct spdk_bdev_io *bdev_io)
 {
@@ -5510,28 +5546,10 @@ spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
         }
     } else {
         _bdev_io_unset_bounce_buf(bdev_io);
-
-        assert(bdev_ch->io_outstanding > 0);
-        assert(shared_resource->io_outstanding > 0);
-        bdev_ch->io_outstanding--;
-        shared_resource->io_outstanding--;
-
-        if (spdk_unlikely(status == SPDK_BDEV_IO_STATUS_NOMEM)) {
-            TAILQ_INSERT_HEAD(&shared_resource->nomem_io, bdev_io, internal.link);
-            /*
-             * Wait for some of the outstanding I/O to complete before we
-             * retry any of the nomem_io. Normally we will wait for
-             * NOMEM_THRESHOLD_COUNT I/O to complete but for low queue
-             * depth channels we will instead wait for half to complete.
-             */
-            shared_resource->nomem_threshold = spdk_max((int64_t)shared_resource->io_outstanding / 2,
-                                                        (int64_t)shared_resource->io_outstanding - NOMEM_THRESHOLD_COUNT);
+        _bdev_io_decrement_outstanding(bdev_ch, shared_resource);
+        if (spdk_unlikely(_bdev_io_handle_no_mem(bdev_io))) {
             return;
         }
-
-        if (spdk_unlikely(!TAILQ_EMPTY(&shared_resource->nomem_io))) {
-            bdev_ch_retry_io(bdev_ch);
-        }
     }
 
     bdev_io_complete(bdev_io);
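For broader context (not part of this patch): the NOMEM path above is driven by bdev modules completing an I/O with SPDK_BDEV_IO_STATUS_NOMEM when they temporarily run out of resources. A hedged sketch of the module side follows; the module name, channel context, and limits are hypothetical, and only spdk_bdev_io_complete() and the status value are taken from the patch.

#include "spdk/stdinc.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

/* Hypothetical per-channel context for an example bdev module (illustration only). */
struct example_bdev_channel {
    uint32_t inflight;
    uint32_t max_inflight;
};

/*
 * Sketch of a module submit callback: when the module's own queue is full it
 * completes the I/O with SPDK_BDEV_IO_STATUS_NOMEM instead of failing it.
 * The generic bdev layer (patched above) then parks the I/O on nomem_io and
 * resubmits it once enough outstanding I/O have completed.
 */
static void
example_bdev_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
    struct example_bdev_channel *ech = spdk_io_channel_get_ctx(ch);

    if (ech->inflight >= ech->max_inflight) {
        spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
        return;
    }

    ech->inflight++;
    /* ... issue the I/O to the backing device; complete it later with
     * SPDK_BDEV_IO_STATUS_SUCCESS or an error status ... */
}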