bdev: add split_on_optimal_io_boundary

A number of modules (RAID, logical volumes) have logical
"stripes" that require splitting an I/O into several
child I/Os.  For example, on a RAID-0 with 128KB strip size,
an I/O that spans a 128KB boundary will require sending
one I/O for the portion that comes before the boundary to
one member disk, and another I/O for the portion that comes
after the boundary to another member disk.  Logical volumes
are similar - data is allocated in clusters, so an I/O that
spans a cluster boundary may need to be split since the
clusters may not be contiguous on disk.
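
For concreteness, here is a small standalone sketch of the split
arithmetic (illustrative only - the function name and the 512-byte
block size are assumptions, not code from this patch):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: split an I/O that crosses one 128KB strip
     * boundary, assuming 512-byte blocks (256 blocks per strip).
     */
    static void
    split_example(uint64_t offset_blocks, uint64_t num_blocks)
    {
        const uint64_t strip_blocks = (128 * 1024) / 512;
        uint64_t first = strip_blocks - (offset_blocks % strip_blocks);

        if (first >= num_blocks) {
            printf("no boundary crossed - submit as-is\n");
            return;
        }
        printf("child 1: offset %" PRIu64 ", %" PRIu64 " blocks\n",
               offset_blocks, first);
        printf("child 2: offset %" PRIu64 ", %" PRIu64 " blocks\n",
               offset_blocks + first, num_blocks - first);
    }

For example, offset_blocks = 192 with num_blocks = 128 (64KB starting
at 96KB) yields child 1 covering blocks 192-255 and child 2 covering
blocks 256-319, each going to a different member disk.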

Putting the splitting logic in the common bdev layer ensures
bdev module authors don't each have to implement it themselves.
This is especially helpful for cases like splitting an I/O
described by many iovs - handling it once in the common bdev
layer simplifies things considerably.

Note that currently we only submit one child I/O at a time.
This could be improved later to submit multiple child I/Os
in parallel, but that would also significantly increase the
complexity of the iov splitting code.

Note: Some Intel NVMe SSDs have a similar striping characteristic.
We will not use this bdev stripe feature for NVMe though -
we want to primarily use the splitting functionality inside
the NVMe driver itself to ensure it remains fully functional,
since many SPDK users use the NVMe driver without the bdev
layer.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: Ife804ecc56f6b2b55345a0d0ae9fda9e68632b3b

Reviewed-on: https://review.gerrithub.io/423024
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Jim Harris 2018-08-17 11:04:04 -07:00
parent 17a819f882
commit 4bd9762165
3 changed files with 550 additions and 3 deletions

@@ -249,7 +249,15 @@ struct spdk_bdev {
* This is used to make sure buffers are sector aligned.
* This causes double buffering on writes.
*/
-int need_aligned_buffer;
+bool need_aligned_buffer;
/**
* Specifies whether the optimal_io_boundary is mandatory or
* only advisory. If set to true, the bdev layer will split
* I/O that span the optimal_io_boundary before submitting them
* to the bdev module.
*/
bool split_on_optimal_io_boundary;
/**
* Optimal I/O boundary in blocks, or 0 for no value reported.
@@ -332,6 +340,8 @@ struct spdk_bdev {
typedef void (*spdk_bdev_io_get_buf_cb)(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);
#define BDEV_IO_NUM_CHILD_IOV 32
struct spdk_bdev_io {
/** The block device that this I/O belongs to. */
struct spdk_bdev *bdev;
@@ -342,6 +352,9 @@ struct spdk_bdev_io {
/** A single iovec element for use by this bdev_io. */
struct iovec iov;
/** Array of iovecs used for I/O splitting. */
struct iovec child_iov[BDEV_IO_NUM_CHILD_IOV];
union {
struct {
/** For SG buffer cases, array of iovecs to transfer. */
@@ -460,6 +473,9 @@ struct spdk_bdev_io {
/** Entry to the list need_buf of struct spdk_bdev. */
STAILQ_ENTRY(spdk_bdev_io) buf_link;
/** Enables queuing parent I/O when no bdev_ios available for split children. */
struct spdk_bdev_io_wait_entry waitq_entry;
} internal;
/**

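A minimal sketch of how a module opts in at registration time
(hypothetical function and values; name, fn_table, and module
assignment omitted for brevity):

    #include "spdk/bdev_module.h"

    /* Hypothetical example - not part of this patch. */
    static int
    my_module_register_bdev(struct spdk_bdev *bdev)
    {
        bdev->blocklen = 512;
        bdev->blockcnt = 1024;
        bdev->optimal_io_boundary = 64;             /* stripe size in blocks */
        bdev->split_on_optimal_io_boundary = true;  /* boundary is mandatory */
        return spdk_bdev_register(bdev);
    }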
@@ -1047,6 +1047,226 @@ _spdk_bdev_qos_io_submit(struct spdk_bdev_channel *ch)
}
}
static bool
_spdk_bdev_io_type_can_split(uint8_t type)
{
assert(type != SPDK_BDEV_IO_TYPE_INVALID);
assert(type < SPDK_BDEV_NUM_IO_TYPES);
switch (type) {
case SPDK_BDEV_IO_TYPE_RESET:
case SPDK_BDEV_IO_TYPE_NVME_ADMIN:
case SPDK_BDEV_IO_TYPE_NVME_IO:
case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
/* These types of bdev_io do not specify an LBA offset/length. */
return false;
default:
return true;
}
}
static bool
_spdk_bdev_io_spans_boundary(struct spdk_bdev_io *bdev_io)
{
uint64_t start_stripe, end_stripe;
uint32_t io_boundary = bdev_io->bdev->optimal_io_boundary;
if (io_boundary == 0) {
return false;
}
if (!_spdk_bdev_io_type_can_split(bdev_io->type)) {
return false;
}
start_stripe = bdev_io->u.bdev.offset_blocks;
end_stripe = start_stripe + bdev_io->u.bdev.num_blocks - 1;
/* Avoid expensive div operations if possible. These spdk_u32 functions are very cheap. */
if (spdk_likely(spdk_u32_is_pow2(io_boundary))) {
start_stripe >>= spdk_u32log2(io_boundary);
end_stripe >>= spdk_u32log2(io_boundary);
} else {
start_stripe /= io_boundary;
end_stripe /= io_boundary;
}
return (start_stripe != end_stripe);
}
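/* Worked example: with optimal_io_boundary = 32 (log2 = 5), an I/O at
 * offset_blocks 30 with num_blocks 4 has start_stripe 30 >> 5 = 0 and
 * end_stripe 33 >> 5 = 1, so it spans a boundary and must be split.
 */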
static uint32_t
_to_next_boundary(uint64_t offset, uint32_t boundary)
{
return (boundary - (offset % boundary));
}
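/* Worked example: with offset 14 and boundary 16, this returns
 * 16 - (14 % 16) = 2, so the first child covers blocks 14-15 and the
 * next child starts exactly on the boundary at block 16.
 */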
static void
_spdk_bdev_io_split_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
static void
_spdk_bdev_io_split_with_payload(void *_bdev_io)
{
struct spdk_bdev_io *bdev_io = _bdev_io;
uint64_t current_offset, remaining, bytes_handled;
uint32_t blocklen, to_next_boundary, to_next_boundary_bytes;
struct iovec *parent_iov;
uint64_t parent_iov_offset, child_iov_len;
uint32_t child_iovcnt;
int rc;
remaining = bdev_io->u.bdev.split_remaining_num_blocks;
current_offset = bdev_io->u.bdev.split_current_offset_blocks;
blocklen = bdev_io->bdev->blocklen;
bytes_handled = (current_offset - bdev_io->u.bdev.offset_blocks) * blocklen;
parent_iov = &bdev_io->u.bdev.iovs[0];
parent_iov_offset = 0;
while (bytes_handled > 0) {
if (bytes_handled >= parent_iov->iov_len) {
bytes_handled -= parent_iov->iov_len;
parent_iov++;
continue;
}
parent_iov_offset += bytes_handled;
break;
}
to_next_boundary = _to_next_boundary(current_offset, bdev_io->bdev->optimal_io_boundary);
to_next_boundary = spdk_min(remaining, to_next_boundary);
to_next_boundary_bytes = to_next_boundary * blocklen;
child_iovcnt = 0;
while (to_next_boundary_bytes > 0) {
child_iov_len = spdk_min(to_next_boundary_bytes, parent_iov->iov_len - parent_iov_offset);
to_next_boundary_bytes -= child_iov_len;
bdev_io->child_iov[child_iovcnt].iov_base = parent_iov->iov_base + parent_iov_offset;
bdev_io->child_iov[child_iovcnt].iov_len = child_iov_len;
parent_iov++;
parent_iov_offset = 0;
child_iovcnt++;
if (child_iovcnt == BDEV_IO_NUM_CHILD_IOV && to_next_boundary_bytes > 0) {
/* We've run out of child iovs - we need to fail this I/O. */
bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
bdev_io->internal.cb(bdev_io, SPDK_BDEV_IO_STATUS_FAILED,
bdev_io->internal.caller_ctx);
return;
}
}
if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
rc = spdk_bdev_readv_blocks(bdev_io->internal.desc,
spdk_io_channel_from_ctx(bdev_io->internal.ch),
bdev_io->child_iov, child_iovcnt, current_offset, to_next_boundary,
_spdk_bdev_io_split_done, bdev_io);
} else {
rc = spdk_bdev_writev_blocks(bdev_io->internal.desc,
spdk_io_channel_from_ctx(bdev_io->internal.ch),
bdev_io->child_iov, child_iovcnt, current_offset, to_next_boundary,
_spdk_bdev_io_split_done, bdev_io);
}
if (rc == 0) {
bdev_io->u.bdev.split_current_offset_blocks += to_next_boundary;
bdev_io->u.bdev.split_remaining_num_blocks -= to_next_boundary;
} else {
assert(rc == -ENOMEM);
bdev_io->internal.waitq_entry.bdev = bdev_io->bdev;
bdev_io->internal.waitq_entry.cb_fn = _spdk_bdev_io_split_with_payload;
bdev_io->internal.waitq_entry.cb_arg = bdev_io;
spdk_bdev_queue_io_wait(bdev_io->bdev, spdk_io_channel_from_ctx(bdev_io->internal.ch),
&bdev_io->internal.waitq_entry);
}
}
static void
_spdk_bdev_io_split_no_payload(void *_bdev_io)
{
struct spdk_bdev_io *bdev_io = _bdev_io;
uint64_t current_offset, remaining;
uint32_t to_next_boundary;
int rc;
remaining = bdev_io->u.bdev.split_remaining_num_blocks;
current_offset = bdev_io->u.bdev.split_current_offset_blocks;
to_next_boundary = _to_next_boundary(current_offset, bdev_io->bdev->optimal_io_boundary);
to_next_boundary = spdk_min(remaining, to_next_boundary);
if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP) {
rc = spdk_bdev_unmap_blocks(bdev_io->internal.desc,
spdk_io_channel_from_ctx(bdev_io->internal.ch),
current_offset, to_next_boundary,
_spdk_bdev_io_split_done, bdev_io);
} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE_ZEROES) {
rc = spdk_bdev_write_zeroes_blocks(bdev_io->internal.desc,
spdk_io_channel_from_ctx(bdev_io->internal.ch),
current_offset, to_next_boundary,
_spdk_bdev_io_split_done, bdev_io);
} else {
assert(bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH);
rc = spdk_bdev_flush_blocks(bdev_io->internal.desc,
spdk_io_channel_from_ctx(bdev_io->internal.ch),
current_offset, to_next_boundary,
_spdk_bdev_io_split_done, bdev_io);
}
if (rc == 0) {
bdev_io->u.bdev.split_current_offset_blocks += to_next_boundary;
bdev_io->u.bdev.split_remaining_num_blocks -= to_next_boundary;
} else {
assert(rc == -ENOMEM);
bdev_io->internal.waitq_entry.bdev = bdev_io->bdev;
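/* Resume via the no-payload path: the with-payload resume would
 * dereference iovs that UNMAP/WRITE_ZEROES/FLUSH do not carry.
 */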
bdev_io->internal.waitq_entry.cb_fn = _spdk_bdev_io_split_no_payload;
bdev_io->internal.waitq_entry.cb_arg = bdev_io;
spdk_bdev_queue_io_wait(bdev_io->bdev, spdk_io_channel_from_ctx(bdev_io->internal.ch),
&bdev_io->internal.waitq_entry);
}
}
static void
_spdk_bdev_io_split_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
struct spdk_bdev_io *parent_io = cb_arg;
spdk_bdev_free_io(bdev_io);
if (!success) {
parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
parent_io->internal.cb(parent_io, SPDK_BDEV_IO_STATUS_FAILED, parent_io->internal.caller_ctx);
return;
}
if (parent_io->u.bdev.split_remaining_num_blocks == 0) {
parent_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
parent_io->internal.cb(parent_io, SPDK_BDEV_IO_STATUS_SUCCESS, parent_io->internal.caller_ctx);
return;
}
/*
* Continue with the splitting process. This function will complete the parent I/O if the
* splitting is done.
*/
if (parent_io->type == SPDK_BDEV_IO_TYPE_READ || parent_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
_spdk_bdev_io_split_with_payload(parent_io);
} else {
_spdk_bdev_io_split_no_payload(parent_io);
}
}
static void
_spdk_bdev_io_split(struct spdk_bdev_io *bdev_io)
{
assert(_spdk_bdev_io_type_can_split(bdev_io->type));
bdev_io->u.bdev.split_current_offset_blocks = bdev_io->u.bdev.offset_blocks;
bdev_io->u.bdev.split_remaining_num_blocks = bdev_io->u.bdev.num_blocks;
if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ || bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
_spdk_bdev_io_split_with_payload(bdev_io);
} else {
_spdk_bdev_io_split_no_payload(bdev_io);
}
}
static void
_spdk_bdev_io_submit(void *ctx)
{
@@ -1091,6 +1311,11 @@ spdk_bdev_io_submit(struct spdk_bdev_io *bdev_io)
assert(thread != NULL);
assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_PENDING);
if (bdev->split_on_optimal_io_boundary && _spdk_bdev_io_spans_boundary(bdev_io)) {
_spdk_bdev_io_split(bdev_io);
return;
}
if (bdev_io->internal.ch->flags & BDEV_CH_QOS_ENABLED) {
if ((thread == bdev->internal.qos->thread) || !bdev->internal.qos->thread) {
_spdk_bdev_io_submit(bdev_io);

@@ -80,8 +80,14 @@ stub_destruct(void *ctx)
struct bdev_ut_channel {
TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
uint32_t outstanding_io_count;
uint8_t expected_iotype;
uint64_t expected_offset;
uint64_t expected_length;
int expected_iovcnt;
struct iovec expected_iov[32];
};
static bool g_io_done;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
@@ -89,9 +95,35 @@ static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
struct iovec *iov, *expected_iov;
int i;
TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
ch->outstanding_io_count++;
if (ch->expected_iotype != SPDK_BDEV_IO_TYPE_INVALID) {
CU_ASSERT(bdev_io->type == ch->expected_iotype);
}
if (ch->expected_length == 0) {
return;
}
CU_ASSERT(ch->expected_offset == bdev_io->u.bdev.offset_blocks);
CU_ASSERT(ch->expected_length == bdev_io->u.bdev.num_blocks);
if (ch->expected_iovcnt == 0) {
/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
return;
}
CU_ASSERT(ch->expected_iovcnt == bdev_io->u.bdev.iovcnt);
for (i = 0; i < ch->expected_iovcnt; i++) {
iov = &bdev_io->u.bdev.iovs[i];
expected_iov = &ch->expected_iov[i];
CU_ASSERT(iov->iov_len == expected_iov->iov_len);
CU_ASSERT(iov->iov_base == expected_iov->iov_base);
}
}
static uint32_t
@@ -121,10 +153,17 @@ bdev_ut_get_io_channel(void *ctx)
return spdk_get_io_channel(&g_bdev_ut_io_device);
}
static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
return true;
}
static struct spdk_bdev_fn_table fn_table = {
.destruct = stub_destruct,
.submit_request = stub_submit_request,
.get_io_channel = bdev_ut_get_io_channel,
.io_type_supported = stub_io_type_supported,
};
static int
@@ -208,7 +247,8 @@ allocate_bdev(char *name)
bdev->name = name;
bdev->fn_table = &fn_table;
bdev->module = &bdev_ut_if;
-bdev->blockcnt = 1;
+bdev->blockcnt = 256;
bdev->blocklen = 512;
rc = spdk_bdev_register(bdev);
CU_ASSERT(rc == 0);
@@ -587,6 +627,7 @@ alias_add_del_test(void)
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
g_io_done = true;
spdk_bdev_free_io(bdev_io);
}
@@ -694,6 +735,269 @@ bdev_io_wait_test(void)
spdk_bdev_finish(bdev_fini_cb, NULL);
}
static void
bdev_io_spans_boundary_test(void)
{
struct spdk_bdev bdev;
struct spdk_bdev_io bdev_io;
memset(&bdev, 0, sizeof(bdev));
bdev.optimal_io_boundary = 0;
bdev_io.bdev = &bdev;
/* bdev has no optimal_io_boundary set - so this should return false. */
CU_ASSERT(_spdk_bdev_io_spans_boundary(&bdev_io) == false);
bdev.optimal_io_boundary = 32;
bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;
/* RESETs are not based on LBAs - so this should return false. */
CU_ASSERT(_spdk_bdev_io_spans_boundary(&bdev_io) == false);
bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
bdev_io.u.bdev.offset_blocks = 0;
bdev_io.u.bdev.num_blocks = 32;
/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
CU_ASSERT(_spdk_bdev_io_spans_boundary(&bdev_io) == false);
bdev_io.u.bdev.num_blocks = 33;
/* This I/O spans a boundary. */
CU_ASSERT(_spdk_bdev_io_spans_boundary(&bdev_io) == true);
}
static void
bdev_io_split(void)
{
struct spdk_bdev *bdev;
struct spdk_bdev_desc *desc;
struct spdk_io_channel *io_ch;
struct spdk_bdev_opts bdev_opts = {
.bdev_io_pool_size = 512,
.bdev_io_cache_size = 64,
};
struct iovec iov[4];
int rc;
rc = spdk_bdev_set_opts(&bdev_opts);
CU_ASSERT(rc == 0);
spdk_bdev_initialize(bdev_init_cb, NULL);
bdev = allocate_bdev("bdev0");
rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
CU_ASSERT(rc == 0);
CU_ASSERT(desc != NULL);
io_ch = spdk_bdev_get_io_channel(desc);
CU_ASSERT(io_ch != NULL);
bdev->optimal_io_boundary = 16;
bdev->split_on_optimal_io_boundary = false;
g_io_done = false;
/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
g_bdev_ut_channel->expected_iotype = SPDK_BDEV_IO_TYPE_READ;
g_bdev_ut_channel->expected_offset = 14;
g_bdev_ut_channel->expected_length = 8;
g_bdev_ut_channel->expected_iovcnt = 1;
g_bdev_ut_channel->expected_iov[0].iov_base = (void *)0xF000;
g_bdev_ut_channel->expected_iov[0].iov_len = 8 * 512;
rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
CU_ASSERT(rc == 0);
CU_ASSERT(g_io_done == false);
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
stub_complete_io(1);
CU_ASSERT(g_io_done == true);
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
bdev->split_on_optimal_io_boundary = true;
/* Now test that a single-vector command is split correctly.
* Offset 14, length 8, payload 0xF000
* Child - Offset 14, length 2, payload 0xF000
* Child - Offset 16, length 6, payload 0xF000 + 2 * 512
*
* Set up the expected values before calling spdk_bdev_read_blocks, since this call
* will submit the first child immediately.
*/
g_io_done = false;
g_bdev_ut_channel->expected_iotype = SPDK_BDEV_IO_TYPE_READ;
g_bdev_ut_channel->expected_offset = 14;
g_bdev_ut_channel->expected_length = 2;
g_bdev_ut_channel->expected_iovcnt = 1;
g_bdev_ut_channel->expected_iov[0].iov_base = (void *)0xF000;
g_bdev_ut_channel->expected_iov[0].iov_len = 2 * 512;
rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
CU_ASSERT(rc == 0);
CU_ASSERT(g_io_done == false);
/* Now set up the expected values for the second child. The second child will
* get submitted once the first child is completed by stub_complete_io().
*/
g_bdev_ut_channel->expected_offset = 16;
g_bdev_ut_channel->expected_length = 6;
g_bdev_ut_channel->expected_iovcnt = 1;
g_bdev_ut_channel->expected_iov[0].iov_base = (void *)(0xF000 + 2 * 512);
g_bdev_ut_channel->expected_iov[0].iov_len = 6 * 512;
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
stub_complete_io(1);
CU_ASSERT(g_io_done == false);
/* Complete the second child I/O. This should result in our callback getting
* invoked since the parent I/O is now complete.
*/
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
stub_complete_io(1);
CU_ASSERT(g_io_done == true);
/* Now set up a more complex, multi-vector command that needs to be split,
* including splitting iovecs.
*/
iov[0].iov_base = (void *)0x10000;
iov[0].iov_len = 512;
iov[1].iov_base = (void *)0x20000;
iov[1].iov_len = 20 * 512;
iov[2].iov_base = (void *)0x30000;
iov[2].iov_len = 11 * 512;
g_io_done = false;
g_bdev_ut_channel->expected_iotype = SPDK_BDEV_IO_TYPE_WRITE;
g_bdev_ut_channel->expected_offset = 14;
g_bdev_ut_channel->expected_length = 2;
g_bdev_ut_channel->expected_iovcnt = 2;
g_bdev_ut_channel->expected_iov[0].iov_base = (void *)0x10000;
g_bdev_ut_channel->expected_iov[0].iov_len = 512;
g_bdev_ut_channel->expected_iov[1].iov_base = (void *)0x20000;
g_bdev_ut_channel->expected_iov[1].iov_len = 512;
rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
CU_ASSERT(rc == 0);
CU_ASSERT(g_io_done == false);
g_bdev_ut_channel->expected_offset = 16;
g_bdev_ut_channel->expected_length = 16;
g_bdev_ut_channel->expected_iovcnt = 1;
g_bdev_ut_channel->expected_iov[0].iov_base = (void *)(0x20000 + 512);
g_bdev_ut_channel->expected_iov[0].iov_len = 16 * 512;
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
stub_complete_io(1);
CU_ASSERT(g_io_done == false);
g_bdev_ut_channel->expected_offset = 32;
g_bdev_ut_channel->expected_length = 14;
g_bdev_ut_channel->expected_iovcnt = 2;
g_bdev_ut_channel->expected_iov[0].iov_base = (void *)(0x20000 + 17 * 512);
g_bdev_ut_channel->expected_iov[0].iov_len = 3 * 512;
g_bdev_ut_channel->expected_iov[1].iov_base = (void *)0x30000;
g_bdev_ut_channel->expected_iov[1].iov_len = 11 * 512;
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
stub_complete_io(1);
CU_ASSERT(g_io_done == false);
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
stub_complete_io(1);
CU_ASSERT(g_io_done == true);
/* Test a WRITE_ZEROES that needs to be split. This is an I/O type that does not have iovecs.
* Have this I/O end right on a boundary. Use a non-standard optimal_io_boundary to test the
* non-power-of-2 path.
*/
bdev->optimal_io_boundary = 15;
g_io_done = false;
g_bdev_ut_channel->expected_iotype = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
g_bdev_ut_channel->expected_offset = 9;
g_bdev_ut_channel->expected_length = 6;
g_bdev_ut_channel->expected_iovcnt = 0;
rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
CU_ASSERT(rc == 0);
CU_ASSERT(g_io_done == false);
g_bdev_ut_channel->expected_offset = 15;
g_bdev_ut_channel->expected_length = 15;
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
stub_complete_io(1);
CU_ASSERT(g_io_done == false);
g_bdev_ut_channel->expected_offset = 30;
g_bdev_ut_channel->expected_length = 15;
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
stub_complete_io(1);
CU_ASSERT(g_io_done == false);
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
stub_complete_io(1);
CU_ASSERT(g_io_done == true);
/* Test an UNMAP that needs to be split. This is an I/O type that does not have iovecs. */
bdev->optimal_io_boundary = 16;
g_io_done = false;
g_bdev_ut_channel->expected_iotype = SPDK_BDEV_IO_TYPE_UNMAP;
g_bdev_ut_channel->expected_offset = 15;
g_bdev_ut_channel->expected_length = 1;
g_bdev_ut_channel->expected_iovcnt = 0;
rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
CU_ASSERT(rc == 0);
CU_ASSERT(g_io_done == false);
g_bdev_ut_channel->expected_offset = 16;
g_bdev_ut_channel->expected_length = 1;
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
stub_complete_io(1);
CU_ASSERT(g_io_done == false);
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
stub_complete_io(1);
CU_ASSERT(g_io_done == true);
/* Test a FLUSH that needs to be split. This is an I/O type that does not have iovecs. */
bdev->optimal_io_boundary = 16;
g_io_done = false;
g_bdev_ut_channel->expected_iotype = SPDK_BDEV_IO_TYPE_FLUSH;
g_bdev_ut_channel->expected_offset = 15;
g_bdev_ut_channel->expected_length = 1;
g_bdev_ut_channel->expected_iovcnt = 0;
rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
CU_ASSERT(rc == 0);
CU_ASSERT(g_io_done == false);
g_bdev_ut_channel->expected_offset = 16;
g_bdev_ut_channel->expected_length = 1;
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
stub_complete_io(1);
CU_ASSERT(g_io_done == false);
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
stub_complete_io(1);
CU_ASSERT(g_io_done == true);
/* Reset values so next test is not affected by leftover values. */
g_bdev_ut_channel->expected_iotype = SPDK_BDEV_IO_TYPE_INVALID;
g_bdev_ut_channel->expected_offset = 0;
g_bdev_ut_channel->expected_length = 0;
g_bdev_ut_channel->expected_iovcnt = 0;
spdk_put_io_channel(io_ch);
spdk_bdev_close(desc);
free_bdev(bdev);
spdk_bdev_finish(bdev_fini_cb, NULL);
}
int
main(int argc, char **argv)
{
@@ -717,7 +1021,9 @@ main(int argc, char **argv)
CU_add_test(suite, "open_write", open_write_test) == NULL ||
CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL
CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL
) {
CU_cleanup_registry();
return CU_get_error();