blob: add readv/writev support

Most of the work here involves splitting an I/O that spans
a cluster boundary.  In that case we need to allocate a
separate iov array and issue each sub-I/O serially, copying
the relevant subset of the original iov array into it.
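
The heart of the split is carving a byte range out of the caller's
iov array.  A minimal standalone sketch of that idea (split_copy_iovs
is an invented name for illustration, not part of this change):

#include <stddef.h>
#include <sys/uio.h>

/* Copy the byte range [off, off + len) of src[0..srccnt) into dst,
 * returning the number of dst entries used. */
static int
split_copy_iovs(struct iovec *dst, const struct iovec *src, int srccnt,
                size_t off, size_t len)
{
        int i, cnt = 0;

        for (i = 0; i < srccnt && len > 0; i++) {
                size_t avail = src[i].iov_len;

                if (off >= avail) {
                        off -= avail;   /* still seeking to the range start */
                        continue;
                }
                avail -= off;
                dst[cnt].iov_base = (char *)src[i].iov_base + off;
                dst[cnt].iov_len = avail < len ? avail : len;
                len -= dst[cnt].iov_len;
                off = 0;
                cnt++;
        }
        return cnt;
}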

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I0d46b3f832245900d109ee6c78cc6d49cf96428b

Reviewed-on: https://review.gerrithub.io/374880
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Jim Harris 2017-08-18 09:41:26 -07:00
parent 26e9b6eafa
commit 179ed697b3
12 changed files with 513 additions and 3 deletions

View File

@@ -45,6 +45,10 @@ additional clarity when constructing spdk_mempools. Previously, -1 could be
passed and the library would choose a reasonable default, but this new value
makes it explicit that the default is being used.
### Blobstore
spdk_bs_io_readv_blob() and spdk_bs_io_writev_blob() were added to enable
scattered payloads.
## v17.07: Build system improvements, userspace vhost-blk target, and GPT bdev

View File

@@ -118,6 +118,16 @@ struct spdk_bs_dev {
uint64_t lba, uint32_t lba_count,
struct spdk_bs_dev_cb_args *cb_args);
void (*readv)(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt,
uint64_t lba, uint32_t lba_count,
struct spdk_bs_dev_cb_args *cb_args);
void (*writev)(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt,
uint64_t lba, uint32_t lba_count,
struct spdk_bs_dev_cb_args *cb_args);
void (*flush)(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
struct spdk_bs_dev_cb_args *cb_args);
@@ -233,12 +243,21 @@ void spdk_bs_io_write_blob(struct spdk_blob *blob, struct spdk_io_channel *chann
void *payload, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg);
/* Read data from a blob. Offset is in pages from the beginning of the blob. */
void spdk_bs_io_read_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
void *payload, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg);
/* Write data to a blob from an iov array. Offset is in pages from the beginning of the blob. */
void spdk_bs_io_writev_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg);
/* Read data from a blob into an iov array. Offset is in pages from the beginning of the blob. */
void spdk_bs_io_readv_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg);
/* Iterate through all blobs */
void spdk_bs_md_iter_first(struct spdk_blob_store *bs,
spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
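
A hypothetical call site for the new scattered-payload API (the helper
and buffer names are invented for illustration; offset and length are in
pages, as with the existing read/write calls):

#include <sys/uio.h>
#include "spdk/blob.h"

static void
write_done(void *cb_arg, int bserrno)
{
        /* bserrno == 0 on success */
}

static void
example_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
               void *buf_a, void *buf_b, void *buf_c)
{
        struct iovec iov[3];

        iov[0].iov_base = buf_a; iov[0].iov_len = 1 * 4096;
        iov[1].iov_base = buf_b; iov[1].iov_len = 5 * 4096;
        iov[2].iov_base = buf_c; iov[2].iov_len = 4 * 4096;

        /* Write 10 pages starting at page 0 of the blob. */
        spdk_bs_io_writev_blob(blob, channel, iov, 3, 0, 10, write_done, NULL);
}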

View File

@@ -102,6 +102,8 @@
}
/* declare wrapper protos (alphabetically please) here */
DECLARE_WRAPPER(calloc, void *, (size_t nmemb, size_t size));
DECLARE_WRAPPER(pthread_mutex_init, int,
(pthread_mutex_t *mtx, const pthread_mutexattr_t *attr));

View File

@@ -103,6 +103,39 @@ bdev_blob_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *
}
}
static void
bdev_blob_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt,
uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
{
struct spdk_bdev *bdev = __get_bdev(dev);
int rc;
uint32_t block_size = spdk_bdev_get_block_size(bdev);
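/* spdk_bdev_readv() takes its offset and length in bytes, so scale the blobstore LBAs by the bdev block size. */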
rc = spdk_bdev_readv(__get_desc(dev), channel, iov, iovcnt, lba * block_size,
lba_count * block_size, bdev_blob_io_complete, cb_args);
if (rc) {
cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, rc);
}
}
static void
bdev_blob_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt,
uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
{
struct spdk_bdev *bdev = __get_bdev(dev);
int rc;
uint32_t block_size = spdk_bdev_get_block_size(bdev);
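/* spdk_bdev_writev() likewise takes byte offsets, so scale the LBAs by the bdev block size. */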
rc = spdk_bdev_writev(__get_desc(dev), channel, iov, iovcnt, lba * block_size,
lba_count * block_size, bdev_blob_io_complete, cb_args);
if (rc) {
cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, rc);
}
}
static void
bdev_blob_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, uint64_t lba,
uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args)
@@ -171,6 +204,8 @@ spdk_bdev_create_bs_dev(struct spdk_bdev *bdev)
b->bs_dev.destroy = bdev_blob_destroy;
b->bs_dev.read = bdev_blob_read;
b->bs_dev.write = bdev_blob_write;
b->bs_dev.readv = bdev_blob_readv;
b->bs_dev.writev = bdev_blob_writev;
b->bs_dev.unmap = bdev_blob_unmap;
return &b->bs_dev;

View File

@@ -38,6 +38,7 @@
#include "spdk/queue.h"
#include "spdk/io_channel.h"
#include "spdk/bit_array.h"
#include "spdk/likely.h"
#include "spdk_internal/log.h"
@@ -1062,6 +1063,168 @@ _spdk_blob_request_submit_rw(struct spdk_blob *blob, struct spdk_io_channel *_ch
spdk_bs_batch_close(batch);
}
struct rw_iov_ctx {
struct spdk_blob *blob;
bool read;
int iovcnt;
struct iovec *orig_iov;
uint64_t page_offset;
uint64_t pages_remaining;
uint64_t pages_done;
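/* Flexible array: the ctx is allocated with extra space for iovcnt entries. */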
struct iovec iov[0];
};
static void
_spdk_rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
assert(cb_arg == NULL);
spdk_bs_sequence_finish(seq, bserrno);
}
static void
_spdk_rw_iov_split_next(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct rw_iov_ctx *ctx = cb_arg;
struct iovec *iov, *orig_iov;
int iovcnt;
size_t orig_iovoff;
uint64_t lba;
uint64_t page_count, pages_to_boundary;
uint32_t lba_count;
uint64_t byte_count;
if (bserrno != 0 || ctx->pages_remaining == 0) {
free(ctx);
spdk_bs_sequence_finish(seq, bserrno);
return;
}
pages_to_boundary = _spdk_bs_num_pages_to_cluster_boundary(ctx->blob, ctx->page_offset);
page_count = spdk_min(ctx->pages_remaining, pages_to_boundary);
lba = _spdk_bs_blob_page_to_lba(ctx->blob, ctx->page_offset);
lba_count = _spdk_bs_page_to_lba(ctx->blob->bs, page_count);
/*
 * Get the index and offset into the original iov array for our current
 * position in the I/O sequence.  byte_count counts down how many bytes
 * remain until orig_iov and orig_iovoff point to the current position.
 * (sizeof(struct spdk_blob_md_page) equals the blobstore page size.)
 */
byte_count = ctx->pages_done * sizeof(struct spdk_blob_md_page);
orig_iov = &ctx->orig_iov[0];
orig_iovoff = 0;
while (byte_count > 0) {
if (byte_count >= orig_iov->iov_len) {
byte_count -= orig_iov->iov_len;
orig_iov++;
} else {
orig_iovoff = byte_count;
byte_count = 0;
}
}
/*
* Build an iov array for the next I/O in the sequence. byte_count will keep track of how many
* bytes of this next I/O remain to be accounted for in the new iov array.
*/
byte_count = page_count * sizeof(struct spdk_blob_md_page);
iov = &ctx->iov[0];
iovcnt = 0;
while (byte_count > 0) {
iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff);
iov->iov_base = orig_iov->iov_base + orig_iovoff;
byte_count -= iov->iov_len;
orig_iovoff = 0;
orig_iov++;
iov++;
iovcnt++;
}
ctx->page_offset += page_count;
ctx->pages_done += page_count;
ctx->pages_remaining -= page_count;
iov = &ctx->iov[0];
if (ctx->read) {
spdk_bs_sequence_readv(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_split_next, ctx);
} else {
spdk_bs_sequence_writev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_split_next, ctx);
}
}
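/*
 * Worked example (mirroring blob_rw_verify_iov in the unit tests): with
 * 256-page clusters, a 10-page writev at page offset 250 with iovs of
 * 1, 5 and 4 pages splits at the boundary into a 6-page sub-I/O for
 * pages 250-255 (iov[0] plus iov[1]) and a 4-page sub-I/O for pages
 * 256-259 (iov[2]).
 */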
static void
_spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel,
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg, bool read)
{
spdk_bs_sequence_t *seq;
struct spdk_bs_cpl cpl;
assert(blob != NULL);
if (length == 0) {
cb_fn(cb_arg, 0);
return;
}
if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) {
cb_fn(cb_arg, -EINVAL);
return;
}
cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
cpl.u.blob_basic.cb_fn = cb_fn;
cpl.u.blob_basic.cb_arg = cb_arg;
/*
 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having
 * to split a request that spans a cluster boundary.  For an I/O that does not span a cluster
 * boundary, there is no noticeable difference compared to using a batch.  For an I/O that does
 * span a cluster boundary, the target LBAs (after blob offset to LBA translation) may not be
 * contiguous, so we need to allocate a separate iov array and split the I/O such that none of
 * the resulting smaller I/Os cross a cluster boundary.  These smaller I/Os are issued serially
 * (not in parallel), but since this case happens very infrequently, any performance impact
 * will be negligible.
 *
 * This could be optimized in the future to allocate an iov array big enough for all of the iovs
 * of all of the smaller I/Os, pre-build all of the iov arrays, and then issue them in a batch.
 * That would also require an intermediate spdk_bs_cpl, called when the batch completes, to
 * free the memory for the iov arrays.
 */
seq = spdk_bs_sequence_start(_channel, &cpl);
if (!seq) {
cb_fn(cb_arg, -ENOMEM);
return;
}
if (spdk_likely(length <= _spdk_bs_num_pages_to_cluster_boundary(blob, offset))) {
uint64_t lba = _spdk_bs_blob_page_to_lba(blob, offset);
uint32_t lba_count = _spdk_bs_page_to_lba(blob->bs, length);
if (read) {
spdk_bs_sequence_readv(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
} else {
spdk_bs_sequence_writev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
}
} else {
struct rw_iov_ctx *ctx;
ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec));
if (ctx == NULL) {
spdk_bs_sequence_finish(seq, -ENOMEM);
return;
}
ctx->blob = blob;
ctx->read = read;
ctx->orig_iov = iov;
ctx->iovcnt = iovcnt;
ctx->page_offset = offset;
ctx->pages_remaining = length;
ctx->pages_done = 0;
_spdk_rw_iov_split_next(seq, ctx, 0);
}
}
static struct spdk_blob *
_spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
@@ -2167,6 +2330,20 @@ void spdk_bs_io_read_blob(struct spdk_blob *blob, struct spdk_io_channel *channe
_spdk_blob_request_submit_rw(blob, channel, payload, offset, length, cb_fn, cb_arg, true);
}
void spdk_bs_io_writev_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg)
{
_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false);
}
void spdk_bs_io_readv_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg)
{
_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true);
}
struct spdk_bs_iter_ctx {
int64_t page_num;
struct spdk_blob_store *bs;

View File

@@ -156,6 +156,40 @@ spdk_bs_sequence_write(spdk_bs_sequence_t *seq, void *payload,
&set->cb_args);
}
void
spdk_bs_sequence_readv(spdk_bs_sequence_t *seq, struct iovec *iov, int iovcnt,
uint64_t lba, uint32_t lba_count,
spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
struct spdk_bs_channel *channel = set->channel;
SPDK_TRACELOG(SPDK_TRACE_BLOB_RW, "Reading %u blocks from LBA %lu\n", lba_count, lba);
set->u.sequence.cb_fn = cb_fn;
set->u.sequence.cb_arg = cb_arg;
channel->dev->readv(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count,
&set->cb_args);
}
void
spdk_bs_sequence_writev(spdk_bs_sequence_t *seq, struct iovec *iov, int iovcnt,
uint64_t lba, uint32_t lba_count,
spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)seq;
struct spdk_bs_channel *channel = set->channel;
SPDK_TRACELOG(SPDK_TRACE_BLOB_RW, "Writing %u blocks to LBA %lu\n", lba_count, lba);
set->u.sequence.cb_fn = cb_fn;
set->u.sequence.cb_arg = cb_arg;
channel->dev->writev(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count,
&set->cb_args);
}
void
spdk_bs_sequence_flush(spdk_bs_sequence_t *seq,
spdk_bs_sequence_cpl cb_fn, void *cb_arg)

View File

@@ -139,6 +139,14 @@ void spdk_bs_sequence_write(spdk_bs_sequence_t *seq, void *payload,
uint64_t lba, uint32_t lba_count,
spdk_bs_sequence_cpl cb_fn, void *cb_arg);
void spdk_bs_sequence_readv(spdk_bs_sequence_t *seq, struct iovec *iov, int iovcnt,
uint64_t lba, uint32_t lba_count,
spdk_bs_sequence_cpl cb_fn, void *cb_arg);
void spdk_bs_sequence_writev(spdk_bs_sequence_t *seq, struct iovec *iov, int iovcnt,
uint64_t lba, uint32_t lba_count,
spdk_bs_sequence_cpl cb_fn, void *cb_arg);
void spdk_bs_sequence_flush(spdk_bs_sequence_t *seq,
spdk_bs_sequence_cpl cb_fn, void *cb_arg);

View File

@@ -39,3 +39,5 @@ DEFINE_WRAPPER(pthread_mutex_init, int,
DEFINE_WRAPPER(pthread_mutexattr_init, int,
(pthread_mutexattr_t *attr), (attr), MOCK_PASS_THRU)
DEFINE_WRAPPER(calloc, void *, (size_t nmemb, size_t size), (nmemb, size), (void *)MOCK_PASS_THRU)

View File

@@ -30,4 +30,5 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
LDFLAGS += -Wl,--wrap,pthread_mutexattr_init -Wl,--wrap,pthread_mutex_init
LDFLAGS += -Wl,--wrap,pthread_mutexattr_init -Wl,--wrap,pthread_mutex_init \
-Wl,--wrap,calloc
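
For context, a minimal sketch of what the DECLARE_WRAPPER/DEFINE_WRAPPER
machinery boils down to under GNU ld's --wrap option (simplified; the
real macros also add a mock-control variable):

#include <stddef.h>

/* With -Wl,--wrap,calloc the linker redirects calls to calloc here;
 * the real allocator remains reachable as __real_calloc. */
void *__real_calloc(size_t nmemb, size_t size);

void *
__wrap_calloc(size_t nmemb, size_t size)
{
        /* A test can return NULL here to simulate allocation failure
         * (this is what MOCK_SET(calloc, void *, NULL) arranges). */
        return __real_calloc(nmemb, size);
}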

View File

@@ -34,13 +34,14 @@
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
include $(SPDK_ROOT_DIR)/mk/spdk.mock.unittest.mk
APP = blob_ut
C_SRCS := blob_ut.c
CFLAGS += -I$(SPDK_ROOT_DIR)/lib/blob -I$(SPDK_ROOT_DIR)/test
SPDK_LIB_LIST = util log
SPDK_LIB_LIST = util log spdk_mock
LIBS += $(SPDK_LIB_LINKER_ARGS) -lcunit

View File

@@ -517,6 +517,173 @@ blob_rw_verify(void)
g_bs = NULL;
}
static void
blob_rw_verify_iov(void)
{
struct spdk_blob_store *bs;
struct spdk_bs_dev dev;
struct spdk_blob *blob;
struct spdk_io_channel *channel;
spdk_blob_id blobid;
uint8_t payload_read[10 * 4096];
uint8_t payload_write[10 * 4096];
struct iovec iov_read[3];
struct iovec iov_write[3];
void *buf;
int rc;
init_dev(&dev);
memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
spdk_bs_init(&dev, NULL, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
channel = spdk_bs_alloc_io_channel(bs);
CU_ASSERT(channel != NULL);
spdk_bs_md_create_blob(bs, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobid = g_blobid;
spdk_bs_md_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob = g_blob;
rc = spdk_bs_md_resize_blob(blob, 2);
CU_ASSERT(rc == 0);
/*
 * Manually adjust the offset of the blob's second cluster. This allows
 * us to make sure that the readv/writev code correctly handles I/O that
 * crosses a cluster boundary. Start by asserting that the allocated
 * clusters are where we expect before modifying the second cluster.
 */
CU_ASSERT(blob->active.clusters[0] == 1 * 256);
CU_ASSERT(blob->active.clusters[1] == 2 * 256);
blob->active.clusters[1] = 3 * 256;
memset(payload_write, 0xE5, sizeof(payload_write));
iov_write[0].iov_base = payload_write;
iov_write[0].iov_len = 1 * 4096;
iov_write[1].iov_base = payload_write + 1 * 4096;
iov_write[1].iov_len = 5 * 4096;
iov_write[2].iov_base = payload_write + 6 * 4096;
iov_write[2].iov_len = 4 * 4096;
/*
* Choose a page offset just before the cluster boundary. The first 6 pages of payload
* will get written to the first cluster, the last 4 to the second cluster.
*/
spdk_bs_io_writev_blob(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
memset(payload_read, 0xAA, sizeof(payload_read));
iov_read[0].iov_base = payload_read;
iov_read[0].iov_len = 3 * 4096;
iov_read[1].iov_base = payload_read + 3 * 4096;
iov_read[1].iov_len = 4 * 4096;
iov_read[2].iov_base = payload_read + 7 * 4096;
iov_read[2].iov_len = 3 * 4096;
spdk_bs_io_readv_blob(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
buf = calloc(1, 256 * 4096);
SPDK_CU_ASSERT_FATAL(buf != NULL);
/* Check that cluster 2 on "disk" was not modified. */
CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
free(buf);
spdk_bs_md_close_blob(&blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
spdk_bs_free_io_channel(channel);
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}
static uint32_t
bs_channel_get_req_count(struct spdk_io_channel *_channel)
{
struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
struct spdk_bs_request_set *set;
uint32_t count = 0;
TAILQ_FOREACH(set, &channel->reqs, link) {
count++;
}
return count;
}
static void
blob_rw_verify_iov_nomem(void)
{
struct spdk_blob_store *bs;
struct spdk_bs_dev dev;
struct spdk_blob *blob;
struct spdk_io_channel *channel;
spdk_blob_id blobid;
uint8_t payload_write[10 * 4096];
struct iovec iov_write[3];
uint32_t req_count;
int rc;
init_dev(&dev);
memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
spdk_bs_init(&dev, NULL, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
bs = g_bs;
channel = spdk_bs_alloc_io_channel(bs);
CU_ASSERT(channel != NULL);
spdk_bs_md_create_blob(bs, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobid = g_blobid;
spdk_bs_md_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob = g_blob;
rc = spdk_bs_md_resize_blob(blob, 2);
CU_ASSERT(rc == 0);
/*
* Choose a page offset just before the cluster boundary. The first 6 pages of payload
* will get written to the first cluster, the last 4 to the second cluster.
*/
iov_write[0].iov_base = payload_write;
iov_write[0].iov_len = 1 * 4096;
iov_write[1].iov_base = payload_write + 1 * 4096;
iov_write[1].iov_len = 5 * 4096;
iov_write[2].iov_base = payload_write + 6 * 4096;
iov_write[2].iov_len = 4 * 4096;
MOCK_SET(calloc, void *, NULL);
req_count = bs_channel_get_req_count(channel);
spdk_bs_io_writev_blob(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == -ENOMEM);
CU_ASSERT(req_count == bs_channel_get_req_count(channel));
MOCK_SET(calloc, void *, (void *)MOCK_PASS_THRU);
spdk_bs_md_close_blob(&blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
spdk_bs_free_io_channel(channel);
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
}
static void
blob_iter(void)
{
@@ -963,6 +1130,8 @@ int main(int argc, char **argv)
CU_add_test(suite, "blob_write", blob_write) == NULL ||
CU_add_test(suite, "blob_read", blob_read) == NULL ||
CU_add_test(suite, "blob_rw_verify", blob_rw_verify) == NULL ||
CU_add_test(suite, "blob_rw_verify_iov", blob_rw_verify_iov) == NULL ||
CU_add_test(suite, "blob_rw_verify_iov_nomem", blob_rw_verify_iov_nomem) == NULL ||
CU_add_test(suite, "blob_iter", blob_iter) == NULL ||
CU_add_test(suite, "blob_xattr", blob_xattr) == NULL ||
CU_add_test(suite, "bs_load", bs_load) == NULL ||

View File

@@ -80,6 +80,62 @@ dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payloa
cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}
static void
__check_iov(struct iovec *iov, int iovcnt, uint64_t length)
{
int i;
for (i = 0; i < iovcnt; i++) {
length -= iov[i].iov_len;
}
CU_ASSERT(length == 0);
}
static void
dev_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt,
uint64_t lba, uint32_t lba_count,
struct spdk_bs_dev_cb_args *cb_args)
{
uint64_t offset, length;
int i;
offset = lba * DEV_BUFFER_BLOCKLEN;
length = lba_count * DEV_BUFFER_BLOCKLEN;
SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
__check_iov(iov, iovcnt, length);
for (i = 0; i < iovcnt; i++) {
memcpy(iov[i].iov_base, &g_dev_buffer[offset], iov[i].iov_len);
offset += iov[i].iov_len;
}
cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}
static void
dev_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt,
uint64_t lba, uint32_t lba_count,
struct spdk_bs_dev_cb_args *cb_args)
{
uint64_t offset, length;
int i;
offset = lba * DEV_BUFFER_BLOCKLEN;
length = lba_count * DEV_BUFFER_BLOCKLEN;
SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
__check_iov(iov, iovcnt, length);
for (i = 0; i < iovcnt; i++) {
memcpy(&g_dev_buffer[offset], iov[i].iov_base, iov[i].iov_len);
offset += iov[i].iov_len;
}
cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
}
static void
dev_flush(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
struct spdk_bs_dev_cb_args *cb_args)
@@ -109,6 +165,8 @@ init_dev(struct spdk_bs_dev *dev)
dev->destroy = dev_destroy;
dev->read = dev_read;
dev->write = dev_write;
dev->readv = dev_readv;
dev->writev = dev_writev;
dev->flush = dev_flush;
dev->unmap = dev_unmap;
dev->blockcnt = DEV_BUFFER_BLOCKCNT;