blobstore: allow I/O operations to use io unit size smaller than page size.

Signed-off-by: Piotr Pelplinski <piotr.pelplinski@intel.com>
Change-Id: I994b5d46faffd34430cb39e66225929c4cba90ba
Reviewed-on: https://review.gerrithub.io/414935
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Piotr Pelplinski 2018-10-01 16:20:00 +02:00 committed by Jim Harris
parent 14bbde7bd4
commit 6609b776e4
5 changed files with 844 additions and 97 deletions
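
For orientation before the hunks: the change replaces page-granular offsets with io-unit-granular ones throughout the I/O path, where an io unit is the underlying device's block. A minimal standalone sketch of the conversion arithmetic, assuming SPDK_BS_PAGE_SIZE = 4096 and the 512-byte io unit used by the new tests (names here are illustrative, not the SPDK API):

#include <assert.h>
#include <stdint.h>

#define BS_PAGE_SIZE 4096u	/* stands in for SPDK_BS_PAGE_SIZE */

int main(void)
{
	uint32_t io_unit_size = 512;	/* comes from dev->blocklen */
	uint64_t pages_per_cluster = 4;	/* cluster_sz / BS_PAGE_SIZE */
	uint64_t io_units_per_page = BS_PAGE_SIZE / io_unit_size;		/* 8 */
	uint64_t io_units_per_cluster = io_units_per_page * pages_per_cluster;	/* 32 */

	/* io unit 28 sits in page 3 of cluster 0, 4 io units from the cluster boundary */
	assert(28 / io_units_per_page == 3);
	assert(28 / io_units_per_cluster == 0);
	assert(io_units_per_cluster - (28 % io_units_per_cluster) == 4);
	return 0;
}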

View File

@@ -133,8 +133,8 @@ spdk_bs_create_blob_bs_dev(struct spdk_blob *blob)
}
/* snapshot blob */
b->bs_dev.blockcnt = blob->active.num_clusters *
blob->bs->pages_per_cluster;
b->bs_dev.blocklen = SPDK_BS_PAGE_SIZE;
blob->bs->pages_per_cluster * _spdk_bs_io_unit_per_page(blob->bs);
b->bs_dev.blocklen = spdk_bs_get_io_unit_size(blob->bs);
b->bs_dev.create_channel = NULL;
b->bs_dev.destroy_channel = NULL;
b->bs_dev.destroy = blob_bs_dev_destroy;
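
With this hunk a snapshot's bs_dev is sized in io units rather than pages: for example (hypothetical numbers), a 2-cluster blob with 4 pages per cluster and a 512-byte io unit now reports blockcnt = 2 * 4 * 8 = 64 and blocklen = 512, where it previously reported 8 blocks of SPDK_BS_PAGE_SIZE bytes.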

View File

@@ -1612,7 +1612,7 @@ _spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
static void
_spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob,
struct spdk_io_channel *_ch,
uint64_t offset, spdk_bs_user_op_t *op)
uint64_t io_unit, spdk_bs_user_op_t *op)
{
struct spdk_bs_cpl cpl;
struct spdk_bs_channel *ch;
@@ -1631,12 +1631,12 @@ _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob,
return;
}
/* Round the page offset down to the first page in the cluster */
cluster_start_page = _spdk_bs_page_to_cluster_start(blob, offset);
/* Round the io_unit offset down to the first page in the cluster */
cluster_start_page = _spdk_bs_io_unit_to_cluster_start(blob, io_unit);
/* Calculate which index in the metadata cluster array the corresponding
* cluster is supposed to be at. */
cluster_number = _spdk_bs_page_to_cluster(blob->bs, cluster_start_page);
cluster_number = _spdk_bs_io_unit_to_cluster_number(blob, io_unit);
ctx = calloc(1, sizeof(*ctx));
if (!ctx) {
@@ -1697,25 +1697,25 @@ _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob,
}
static void
_spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t page, uint64_t length,
_spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length,
uint64_t *lba, uint32_t *lba_count)
{
*lba_count = _spdk_bs_page_to_lba(blob->bs, length);
*lba_count = length;
if (!_spdk_bs_page_is_allocated(blob, page)) {
if (!_spdk_bs_io_unit_is_allocated(blob, io_unit)) {
assert(blob->back_bs_dev != NULL);
*lba = _spdk_bs_dev_page_to_lba(blob->back_bs_dev, page);
*lba_count = _spdk_bs_blob_lba_to_back_dev_lba(blob, *lba_count);
*lba = _spdk_bs_io_unit_to_back_dev_lba(blob, io_unit);
*lba_count = _spdk_bs_io_unit_to_back_dev_lba(blob, *lba_count);
} else {
*lba = _spdk_bs_blob_page_to_lba(blob, page);
*lba = _spdk_bs_blob_io_unit_to_lba(blob, io_unit);
}
}
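
The routing above is the heart of thin provisioning at io-unit granularity: unallocated io units resolve to the backing device, allocated ones to the blobstore's own device. A self-contained sketch of that branch, using a struct blob_geo that is purely illustrative (the real code derives these values through the inline helpers in blobstore.h):

#include <stdint.h>

struct blob_geo {	/* hypothetical container for the values the helpers derive */
	uint32_t io_unit_size;		/* bs->io_unit_size */
	uint32_t back_blocklen;		/* back_bs_dev->blocklen */
	uint64_t io_units_per_cluster;
	uint64_t cluster_lba;		/* LBA of the allocated cluster holding io_unit */
};

static void
calc_lba(const struct blob_geo *g, int allocated, uint64_t io_unit,
	 uint64_t length, uint64_t *lba, uint32_t *lba_count)
{
	*lba_count = (uint32_t)length;	/* offsets and lengths are already in io units */
	if (!allocated) {
		/* unallocated: read comes from the backing dev, scaled to its block size */
		*lba = io_unit * (g->io_unit_size / g->back_blocklen);
		*lba_count = (uint32_t)(length * (g->io_unit_size / g->back_blocklen));
	} else {
		/* allocated: cluster base LBA plus offset within the cluster */
		*lba = g->cluster_lba + io_unit % g->io_units_per_cluster;
	}
}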
struct op_split_ctx {
struct spdk_blob *blob;
struct spdk_io_channel *channel;
uint64_t page_offset;
uint64_t pages_remaining;
uint64_t io_unit_offset;
uint64_t io_units_remaining;
void *curr_payload;
enum spdk_blob_op_type op_type;
spdk_bs_sequence_t *seq;
@@ -1729,23 +1729,24 @@ _spdk_blob_request_submit_op_split_next(void *cb_arg, int bserrno)
struct spdk_io_channel *ch = ctx->channel;
enum spdk_blob_op_type op_type = ctx->op_type;
uint8_t *buf = ctx->curr_payload;
uint64_t offset = ctx->page_offset;
uint64_t length = ctx->pages_remaining;
uint64_t offset = ctx->io_unit_offset;
uint64_t length = ctx->io_units_remaining;
uint64_t op_length;
if (bserrno != 0 || ctx->pages_remaining == 0) {
if (bserrno != 0 || ctx->io_units_remaining == 0) {
spdk_bs_sequence_finish(ctx->seq, bserrno);
free(ctx);
return;
}
op_length = spdk_min(length, _spdk_bs_num_pages_to_cluster_boundary(blob, offset));
op_length = spdk_min(length, _spdk_bs_num_io_units_to_cluster_boundary(blob,
offset));
/* Update length and payload for next operation */
ctx->pages_remaining -= op_length;
ctx->page_offset += op_length;
ctx->io_units_remaining -= op_length;
ctx->io_unit_offset += op_length;
if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
ctx->curr_payload += (op_length * SPDK_BS_PAGE_SIZE);
ctx->curr_payload += op_length * blob->bs->io_unit_size;
}
switch (op_type) {
@@ -1805,8 +1806,8 @@ _spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob
ctx->blob = blob;
ctx->channel = ch;
ctx->curr_payload = payload;
ctx->page_offset = offset;
ctx->pages_remaining = length;
ctx->io_unit_offset = offset;
ctx->io_units_remaining = length;
ctx->op_type = op_type;
ctx->seq = seq;
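
Requests that span clusters are split so that each sub-operation stays within one cluster. A runnable sketch of that clamping loop, assuming the 32-io-units-per-cluster geometry used by the tests:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t io_units_per_cluster = 32;	/* 4 pages * 8 io units per page */
	uint64_t offset = 28, remaining = 40;

	while (remaining > 0) {
		uint64_t to_boundary = io_units_per_cluster - (offset % io_units_per_cluster);
		uint64_t op_length = remaining < to_boundary ? remaining : to_boundary;

		printf("sub-op: offset=%llu length=%llu\n",
		       (unsigned long long)offset, (unsigned long long)op_length);
		offset += op_length;
		remaining -= op_length;
	}
	return 0;	/* prints sub-ops 28/4, 32/32, 64/4 */
}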
@@ -1856,7 +1857,7 @@ _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blo
return;
}
if (_spdk_bs_page_is_allocated(blob, offset)) {
if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
/* Read from the blob */
spdk_bs_batch_read_dev(batch, payload, lba, lba_count);
} else {
@@ -1869,7 +1870,7 @@ _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blo
}
case SPDK_BLOB_WRITE:
case SPDK_BLOB_WRITE_ZEROES: {
if (_spdk_bs_page_is_allocated(blob, offset)) {
if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
/* Write to the blob */
spdk_bs_batch_t *batch;
@@ -1914,7 +1915,7 @@ _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blo
return;
}
if (_spdk_bs_page_is_allocated(blob, offset)) {
if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
spdk_bs_batch_unmap_dev(batch, lba, lba_count);
}
@@ -1941,12 +1942,11 @@ _spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_ch
return;
}
if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) {
if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
cb_fn(cb_arg, -EINVAL);
return;
}
if (length <= _spdk_bs_num_pages_to_cluster_boundary(blob, offset)) {
if (length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset)) {
_spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length,
cb_fn, cb_arg, op_type);
} else {
@@ -1963,9 +1963,9 @@ struct rw_iov_ctx {
bool read;
int iovcnt;
struct iovec *orig_iov;
uint64_t page_offset;
uint64_t pages_remaining;
uint64_t pages_done;
uint64_t io_unit_offset;
uint64_t io_units_remaining;
uint64_t io_units_done;
struct iovec iov[0];
};
@@ -1984,25 +1984,24 @@ _spdk_rw_iov_split_next(void *cb_arg, int bserrno)
struct iovec *iov, *orig_iov;
int iovcnt;
size_t orig_iovoff;
uint64_t page_count, pages_to_boundary, page_offset;
uint64_t io_units_count, io_units_to_boundary, io_unit_offset;
uint64_t byte_count;
if (bserrno != 0 || ctx->pages_remaining == 0) {
if (bserrno != 0 || ctx->io_units_remaining == 0) {
ctx->cb_fn(ctx->cb_arg, bserrno);
free(ctx);
return;
}
page_offset = ctx->page_offset;
pages_to_boundary = _spdk_bs_num_pages_to_cluster_boundary(blob, page_offset);
page_count = spdk_min(ctx->pages_remaining, pages_to_boundary);
io_unit_offset = ctx->io_unit_offset;
io_units_to_boundary = _spdk_bs_num_io_units_to_cluster_boundary(blob, io_unit_offset);
io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary);
/*
* Get index and offset into the original iov array for our current position in the I/O sequence.
* byte_count will keep track of how many bytes remain until orig_iov and orig_iovoff
* point to the current position in the I/O sequence.
*/
byte_count = ctx->pages_done * sizeof(struct spdk_blob_md_page);
byte_count = ctx->io_units_done * blob->bs->io_unit_size;
orig_iov = &ctx->orig_iov[0];
orig_iovoff = 0;
while (byte_count > 0) {
@@ -2019,7 +2018,7 @@ _spdk_rw_iov_split_next(void *cb_arg, int bserrno)
* Build an iov array for the next I/O in the sequence. byte_count will keep track of how many
* bytes of this next I/O remain to be accounted for in the new iov array.
*/
byte_count = page_count * sizeof(struct spdk_blob_md_page);
byte_count = io_units_count * blob->bs->io_unit_size;
iov = &ctx->iov[0];
iovcnt = 0;
while (byte_count > 0) {
@@ -2033,17 +2032,17 @@ _spdk_rw_iov_split_next(void *cb_arg, int bserrno)
iovcnt++;
}
ctx->page_offset += page_count;
ctx->pages_done += page_count;
ctx->pages_remaining -= page_count;
ctx->io_unit_offset += io_units_count;
ctx->io_units_remaining -= io_units_count;
ctx->io_units_done += io_units_count;
iov = &ctx->iov[0];
if (ctx->read) {
spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, page_offset,
page_count, _spdk_rw_iov_split_next, ctx);
spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
io_units_count, _spdk_rw_iov_split_next, ctx);
} else {
spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, page_offset,
page_count, _spdk_rw_iov_split_next, ctx);
spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
io_units_count, _spdk_rw_iov_split_next, ctx);
}
}
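
The readv/writev split has to re-slice the caller's iov array for each sub-request. The key change above is that byte accounting now uses blob->bs->io_unit_size instead of sizeof(struct spdk_blob_md_page). A sketch of the seek step with hypothetical names, mirroring the while loop in _spdk_rw_iov_split_next:

#include <stddef.h>
#include <stdint.h>
#include <sys/uio.h>

/* Advance (idx, off) into orig_iov by io_units_done * io_unit_size bytes. */
static void
seek_iov(const struct iovec *orig_iov, size_t *idx, size_t *off,
	 uint64_t io_units_done, uint32_t io_unit_size)
{
	uint64_t byte_count = io_units_done * io_unit_size;

	*idx = 0;
	*off = 0;
	while (byte_count > 0) {
		if (byte_count >= orig_iov[*idx].iov_len) {
			byte_count -= orig_iov[*idx].iov_len;
			(*idx)++;
		} else {
			*off = byte_count;
			byte_count = 0;
		}
	}
}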
@@ -2066,7 +2065,7 @@ _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel
return;
}
if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) {
if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
cb_fn(cb_arg, -EINVAL);
return;
}
@@ -2085,7 +2084,7 @@ _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel
* in a batch. That would also require creating an intermediate spdk_bs_cpl that would get called
* when the batch was completed, to allow for freeing the memory for the iov arrays.
*/
if (spdk_likely(length <= _spdk_bs_num_pages_to_cluster_boundary(blob, offset))) {
if (spdk_likely(length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset))) {
uint32_t lba_count;
uint64_t lba;
@@ -2119,14 +2118,14 @@ _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel
return;
}
if (_spdk_bs_page_is_allocated(blob, offset)) {
if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
spdk_bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
} else {
spdk_bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count,
_spdk_rw_iov_done, NULL);
}
} else {
if (_spdk_bs_page_is_allocated(blob, offset)) {
if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
spdk_bs_sequence_t *seq;
seq = spdk_bs_sequence_start(_channel, &cpl);
@@ -2140,7 +2139,8 @@ _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel
/* Queue this operation and allocate the cluster */
spdk_bs_user_op_t *op;
op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, length);
op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset,
length);
if (!op) {
cb_fn(cb_arg, -ENOMEM);
return;
@@ -2165,9 +2165,9 @@ _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel
ctx->read = read;
ctx->orig_iov = iov;
ctx->iovcnt = iovcnt;
ctx->page_offset = offset;
ctx->pages_remaining = length;
ctx->pages_done = 0;
ctx->io_unit_offset = offset;
ctx->io_units_remaining = length;
ctx->io_units_done = 0;
_spdk_rw_iov_split_next(ctx, 0);
}
@@ -2463,6 +2463,7 @@ _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_b
bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
bs->num_free_clusters = bs->total_clusters;
bs->used_clusters = spdk_bit_array_create(bs->total_clusters);
bs->io_unit_size = dev->blocklen;
if (bs->used_clusters == NULL) {
free(bs);
return -ENOMEM;
@@ -3098,11 +3099,16 @@ _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
}
if (ctx->super->io_unit_size == 0) {
ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
}
/* Parse the super block */
ctx->bs->clean = 1;
ctx->bs->cluster_sz = ctx->super->cluster_size;
ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
ctx->bs->io_unit_size = ctx->super->io_unit_size;
rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
if (rc < 0) {
_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
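
Backward compatibility: superblocks written before this change have io_unit_size == 0, so the load path above falls back to SPDK_BS_PAGE_SIZE; the blob_io_unit_compatiblity test below exercises exactly this. The rule, as a pure function (BS_PAGE_SIZE standing in for SPDK_BS_PAGE_SIZE):

#include <stdint.h>

#define BS_PAGE_SIZE 4096u

static uint32_t
effective_io_unit_size(uint32_t super_io_unit_size)
{
	/* 0 means the superblock predates the io_unit_size field */
	return super_io_unit_size != 0 ? super_io_unit_size : BS_PAGE_SIZE;
}
/* effective_io_unit_size(512) == 512, effective_io_unit_size(0) == 4096 */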
@@ -3554,7 +3560,6 @@ spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
} else {
bs->md_len = opts.num_md_pages;
}
rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len);
if (rc < 0) {
_spdk_bs_free(bs);
@@ -3593,6 +3598,7 @@ spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
ctx->super->super_blob = bs->super_blob;
ctx->super->clean = 0;
ctx->super->cluster_size = bs->cluster_sz;
ctx->super->io_unit_size = bs->io_unit_size;
memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype));
/* Calculate how many pages the metadata consumes at the front
@@ -3975,7 +3981,7 @@ spdk_bs_get_page_size(struct spdk_blob_store *bs)
uint64_t
spdk_bs_get_io_unit_size(struct spdk_blob_store *bs)
{
return SPDK_BS_PAGE_SIZE;
return bs->io_unit_size;
}
uint64_t
@@ -4028,7 +4034,7 @@ uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob)
{
assert(blob != NULL);
return spdk_blob_get_num_pages(blob);
return spdk_blob_get_num_pages(blob) * _spdk_bs_io_unit_per_page(blob->bs);
}
uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob)
@@ -4728,7 +4734,7 @@ _spdk_bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
}
if (ctx->cluster < _blob->active.num_clusters) {
offset = _spdk_bs_cluster_to_page(_blob->bs, ctx->cluster);
offset = _spdk_bs_cluster_to_lba(_blob->bs, ctx->cluster);
/* We may safely increment a cluster before write */
ctx->cluster++;

View File

@@ -173,6 +173,7 @@ struct spdk_blob_store {
uint64_t total_data_clusters;
uint64_t num_free_clusters;
uint64_t pages_per_cluster;
uint32_t io_unit_size;
spdk_blob_id super_blob;
struct spdk_bs_type bstype;
@@ -344,8 +345,9 @@ struct spdk_bs_super_block {
uint32_t used_blobid_mask_len; /* Count, in pages */
uint64_t size; /* size of blobstore in bytes */
uint32_t io_unit_size; /* Size of io unit in bytes */
uint8_t reserved[4004];
uint8_t reserved[4000];
uint32_t crc;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block) == 0x1000, "Invalid super block size");
@@ -388,12 +390,6 @@ _spdk_bs_dev_byte_to_lba(struct spdk_bs_dev *bs_dev, uint64_t length)
return length / bs_dev->blocklen;
}
static inline uint64_t
_spdk_bs_lba_to_byte(struct spdk_blob_store *bs, uint64_t lba)
{
return lba * bs->dev->blocklen;
}
static inline uint64_t
_spdk_bs_page_to_lba(struct spdk_blob_store *bs, uint64_t page)
{
@@ -407,27 +403,15 @@ _spdk_bs_dev_page_to_lba(struct spdk_bs_dev *bs_dev, uint64_t page)
}
static inline uint64_t
_spdk_bs_lba_to_page(struct spdk_blob_store *bs, uint64_t lba)
_spdk_bs_io_unit_per_page(struct spdk_blob_store *bs)
{
uint64_t lbas_per_page;
lbas_per_page = SPDK_BS_PAGE_SIZE / bs->dev->blocklen;
assert(lba % lbas_per_page == 0);
return lba / lbas_per_page;
return SPDK_BS_PAGE_SIZE / bs->io_unit_size;
}
static inline uint64_t
_spdk_bs_dev_lba_to_page(struct spdk_bs_dev *bs_dev, uint64_t lba)
_spdk_bs_io_unit_to_page(struct spdk_blob_store *bs, uint64_t io_unit)
{
uint64_t lbas_per_page;
lbas_per_page = SPDK_BS_PAGE_SIZE / bs_dev->blocklen;
assert(lba % lbas_per_page == 0);
return lba / lbas_per_page;
return io_unit / _spdk_bs_io_unit_per_page(bs);
}
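
These two helpers replace the old lba/page converters; a quick check of their behavior, assuming a 4096-byte page (BS_PAGE_SIZE stands in for SPDK_BS_PAGE_SIZE, the functions are illustrative stand-ins for the inlines above):

#include <assert.h>
#include <stdint.h>

#define BS_PAGE_SIZE 4096u

static uint64_t io_unit_per_page(uint32_t io_unit_size)
{
	return BS_PAGE_SIZE / io_unit_size;
}

static uint64_t io_unit_to_page(uint32_t io_unit_size, uint64_t io_unit)
{
	return io_unit / io_unit_per_page(io_unit_size);
}

int main(void)
{
	assert(io_unit_per_page(512) == 8);
	assert(io_unit_to_page(512, 7) == 0);
	assert(io_unit_to_page(512, 8) == 1);
	assert(io_unit_per_page(4096) == 1);	/* page-sized io units degenerate to 1:1 */
	return 0;
}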
static inline uint64_t
@@ -459,15 +443,15 @@ _spdk_bs_lba_to_cluster(struct spdk_blob_store *bs, uint64_t lba)
}
static inline uint64_t
_spdk_bs_blob_lba_to_back_dev_lba(struct spdk_blob *blob, uint64_t lba)
_spdk_bs_io_unit_to_back_dev_lba(struct spdk_blob *blob, uint64_t io_unit)
{
return lba * blob->bs->dev->blocklen / blob->back_bs_dev->blocklen;
return io_unit * (blob->bs->io_unit_size / blob->back_bs_dev->blocklen);
}
static inline uint64_t
_spdk_bs_blob_lba_from_back_dev_lba(struct spdk_blob *blob, uint64_t lba)
_spdk_bs_back_dev_lba_to_io_unit(struct spdk_blob *blob, uint64_t lba)
{
return lba * blob->back_bs_dev->blocklen / blob->bs->dev->blocklen;
return lba * (blob->back_bs_dev->blocklen / blob->bs->io_unit_size);
}
/* End basic conversions */
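
The back-dev conversions above scale by the ratio of the two block sizes using integer division, so they assume the sizes divide evenly in the indicated direction. A hedged sketch of the io-unit-to-backing-LBA direction with concrete numbers:

#include <assert.h>
#include <stdint.h>

/* As in _spdk_bs_io_unit_to_back_dev_lba; assumes io_unit_size is a
 * multiple of the backing dev's blocklen. */
static uint64_t
io_unit_to_back_dev_lba(uint64_t io_unit, uint32_t io_unit_size, uint32_t back_blocklen)
{
	return io_unit * (io_unit_size / back_blocklen);
}

int main(void)
{
	assert(io_unit_to_back_dev_lba(3, 512, 512) == 3);	/* equal sizes: 1:1 */
	assert(io_unit_to_back_dev_lba(3, 4096, 512) == 24);	/* page-sized io units */
	return 0;
}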
@@ -491,25 +475,44 @@ _spdk_bs_page_to_blobid(uint64_t page_idx)
return SPDK_BLOB_BLOBID_HIGH_BIT | page_idx;
}
/* Given a page offset into a blob, look up the LBA for the
* start of that page.
/* Given an io unit offset into a blob, look up the LBA for the
* start of that io unit.
*/
static inline uint64_t
_spdk_bs_blob_page_to_lba(struct spdk_blob *blob, uint64_t page)
_spdk_bs_blob_io_unit_to_lba(struct spdk_blob *blob, uint64_t io_unit)
{
uint64_t lba;
uint64_t pages_per_cluster;
uint64_t io_units_per_cluster;
uint64_t io_units_per_page;
uint64_t page;
page = _spdk_bs_io_unit_to_page(blob->bs, io_unit);
pages_per_cluster = blob->bs->pages_per_cluster;
io_units_per_page = _spdk_bs_io_unit_per_page(blob->bs);
io_units_per_cluster = io_units_per_page * pages_per_cluster;
assert(page < blob->active.num_clusters * pages_per_cluster);
lba = blob->active.clusters[page / pages_per_cluster];
lba += _spdk_bs_page_to_lba(blob->bs, page % pages_per_cluster);
lba += io_unit % io_units_per_cluster;
return lba;
}
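
Worked example for the lookup above (hypothetical numbers): with 4 pages per cluster and 8 io units per page, io_unit 37 falls in page 37 / 8 = 4, hence cluster 4 / 4 = 1; if active.clusters[1] = 1024, then lba = 1024 + 37 % 32 = 1029.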
/* Given an io_unit offset into a blob, look up the number of io_units until the
* next cluster boundary.
*/
static inline uint32_t
_spdk_bs_num_io_units_to_cluster_boundary(struct spdk_blob *blob, uint64_t io_unit)
{
uint64_t io_units_per_cluster;
io_units_per_cluster = _spdk_bs_io_unit_per_page(blob->bs) * blob->bs->pages_per_cluster;
return io_units_per_cluster - (io_unit % io_units_per_cluster);
}
/* Given a page offset into a blob, look up the number of pages until the
* next cluster boundary.
*/
@@ -523,25 +526,36 @@ _spdk_bs_num_pages_to_cluster_boundary(struct spdk_blob *blob, uint64_t page)
return pages_per_cluster - (page % pages_per_cluster);
}
/* Given a page offset into a blob, look up the number of pages into blob to beginning of current cluster */
/* Given an io_unit offset into a blob, look up the number of pages into blob to beginning of current cluster */
static inline uint32_t
_spdk_bs_page_to_cluster_start(struct spdk_blob *blob, uint64_t page)
_spdk_bs_io_unit_to_cluster_start(struct spdk_blob *blob, uint64_t io_unit)
{
uint64_t pages_per_cluster;
uint64_t page;
pages_per_cluster = blob->bs->pages_per_cluster;
page = _spdk_bs_io_unit_to_page(blob->bs, io_unit);
return page - (page % pages_per_cluster);
}
/* Given a page offset into a blob, look up if it is from allocated cluster. */
/* Given an io_unit offset into a blob, look up the number of the cluster it belongs to. */
static inline uint32_t
_spdk_bs_io_unit_to_cluster_number(struct spdk_blob *blob, uint64_t io_unit)
{
return (io_unit / _spdk_bs_io_unit_per_page(blob->bs)) / blob->bs->pages_per_cluster;
}
/* Given an io unit offset into a blob, look up if it is from allocated cluster. */
static inline bool
_spdk_bs_page_is_allocated(struct spdk_blob *blob, uint64_t page)
_spdk_bs_io_unit_is_allocated(struct spdk_blob *blob, uint64_t io_unit)
{
uint64_t lba;
uint64_t page;
uint64_t pages_per_cluster;
pages_per_cluster = blob->bs->pages_per_cluster;
page = _spdk_bs_io_unit_to_page(blob->bs, io_unit);
assert(page < blob->active.num_clusters * pages_per_cluster);

View File

@@ -103,7 +103,7 @@ zeroes_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
static struct spdk_bs_dev g_zeroes_bs_dev = {
.blockcnt = UINT64_MAX,
.blocklen = SPDK_BS_PAGE_SIZE,
.blocklen = 512,
.create_channel = NULL,
.destroy_channel = NULL,
.destroy = zeroes_destroy,

View File

@@ -5104,6 +5104,731 @@ blob_relations(void)
g_bs = NULL;
}
static void
test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
{
uint8_t payload_ff[64 * 512];
uint8_t payload_aa[64 * 512];
uint8_t payload_00[64 * 512];
uint8_t *cluster0, *cluster1;
memset(payload_ff, 0xFF, sizeof(payload_ff));
memset(payload_aa, 0xAA, sizeof(payload_aa));
memset(payload_00, 0x00, sizeof(payload_00));
/* Try to perform I/O with io unit = 512 */
spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
/* If the blob is thin provisioned, the first cluster should have been allocated by now */
SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
/* Each character 0-F symbolizes a single 512-byte io_unit filled with that character.
* Pages are separated by |. The whole block [...] symbolizes one cluster (containing 4 pages). */
/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
/* Verify write with offset on first page */
spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
/* Verify write with offset on first page */
spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
/* Verify write with offset on second page */
spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
/* Verify write across multiple pages */
spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
/* Verify write across multiple clusters */
spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
/* Verify write to second cluster */
spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
}
static void
test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
{
uint8_t payload_read[64 * 512];
uint8_t payload_ff[64 * 512];
uint8_t payload_aa[64 * 512];
uint8_t payload_00[64 * 512];
memset(payload_ff, 0xFF, sizeof(payload_ff));
memset(payload_aa, 0xAA, sizeof(payload_aa));
memset(payload_00, 0x00, sizeof(payload_00));
/* Read only first io unit */
/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
* payload_read: F000 0000 | 0000 0000 ... */
memset(payload_read, 0x00, sizeof(payload_read));
spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
/* Read four io_units starting from offset = 2
* cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
* payload_read: F0AA 0000 | 0000 0000 ... */
memset(payload_read, 0x00, sizeof(payload_read));
spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
/* Read eight io_units across multiple pages
* cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
* payload_read: AAAA AAAA | 0000 0000 ... */
memset(payload_read, 0x00, sizeof(payload_read));
spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
/* Read eight io_units across multiple clusters
* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
* cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
* payload_read: FFFF FFFF | 0000 0000 ... */
memset(payload_read, 0x00, sizeof(payload_read));
spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
/* Read four io_units from second cluster
* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
* payload_read: 00FF 0000 | 0000 0000 ... */
memset(payload_read, 0x00, sizeof(payload_read));
spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
/* Read second cluster
* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
* payload_read: FFFF 0000 | 0000 FF00 ... */
memset(payload_read, 0x00, sizeof(payload_read));
spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
/* Read whole two clusters
* cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
memset(payload_read, 0x00, sizeof(payload_read));
spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
}
static void
test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
{
uint8_t payload_ff[64 * 512];
uint8_t payload_aa[64 * 512];
uint8_t payload_00[64 * 512];
uint8_t *cluster0, *cluster1;
memset(payload_ff, 0xFF, sizeof(payload_ff));
memset(payload_aa, 0xAA, sizeof(payload_aa));
memset(payload_00, 0x00, sizeof(payload_00));
cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
/* Unmap */
spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
}
static void
test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
{
uint8_t payload_ff[64 * 512];
uint8_t payload_aa[64 * 512];
uint8_t payload_00[64 * 512];
uint8_t *cluster0, *cluster1;
memset(payload_ff, 0xFF, sizeof(payload_ff));
memset(payload_aa, 0xAA, sizeof(payload_aa));
memset(payload_00, 0x00, sizeof(payload_00));
cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
/* Write zeroes */
spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
}
static void
test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
{
uint8_t payload_ff[64 * 512];
uint8_t payload_aa[64 * 512];
uint8_t payload_00[64 * 512];
uint8_t *cluster0, *cluster1;
struct iovec iov[4];
memset(payload_ff, 0xFF, sizeof(payload_ff));
memset(payload_aa, 0xAA, sizeof(payload_aa));
memset(payload_00, 0x00, sizeof(payload_00));
/* Try to perform I/O with io unit = 512 */
iov[0].iov_base = payload_ff;
iov[0].iov_len = 1 * 512;
spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
/* If the blob is thin provisioned, the first cluster should have been allocated by now */
SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
/* Each character 0-F symbolizes a single 512-byte io_unit filled with that character.
* Pages are separated by |. The whole block [...] symbolizes one cluster (containing 4 pages). */
/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
/* Verify write with offset on first page */
iov[0].iov_base = payload_ff;
iov[0].iov_len = 1 * 512;
spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
/* Verify write with offset on first page */
iov[0].iov_base = payload_ff;
iov[0].iov_len = 4 * 512;
spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
/* Verify write with offset on second page */
iov[0].iov_base = payload_ff;
iov[0].iov_len = 4 * 512;
spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
/* Verify write across multiple pages */
iov[0].iov_base = payload_aa;
iov[0].iov_len = 8 * 512;
spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL);
/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
/* Verify write across multiple clusters */
iov[0].iov_base = payload_ff;
iov[0].iov_len = 8 * 512;
spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL);
SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
/* Verify write to second cluster */
iov[0].iov_base = payload_ff;
iov[0].iov_len = 2 * 512;
spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL);
SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
}
static void
test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
{
uint8_t payload_read[64 * 512];
uint8_t payload_ff[64 * 512];
uint8_t payload_aa[64 * 512];
uint8_t payload_00[64 * 512];
struct iovec iov[4];
memset(payload_ff, 0xFF, sizeof(payload_ff));
memset(payload_aa, 0xAA, sizeof(payload_aa));
memset(payload_00, 0x00, sizeof(payload_00));
/* Read only first io unit */
/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
* payload_read: F000 0000 | 0000 0000 ... */
memset(payload_read, 0x00, sizeof(payload_read));
iov[0].iov_base = payload_read;
iov[0].iov_len = 1 * 512;
spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
/* Read four io_units starting from offset = 2
* cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
* payload_read: F0AA 0000 | 0000 0000 ... */
memset(payload_read, 0x00, sizeof(payload_read));
iov[0].iov_base = payload_read;
iov[0].iov_len = 4 * 512;
spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
/* Read eight io_units across multiple pages
* cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
* payload_read: AAAA AAAA | 0000 0000 ... */
memset(payload_read, 0x00, sizeof(payload_read));
iov[0].iov_base = payload_read;
iov[0].iov_len = 4 * 512;
iov[1].iov_base = payload_read + 4 * 512;
iov[1].iov_len = 4 * 512;
spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
/* Read eight io_units across multiple clusters
* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
* cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
* payload_read: FFFF FFFF | 0000 0000 ... */
memset(payload_read, 0x00, sizeof(payload_read));
iov[0].iov_base = payload_read;
iov[0].iov_len = 2 * 512;
iov[1].iov_base = payload_read + 2 * 512;
iov[1].iov_len = 2 * 512;
iov[2].iov_base = payload_read + 4 * 512;
iov[2].iov_len = 2 * 512;
iov[3].iov_base = payload_read + 6 * 512;
iov[3].iov_len = 2 * 512;
spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
/* Read four io_units from second cluster
* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
* payload_read: 00FF 0000 | 0000 0000 ... */
memset(payload_read, 0x00, sizeof(payload_read));
iov[0].iov_base = payload_read;
iov[0].iov_len = 1 * 512;
iov[1].iov_base = payload_read + 1 * 512;
iov[1].iov_len = 3 * 512;
spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
/* Read second cluster
* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
* payload_read: FFFF 0000 | 0000 FF00 ... */
memset(payload_read, 0x00, sizeof(payload_read));
iov[0].iov_base = payload_read;
iov[0].iov_len = 1 * 512;
iov[1].iov_base = payload_read + 1 * 512;
iov[1].iov_len = 2 * 512;
iov[2].iov_base = payload_read + 3 * 512;
iov[2].iov_len = 4 * 512;
iov[3].iov_base = payload_read + 7 * 512;
iov[3].iov_len = 25 * 512;
spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
/* Read whole two clusters
* cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
* cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
memset(payload_read, 0x00, sizeof(payload_read));
iov[0].iov_base = payload_read;
iov[0].iov_len = 1 * 512;
iov[1].iov_base = payload_read + 1 * 512;
iov[1].iov_len = 8 * 512;
iov[2].iov_base = payload_read + 9 * 512;
iov[2].iov_len = 16 * 512;
iov[3].iov_base = payload_read + 25 * 512;
iov[3].iov_len = 39 * 512;
spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
}
static void
blob_io_unit(void)
{
struct spdk_bs_opts bsopts;
struct spdk_blob_opts opts;
struct spdk_bs_dev *dev;
struct spdk_blob *blob, *snapshot, *clone;
spdk_blob_id blobid;
struct spdk_io_channel *channel;
/* Create dev with 512-byte io unit size */
spdk_bs_opts_init(&bsopts);
bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; // 4 pages * 8 io_units per page = 32 io_units per cluster
snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
/* Create a device whose 512-byte block size becomes the blobstore io unit */
dev = init_dev();
dev->blocklen = 512;
dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
/* Initialize a new blob store */
spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
CU_ASSERT(spdk_bs_get_io_unit_size(g_bs) == 512);
channel = spdk_bs_alloc_io_channel(g_bs);
/* Create thick provisioned blob */
spdk_blob_opts_init(&opts);
opts.thin_provision = false;
opts.num_clusters = 32;
spdk_bs_create_blob_ext(g_bs, &opts, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobid = g_blobid;
spdk_bs_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
blob = g_blob;
test_io_write(dev, blob, channel);
test_io_read(dev, blob, channel);
test_io_zeroes(dev, blob, channel);
test_iov_write(dev, blob, channel);
test_iov_read(dev, blob, channel);
test_io_unmap(dev, blob, channel);
spdk_blob_close(blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
blob = NULL;
g_blob = NULL;
/* Create thin provisioned blob */
spdk_blob_opts_init(&opts);
opts.thin_provision = true;
opts.num_clusters = 32;
spdk_bs_create_blob_ext(g_bs, &opts, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobid = g_blobid;
spdk_bs_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
blob = g_blob;
test_io_write(dev, blob, channel);
test_io_read(dev, blob, channel);
test_io_zeroes(dev, blob, channel);
test_iov_write(dev, blob, channel);
test_iov_read(dev, blob, channel);
/* Create snapshot */
spdk_bs_create_snapshot(g_bs, blobid, NULL, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobid = g_blobid;
spdk_bs_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
snapshot = g_blob;
spdk_bs_create_clone(g_bs, blobid, NULL, blob_op_with_id_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
blobid = g_blobid;
spdk_bs_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
CU_ASSERT(g_blob != NULL);
clone = g_blob;
test_io_read(dev, blob, channel);
test_io_read(dev, snapshot, channel);
test_io_read(dev, clone, channel);
test_iov_read(dev, blob, channel);
test_iov_read(dev, snapshot, channel);
test_iov_read(dev, clone, channel);
/* Inflate clone */
spdk_bs_inflate_blob(g_bs, channel, blobid, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
test_io_read(dev, clone, channel);
test_io_unmap(dev, clone, channel);
test_iov_write(dev, clone, channel);
test_iov_read(dev, clone, channel);
spdk_blob_close(blob, blob_op_complete, NULL);
spdk_blob_close(snapshot, blob_op_complete, NULL);
spdk_blob_close(clone, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
blob = NULL;
g_blob = NULL;
/* Unload the blob store */
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
g_blob = NULL;
g_blobid = 0;
}
static void
blob_io_unit_compatiblity(void)
{
struct spdk_bs_opts bsopts;
struct spdk_bs_dev *dev;
struct spdk_bs_super_block *super;
/* Create dev with 512-byte io unit size */
spdk_bs_opts_init(&bsopts);
bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; // 4 pages * 8 io_units per page = 32 io_units per cluster
snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
/* Create a device whose 512-byte block size becomes the blobstore io unit */
dev = init_dev();
dev->blocklen = 512;
dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
/* Initialize a new blob store */
spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
CU_ASSERT(spdk_bs_get_io_unit_size(g_bs) == 512);
/* Unload the blob store */
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
/* Modify super block to behave like older version.
* Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */
super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
super->io_unit_size = 0;
super->crc = _spdk_blob_md_page_calc_crc(super);
dev = init_dev();
dev->blocklen = 512;
dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_bs != NULL);
CU_ASSERT(spdk_bs_get_io_unit_size(g_bs) == SPDK_BS_PAGE_SIZE);
/* Unload the blob store */
spdk_bs_unload(g_bs, bs_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
g_blob = NULL;
g_blobid = 0;
}
int main(int argc, char **argv)
{
CU_pSuite suite = NULL;
@@ -5169,7 +5894,9 @@ int main(int argc, char **argv)
CU_add_test(suite, "blob_inflate_rw", blob_inflate_rw) == NULL ||
CU_add_test(suite, "blob_snapshot_freeze_io", blob_snapshot_freeze_io) == NULL ||
CU_add_test(suite, "blob_operation_split_rw", blob_operation_split_rw) == NULL ||
CU_add_test(suite, "blob_operation_split_rw_iov", blob_operation_split_rw_iov) == NULL
CU_add_test(suite, "blob_operation_split_rw_iov", blob_operation_split_rw_iov) == NULL ||
CU_add_test(suite, "blob_io_unit", blob_io_unit) == NULL ||
CU_add_test(suite, "blob_io_unit_compatiblity", blob_io_unit_compatiblity) == NULL
) {
CU_cleanup_registry();
return CU_get_error();