blob: revert spdk_blob_data changes

There was some thinking that we would need to allocate
I/O channels on a per-blob basis to handle dynamic
resizing during I/O.  Making spdk_blob an opaque handle,
with the existing spdk_blob structure renamed to
spdk_blob_data, was a first step towards making that
happen.  But more recent work on blobstore has simplified
the resizing approach, so the spdk_blob_data split is no
longer needed.  Revert it.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I22e07008faceb70649ee560176ebe5e014d5f1a3

Reviewed-on: https://review.gerrithub.io/400881
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Jim Harris 2018-02-20 11:14:12 -07:00
parent a2c6b71614
commit c8efd8a8b2
4 changed files with 164 additions and 207 deletions
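
Before the diffs, a minimal sketch of the pattern being reverted may help. The
macro and struct names (__blob_to_data, __data_to_blob, spdk_blob_data) are
taken from the diff below; the trimmed struct layouts and the
get_num_clusters_old/get_num_clusters_new helpers are illustrative only,
modeled on spdk_blob_get_num_clusters() as it appears in this commit.

    #include <assert.h>
    #include <stdint.h>

    /* Before the revert: spdk_blob is an opaque handle for API users, and
     * internal code reaches the real structure, struct spdk_blob_data,
     * through a cast macro. */
    struct spdk_blob;                       /* opaque handle */

    struct spdk_blob_data {
            struct {
                    uint64_t num_clusters;  /* only field used in this sketch */
            } active;
    };

    #define __blob_to_data(x) ((struct spdk_blob_data *)(x))
    #define __data_to_blob(x) ((struct spdk_blob *)(x))

    uint64_t
    get_num_clusters_old(struct spdk_blob *_blob)
    {
            struct spdk_blob_data *blob = __blob_to_data(_blob);

            assert(blob != NULL);
            return blob->active.num_clusters;
    }

    /* After the revert: struct spdk_blob carries the fields itself, so the
     * cast and the _blob/blob naming split go away. */
    struct spdk_blob {
            struct {
                    uint64_t num_clusters;
            } active;
    };

    uint64_t
    get_num_clusters_new(struct spdk_blob *blob)
    {
            assert(blob != NULL);
            return blob->active.num_clusters;
    }

With the rename reverted, public callers and internal helpers share the same
struct spdk_blob type, which is why most hunks below simply drop the
__blob_to_data()/__data_to_blob() casts and the _blob/blob naming split.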


@ -462,7 +462,7 @@ show_blob(struct cli_context_t *cli_context)
* may be useful for debug of blobstore based applications.
*/
printf("\nBlob Private Info:\n");
switch (__blob_to_data(cli_context->blob)->state) {
switch (cli_context->blob->state) {
case SPDK_BLOB_STATE_DIRTY:
printf("state: DIRTY\n");
break;
@ -480,7 +480,7 @@ show_blob(struct cli_context_t *cli_context)
break;
}
printf("open ref count: %d\n",
__blob_to_data(cli_context->blob)->open_ref);
cli_context->blob->open_ref);
spdk_xattr_names_free(names);
}


@ -50,14 +50,14 @@
static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob_data *blob, uint32_t cluster_num,
void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg);
static int _spdk_blob_set_xattr(struct spdk_blob_data *blob, const char *name, const void *value,
static int _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
uint16_t value_len, bool internal);
static int _spdk_blob_get_xattr_value(struct spdk_blob_data *blob, const char *name,
static int _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
const void **value, size_t *value_len, bool internal);
static int _spdk_blob_remove_xattr(struct spdk_blob_data *blob, const char *name, bool internal);
static int _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);
static inline size_t
divide_round_up(size_t num, size_t divisor)
@ -79,7 +79,7 @@ _spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
}
static int
_spdk_blob_insert_cluster(struct spdk_blob_data *blob, uint32_t cluster_num, uint64_t cluster)
_spdk_blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
uint64_t *cluster_lba = &blob->active.clusters[cluster_num];
@ -94,7 +94,7 @@ _spdk_blob_insert_cluster(struct spdk_blob_data *blob, uint32_t cluster_num, uin
}
static int
_spdk_bs_allocate_cluster(struct spdk_blob_data *blob, uint32_t cluster_num,
_spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
uint64_t *lowest_free_cluster, bool update_map)
{
pthread_mutex_lock(&blob->bs->used_clusters_mutex);
@ -143,10 +143,10 @@ spdk_blob_opts_init(struct spdk_blob_opts *opts)
opts->xattrs.get_value = NULL;
}
static struct spdk_blob_data *
static struct spdk_blob *
_spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
struct spdk_blob_data *blob;
struct spdk_blob *blob;
blob = calloc(1, sizeof(*blob));
if (!blob) {
@ -186,7 +186,7 @@ _spdk_xattrs_free(struct spdk_xattr_tailq *xattrs)
}
static void
_spdk_blob_free(struct spdk_blob_data *blob)
_spdk_blob_free(struct spdk_blob *blob)
{
assert(blob != NULL);
@ -202,7 +202,7 @@ _spdk_blob_free(struct spdk_blob_data *blob)
}
static int
_spdk_blob_mark_clean(struct spdk_blob_data *blob)
_spdk_blob_mark_clean(struct spdk_blob *blob)
{
uint64_t *clusters = NULL;
uint32_t *pages = NULL;
@ -247,7 +247,7 @@ _spdk_blob_mark_clean(struct spdk_blob_data *blob)
}
static int
_spdk_blob_deserialize_xattr(struct spdk_blob_data *blob,
_spdk_blob_deserialize_xattr(struct spdk_blob *blob,
struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
struct spdk_xattr *xattr;
@ -289,7 +289,7 @@ _spdk_blob_deserialize_xattr(struct spdk_blob_data *blob,
static int
_spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob_data *blob)
_spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
struct spdk_blob_md_descriptor *desc;
size_t cur_desc = 0;
@ -419,7 +419,7 @@ _spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob_dat
static int
_spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
struct spdk_blob_data *blob)
struct spdk_blob *blob)
{
const struct spdk_blob_md_page *page;
uint32_t i;
@ -457,7 +457,7 @@ _spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
}
static int
_spdk_blob_serialize_add_page(const struct spdk_blob_data *blob,
_spdk_blob_serialize_add_page(const struct spdk_blob *blob,
struct spdk_blob_md_page **pages,
uint32_t *page_count,
struct spdk_blob_md_page **last_page)
@ -536,7 +536,7 @@ _spdk_blob_serialize_xattr(const struct spdk_xattr *xattr,
}
static void
_spdk_blob_serialize_extent(const struct spdk_blob_data *blob,
_spdk_blob_serialize_extent(const struct spdk_blob *blob,
uint64_t start_cluster, uint64_t *next_cluster,
uint8_t *buf, size_t buf_sz)
{
@ -593,7 +593,7 @@ _spdk_blob_serialize_extent(const struct spdk_blob_data *blob,
}
static void
_spdk_blob_serialize_flags(const struct spdk_blob_data *blob,
_spdk_blob_serialize_flags(const struct spdk_blob *blob,
uint8_t *buf, size_t *buf_sz)
{
struct spdk_blob_md_descriptor_flags *desc;
@ -615,7 +615,7 @@ _spdk_blob_serialize_flags(const struct spdk_blob_data *blob,
}
static int
_spdk_blob_serialize_xattrs(const struct spdk_blob_data *blob,
_spdk_blob_serialize_xattrs(const struct spdk_blob *blob,
const struct spdk_xattr_tailq *xattrs, bool internal,
struct spdk_blob_md_page **pages,
struct spdk_blob_md_page *cur_page,
@ -667,7 +667,7 @@ _spdk_blob_serialize_xattrs(const struct spdk_blob_data *blob,
}
static int
_spdk_blob_serialize(const struct spdk_blob_data *blob, struct spdk_blob_md_page **pages,
_spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
uint32_t *page_count)
{
struct spdk_blob_md_page *cur_page;
@ -735,7 +735,7 @@ _spdk_blob_serialize(const struct spdk_blob_data *blob, struct spdk_blob_md_page
}
struct spdk_blob_load_ctx {
struct spdk_blob_data *blob;
struct spdk_blob *blob;
struct spdk_blob_md_page *pages;
uint32_t num_pages;
@ -761,7 +761,7 @@ static void
_spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_blob_load_ctx *ctx = cb_arg;
struct spdk_blob_data *blob = ctx->blob;
struct spdk_blob *blob = ctx->blob;
struct spdk_blob_md_page *page;
int rc;
uint32_t crc;
@ -826,7 +826,7 @@ _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
/* Load a blob from disk given a blobid */
static void
_spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob_data *blob,
_spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
struct spdk_blob_load_ctx *ctx;
@ -869,7 +869,7 @@ _spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob_data *blob,
}
struct spdk_blob_persist_ctx {
struct spdk_blob_data *blob;
struct spdk_blob *blob;
struct spdk_blob_md_page *pages;
@ -883,7 +883,7 @@ static void
_spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_blob_persist_ctx *ctx = cb_arg;
struct spdk_blob_data *blob = ctx->blob;
struct spdk_blob *blob = ctx->blob;
if (bserrno == 0) {
_spdk_blob_mark_clean(blob);
@ -901,7 +901,7 @@ static void
_spdk_blob_persist_unmap_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_blob_persist_ctx *ctx = cb_arg;
struct spdk_blob_data *blob = ctx->blob;
struct spdk_blob *blob = ctx->blob;
struct spdk_blob_store *bs = blob->bs;
void *tmp;
size_t i;
@ -934,7 +934,7 @@ static void
_spdk_blob_persist_unmap_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_blob_persist_ctx *ctx = cb_arg;
struct spdk_blob_data *blob = ctx->blob;
struct spdk_blob *blob = ctx->blob;
struct spdk_blob_store *bs = blob->bs;
spdk_bs_batch_t *batch;
size_t i;
@ -990,7 +990,7 @@ static void
_spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_blob_persist_ctx *ctx = cb_arg;
struct spdk_blob_data *blob = ctx->blob;
struct spdk_blob *blob = ctx->blob;
struct spdk_blob_store *bs = blob->bs;
size_t i;
@ -1017,7 +1017,7 @@ static void
_spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_blob_persist_ctx *ctx = cb_arg;
struct spdk_blob_data *blob = ctx->blob;
struct spdk_blob *blob = ctx->blob;
struct spdk_blob_store *bs = blob->bs;
uint64_t lba;
uint32_t lba_count;
@ -1056,7 +1056,7 @@ static void
_spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_blob_persist_ctx *ctx = cb_arg;
struct spdk_blob_data *blob = ctx->blob;
struct spdk_blob *blob = ctx->blob;
struct spdk_blob_store *bs = blob->bs;
uint64_t lba;
uint32_t lba_count;
@ -1082,7 +1082,7 @@ static void
_spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_blob_persist_ctx *ctx = cb_arg;
struct spdk_blob_data *blob = ctx->blob;
struct spdk_blob *blob = ctx->blob;
struct spdk_blob_store *bs = blob->bs;
uint64_t lba;
uint32_t lba_count;
@ -1114,7 +1114,7 @@ _spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int b
}
static int
_spdk_resize_blob(struct spdk_blob_data *blob, uint64_t sz)
_spdk_resize_blob(struct spdk_blob *blob, uint64_t sz)
{
uint64_t i;
uint64_t *tmp;
@ -1189,7 +1189,7 @@ _spdk_resize_blob(struct spdk_blob_data *blob, uint64_t sz)
/* Write a blob to disk */
static void
_spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob_data *blob,
_spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
struct spdk_blob_persist_ctx *ctx;
@ -1285,7 +1285,7 @@ _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob_data *blob,
}
struct spdk_blob_copy_cluster_ctx {
struct spdk_blob_data *blob;
struct spdk_blob *blob;
uint8_t *buf;
uint64_t page;
uint64_t new_cluster;
@ -1376,7 +1376,7 @@ _spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
}
static void
_spdk_bs_allocate_and_copy_cluster(struct spdk_blob_data *blob,
_spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob,
struct spdk_io_channel *_ch,
uint64_t offset, spdk_bs_user_op_t *op)
{
@ -1456,7 +1456,7 @@ _spdk_bs_allocate_and_copy_cluster(struct spdk_blob_data *blob,
}
static void
_spdk_blob_calculate_lba_and_lba_count(struct spdk_blob_data *blob, uint64_t page, uint64_t length,
_spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t page, uint64_t length,
uint64_t *lba, uint32_t *lba_count)
{
*lba_count = _spdk_bs_page_to_lba(blob->bs, length);
@ -1471,13 +1471,12 @@ _spdk_blob_calculate_lba_and_lba_count(struct spdk_blob_data *blob, uint64_t pag
}
static void
_spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *_blob,
_spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob,
void *payload, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
{
spdk_bs_batch_t *batch;
struct spdk_bs_cpl cpl;
struct spdk_blob_data *blob = __blob_to_data(_blob);
uint64_t op_length;
uint8_t *buf;
@ -1499,16 +1498,16 @@ _spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob
switch (op_type) {
case SPDK_BLOB_READ:
spdk_bs_batch_read_blob(batch, _blob, buf, offset, op_length);
spdk_bs_batch_read_blob(batch, blob, buf, offset, op_length);
break;
case SPDK_BLOB_WRITE:
spdk_bs_batch_write_blob(batch, _blob, buf, offset, op_length);
spdk_bs_batch_write_blob(batch, blob, buf, offset, op_length);
break;
case SPDK_BLOB_UNMAP:
spdk_bs_batch_unmap_blob(batch, _blob, offset, op_length);
spdk_bs_batch_unmap_blob(batch, blob, offset, op_length);
break;
case SPDK_BLOB_WRITE_ZEROES:
spdk_bs_batch_write_zeroes_blob(batch, _blob, offset, op_length);
spdk_bs_batch_write_zeroes_blob(batch, blob, offset, op_length);
break;
case SPDK_BLOB_READV:
case SPDK_BLOB_WRITEV:
@ -1527,11 +1526,10 @@ _spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob
}
static void
_spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *_blob,
_spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob,
void *payload, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
{
struct spdk_blob_data *blob = __blob_to_data(_blob);
struct spdk_bs_cpl cpl;
uint64_t lba;
uint32_t lba_count;
@ -1588,7 +1586,7 @@ _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blo
/* Queue this operation and allocate the cluster */
spdk_bs_user_op_t *op;
op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, _blob, payload, 0, offset, length);
op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
if (!op) {
cb_fn(cb_arg, -ENOMEM);
return;
@ -1623,12 +1621,10 @@ _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blo
}
static void
_spdk_blob_request_submit_op(struct spdk_blob *_blob, struct spdk_io_channel *_channel,
_spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
void *payload, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
{
struct spdk_blob_data *blob = __blob_to_data(_blob);
assert(blob != NULL);
if (blob->data_ro && op_type != SPDK_BLOB_READ) {
@ -1642,10 +1638,10 @@ _spdk_blob_request_submit_op(struct spdk_blob *_blob, struct spdk_io_channel *_c
}
if (length <= _spdk_bs_num_pages_to_cluster_boundary(blob, offset)) {
_spdk_blob_request_submit_op_single(_channel, _blob, payload, offset, length,
_spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length,
cb_fn, cb_arg, op_type);
} else {
_spdk_blob_request_submit_op_split(_channel, _blob, payload, offset, length,
_spdk_blob_request_submit_op_split(_channel, blob, payload, offset, length,
cb_fn, cb_arg, op_type);
}
}
@ -1675,7 +1671,7 @@ static void
_spdk_rw_iov_split_next(void *cb_arg, int bserrno)
{
struct rw_iov_ctx *ctx = cb_arg;
struct spdk_blob_data *blob = __blob_to_data(ctx->blob);
struct spdk_blob *blob = ctx->blob;
struct iovec *iov, *orig_iov;
int iovcnt;
size_t orig_iovoff;
@ -1742,11 +1738,10 @@ _spdk_rw_iov_split_next(void *cb_arg, int bserrno)
}
static void
_spdk_blob_request_submit_rw_iov(struct spdk_blob *_blob, struct spdk_io_channel *_channel,
_spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel,
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg, bool read)
{
struct spdk_blob_data *blob = __blob_to_data(_blob);
struct spdk_bs_cpl cpl;
assert(blob != NULL);
@ -1820,7 +1815,7 @@ _spdk_blob_request_submit_rw_iov(struct spdk_blob *_blob, struct spdk_io_channel
/* Queue this operation and allocate the cluster */
spdk_bs_user_op_t *op;
op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, _blob, iov, iovcnt, offset, length);
op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, length);
if (!op) {
cb_fn(cb_arg, -ENOMEM);
return;
@ -1838,7 +1833,7 @@ _spdk_blob_request_submit_rw_iov(struct spdk_blob *_blob, struct spdk_io_channel
return;
}
ctx->blob = _blob;
ctx->blob = blob;
ctx->channel = _channel;
ctx->cb_fn = cb_fn;
ctx->cb_arg = cb_arg;
@ -1853,10 +1848,10 @@ _spdk_blob_request_submit_rw_iov(struct spdk_blob *_blob, struct spdk_io_channel
}
}
static struct spdk_blob_data *
static struct spdk_blob *
_spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
struct spdk_blob_data *blob;
struct spdk_blob *blob;
TAILQ_FOREACH(blob, &bs->blobs, link) {
if (blob->id == blobid) {
@ -1924,7 +1919,7 @@ static void
_spdk_bs_dev_destroy(void *io_device)
{
struct spdk_blob_store *bs = io_device;
struct spdk_blob_data *blob, *blob_tmp;
struct spdk_blob *blob, *blob_tmp;
bs->dev->destroy(bs->dev);
@ -3159,28 +3154,22 @@ spdk_bs_unregister_md_thread(struct spdk_blob_store *bs)
return 0;
}
spdk_blob_id spdk_blob_get_id(struct spdk_blob *_blob)
spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob)
{
struct spdk_blob_data *blob = __blob_to_data(_blob);
assert(blob != NULL);
return blob->id;
}
uint64_t spdk_blob_get_num_pages(struct spdk_blob *_blob)
uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob)
{
struct spdk_blob_data *blob = __blob_to_data(_blob);
assert(blob != NULL);
return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters);
}
uint64_t spdk_blob_get_num_clusters(struct spdk_blob *_blob)
uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob)
{
struct spdk_blob_data *blob = __blob_to_data(_blob);
assert(blob != NULL);
return blob->active.num_clusters;
@ -3191,7 +3180,7 @@ uint64_t spdk_blob_get_num_clusters(struct spdk_blob *_blob)
static void
_spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_blob_data *blob = cb_arg;
struct spdk_blob *blob = cb_arg;
_spdk_blob_free(blob);
@ -3199,7 +3188,7 @@ _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
}
static int
_spdk_blob_set_xattrs(struct spdk_blob_data *blob, const struct spdk_blob_xattr_opts *xattrs,
_spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs,
bool internal)
{
uint64_t i;
@ -3223,7 +3212,7 @@ _spdk_blob_set_xattrs(struct spdk_blob_data *blob, const struct spdk_blob_xattr_
}
static void
_spdk_blob_set_thin_provision(struct spdk_blob_data *blob)
_spdk_blob_set_thin_provision(struct spdk_blob *blob)
{
blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
blob->state = SPDK_BLOB_STATE_DIRTY;
@ -3232,7 +3221,7 @@ _spdk_blob_set_thin_provision(struct spdk_blob_data *blob)
void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
struct spdk_blob_data *blob;
struct spdk_blob *blob;
uint32_t page_idx;
struct spdk_bs_cpl cpl;
struct spdk_blob_opts opts_default;
@ -3273,7 +3262,7 @@ void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_
_spdk_blob_set_thin_provision(blob);
}
rc = spdk_blob_resize(__data_to_blob(blob), opts->num_clusters);
rc = spdk_blob_resize(blob, opts->num_clusters);
if (rc < 0) {
_spdk_blob_free(blob);
cb_fn(cb_arg, 0, rc);
@ -3304,9 +3293,8 @@ void spdk_bs_create_blob(struct spdk_blob_store *bs,
/* START spdk_blob_resize */
int
spdk_blob_resize(struct spdk_blob *_blob, uint64_t sz)
spdk_blob_resize(struct spdk_blob *blob, uint64_t sz)
{
struct spdk_blob_data *blob = __blob_to_data(_blob);
int rc;
assert(blob != NULL);
@ -3346,8 +3334,7 @@ _spdk_bs_delete_close_cpl(void *cb_arg, int bserrno)
static void
_spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_blob *_blob = cb_arg;
struct spdk_blob_data *blob = __blob_to_data(_blob);
struct spdk_blob *blob = cb_arg;
if (bserrno != 0) {
/*
@ -3367,14 +3354,13 @@ _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
* points into code that touches the blob->open_ref count
* and the blobstore's blob list.
*/
spdk_blob_close(_blob, _spdk_bs_delete_close_cpl, seq);
spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq);
}
static void
_spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
_spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
{
spdk_bs_sequence_t *seq = cb_arg;
struct spdk_blob_data *blob = __blob_to_data(_blob);
uint32_t page_num;
if (bserrno != 0) {
@ -3403,7 +3389,7 @@ _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
blob->active.num_pages = 0;
_spdk_resize_blob(blob, 0);
_spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, _blob);
_spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, blob);
}
void
@ -3435,7 +3421,7 @@ spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
static void
_spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_blob_data *blob = cb_arg;
struct spdk_blob *blob = cb_arg;
/* If the blob have crc error, we just return NULL. */
if (blob == NULL) {
@ -3454,7 +3440,7 @@ _spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
struct spdk_blob_data *blob;
struct spdk_blob *blob;
struct spdk_bs_cpl cpl;
spdk_bs_sequence_t *seq;
uint32_t page_num;
@ -3471,7 +3457,7 @@ void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
blob = _spdk_blob_lookup(bs, blobid);
if (blob) {
blob->open_ref++;
cb_fn(cb_arg, __data_to_blob(blob), 0);
cb_fn(cb_arg, blob, 0);
return;
}
@ -3484,7 +3470,7 @@ void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
cpl.u.blob_handle.cb_fn = cb_fn;
cpl.u.blob_handle.cb_arg = cb_arg;
cpl.u.blob_handle.blob = __data_to_blob(blob);
cpl.u.blob_handle.blob = blob;
seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
if (!seq) {
@ -3498,10 +3484,8 @@ void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
/* END spdk_bs_open_blob */
/* START spdk_blob_set_read_only */
void spdk_blob_set_read_only(struct spdk_blob *b)
void spdk_blob_set_read_only(struct spdk_blob *blob)
{
struct spdk_blob_data *blob = __blob_to_data(b);
assert(spdk_get_thread() == blob->bs->md_thread);
blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;
@ -3515,7 +3499,7 @@ void spdk_blob_set_read_only(struct spdk_blob *b)
static void
_spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_blob_data *blob = __blob_to_data(cb_arg);
struct spdk_blob *blob = cb_arg;
if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
blob->data_ro = true;
@ -3526,7 +3510,7 @@ _spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
}
static void
_spdk_blob_sync_md(struct spdk_blob_data *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
_spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
struct spdk_bs_cpl cpl;
spdk_bs_sequence_t *seq;
@ -3545,10 +3529,8 @@ _spdk_blob_sync_md(struct spdk_blob_data *blob, spdk_blob_op_complete cb_fn, voi
}
void
spdk_blob_sync_md(struct spdk_blob *_blob, spdk_blob_op_complete cb_fn, void *cb_arg)
spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
struct spdk_blob_data *blob = __blob_to_data(_blob);
assert(blob != NULL);
assert(spdk_get_thread() == blob->bs->md_thread);
@ -3575,7 +3557,7 @@ spdk_blob_sync_md(struct spdk_blob *_blob, spdk_blob_op_complete cb_fn, void *cb
struct spdk_blob_insert_cluster_ctx {
struct spdk_thread *thread;
struct spdk_blob_data *blob;
struct spdk_blob *blob;
uint32_t cluster_num; /* cluster index in blob */
uint32_t cluster; /* cluster on disk */
int rc;
@ -3617,7 +3599,7 @@ _spdk_blob_insert_cluster_msg(void *arg)
}
void
_spdk_blob_insert_cluster_on_md_thread(struct spdk_blob_data *blob, uint32_t cluster_num,
_spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg)
{
struct spdk_blob_insert_cluster_ctx *ctx;
@ -3643,7 +3625,7 @@ _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob_data *blob, uint32_t clu
static void
_spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
struct spdk_blob_data *blob = cb_arg;
struct spdk_blob *blob = cb_arg;
if (bserrno == 0) {
blob->open_ref--;
@ -3664,14 +3646,11 @@ _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
spdk_bs_sequence_finish(seq, bserrno);
}
void spdk_blob_close(struct spdk_blob *b, spdk_blob_op_complete cb_fn, void *cb_arg)
void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
struct spdk_bs_cpl cpl;
struct spdk_blob_data *blob;
spdk_bs_sequence_t *seq;
assert(b != NULL);
blob = __blob_to_data(b);
assert(blob != NULL);
assert(spdk_get_thread() == blob->bs->md_thread);
@ -3823,14 +3802,11 @@ _spdk_bs_iter_close_cpl(void *cb_arg, int bserrno)
}
void
spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *b,
spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
struct spdk_bs_iter_ctx *ctx;
struct spdk_blob_data *blob;
assert(b != NULL);
blob = __blob_to_data(b);
assert(blob != NULL);
ctx = calloc(1, sizeof(*ctx));
@ -3845,11 +3821,11 @@ spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *b,
ctx->cb_arg = cb_arg;
/* Close the existing blob */
spdk_blob_close(b, _spdk_bs_iter_close_cpl, ctx);
spdk_blob_close(blob, _spdk_bs_iter_close_cpl, ctx);
}
static int
_spdk_blob_set_xattr(struct spdk_blob_data *blob, const char *name, const void *value,
_spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
uint16_t value_len, bool internal)
{
struct spdk_xattr_tailq *xattrs;
@ -3903,11 +3879,11 @@ int
spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
uint16_t value_len)
{
return _spdk_blob_set_xattr(__blob_to_data(blob), name, value, value_len, false);
return _spdk_blob_set_xattr(blob, name, value, value_len, false);
}
static int
_spdk_blob_remove_xattr(struct spdk_blob_data *blob, const char *name, bool internal)
_spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
{
struct spdk_xattr_tailq *xattrs;
struct spdk_xattr *xattr;
@ -3944,11 +3920,11 @@ _spdk_blob_remove_xattr(struct spdk_blob_data *blob, const char *name, bool inte
int
spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
{
return _spdk_blob_remove_xattr(__blob_to_data(blob), name, false);
return _spdk_blob_remove_xattr(blob, name, false);
}
static int
_spdk_blob_get_xattr_value(struct spdk_blob_data *blob, const char *name,
_spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
const void **value, size_t *value_len, bool internal)
{
struct spdk_xattr *xattr;
@ -3970,7 +3946,7 @@ int
spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
const void **value, size_t *value_len)
{
return _spdk_blob_get_xattr_value(__blob_to_data(blob), name, value, value_len, false);
return _spdk_blob_get_xattr_value(blob, name, value, value_len, false);
}
struct spdk_xattr_names {
@ -4001,9 +3977,9 @@ _spdk_blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_na
}
int
spdk_blob_get_xattr_names(struct spdk_blob *_blob, struct spdk_xattr_names **names)
spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
{
return _spdk_blob_get_xattr_names(&__blob_to_data(_blob)->xattrs, names);
return _spdk_blob_get_xattr_names(&blob->xattrs, names);
}
uint32_t


@ -110,7 +110,7 @@ enum spdk_blob_state {
TAILQ_HEAD(spdk_xattr_tailq, spdk_xattr);
struct spdk_blob_data {
struct spdk_blob {
struct spdk_blob_store *bs;
uint32_t open_ref;
@ -143,12 +143,9 @@ struct spdk_blob_data {
struct spdk_xattr_tailq xattrs;
struct spdk_xattr_tailq xattrs_internal;
TAILQ_ENTRY(spdk_blob_data) link;
TAILQ_ENTRY(spdk_blob) link;
};
#define __blob_to_data(x) ((struct spdk_blob_data *)(x))
#define __data_to_blob(x) ((struct spdk_blob *)(x))
struct spdk_blob_store {
uint64_t md_start; /* Offset from beginning of disk, in pages */
uint32_t md_len; /* Count, in pages */
@ -178,7 +175,7 @@ struct spdk_blob_store {
struct spdk_bs_cpl unload_cpl;
int unload_err;
TAILQ_HEAD(, spdk_blob_data) blobs;
TAILQ_HEAD(, spdk_blob) blobs;
};
struct spdk_bs_channel {
@ -442,13 +439,13 @@ _spdk_bs_lba_to_cluster(struct spdk_blob_store *bs, uint64_t lba)
}
static inline uint64_t
_spdk_bs_blob_lba_to_back_dev_lba(struct spdk_blob_data *blob, uint64_t lba)
_spdk_bs_blob_lba_to_back_dev_lba(struct spdk_blob *blob, uint64_t lba)
{
return lba * blob->bs->dev->blocklen / blob->back_bs_dev->blocklen;
}
static inline uint64_t
_spdk_bs_blob_lba_from_back_dev_lba(struct spdk_blob_data *blob, uint64_t lba)
_spdk_bs_blob_lba_from_back_dev_lba(struct spdk_blob *blob, uint64_t lba)
{
return lba * blob->back_bs_dev->blocklen / blob->bs->dev->blocklen;
}
@ -475,7 +472,7 @@ _spdk_bs_page_to_blobid(uint32_t page_idx)
* start of that page.
*/
static inline uint64_t
_spdk_bs_blob_page_to_lba(struct spdk_blob_data *blob, uint32_t page)
_spdk_bs_blob_page_to_lba(struct spdk_blob *blob, uint32_t page)
{
uint64_t lba;
uint32_t pages_per_cluster;
@ -494,7 +491,7 @@ _spdk_bs_blob_page_to_lba(struct spdk_blob_data *blob, uint32_t page)
* next cluster boundary.
*/
static inline uint32_t
_spdk_bs_num_pages_to_cluster_boundary(struct spdk_blob_data *blob, uint32_t page)
_spdk_bs_num_pages_to_cluster_boundary(struct spdk_blob *blob, uint32_t page)
{
uint32_t pages_per_cluster;
@ -505,7 +502,7 @@ _spdk_bs_num_pages_to_cluster_boundary(struct spdk_blob_data *blob, uint32_t pag
/* Given a page offset into a blob, look up the number of pages into blob to beginning of current cluster */
static inline uint32_t
_spdk_bs_page_to_cluster_start(struct spdk_blob_data *blob, uint32_t page)
_spdk_bs_page_to_cluster_start(struct spdk_blob *blob, uint32_t page)
{
uint32_t pages_per_cluster;
@ -516,7 +513,7 @@ _spdk_bs_page_to_cluster_start(struct spdk_blob_data *blob, uint32_t page)
/* Given a page offset into a blob, look up if it is from allocated cluster. */
static inline bool
_spdk_bs_page_is_allocated(struct spdk_blob_data *blob, uint32_t page)
_spdk_bs_page_is_allocated(struct spdk_blob *blob, uint32_t page)
{
uint64_t lba;
uint32_t pages_per_cluster;


@ -389,7 +389,7 @@ blob_thin_provision(void)
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob = g_blob;
CU_ASSERT(__blob_to_data(blob)->invalid_flags & SPDK_BLOB_THIN_PROV);
CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
spdk_blob_close(blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
@ -411,7 +411,7 @@ blob_thin_provision(void)
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob = g_blob;
CU_ASSERT(__blob_to_data(blob)->invalid_flags & SPDK_BLOB_THIN_PROV);
CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
spdk_blob_close(blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
@ -483,10 +483,10 @@ blob_resize(void)
blob = g_blob;
/* Confirm that resize fails if blob is marked read-only. */
__blob_to_data(blob)->md_ro = true;
blob->md_ro = true;
rc = spdk_blob_resize(blob, 5);
CU_ASSERT(rc == -EPERM);
__blob_to_data(blob)->md_ro = false;
blob->md_ro = false;
/* The blob started at 0 clusters. Resize it to be 5. */
rc = spdk_blob_resize(blob, 5);
@ -532,7 +532,6 @@ blob_read_only(void)
struct spdk_blob_store *bs;
struct spdk_bs_dev *dev;
struct spdk_blob *blob;
struct spdk_blob_data *blob_data;
struct spdk_bs_opts opts;
spdk_blob_id blobid;
@ -557,15 +556,14 @@ blob_read_only(void)
spdk_blob_set_read_only(blob);
blob_data = __blob_to_data(blob);
CU_ASSERT(blob_data->data_ro == false);
CU_ASSERT(blob_data->md_ro == false);
CU_ASSERT(blob->data_ro == false);
CU_ASSERT(blob->md_ro == false);
spdk_blob_sync_md(blob, bs_op_complete, NULL);
CU_ASSERT(blob_data->data_ro == true);
CU_ASSERT(blob_data->md_ro == true);
CU_ASSERT(blob_data->data_ro_flags & SPDK_BLOB_READ_ONLY);
CU_ASSERT(blob->data_ro == true);
CU_ASSERT(blob->md_ro == true);
CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
spdk_blob_close(blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
@ -575,10 +573,9 @@ blob_read_only(void)
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob = g_blob;
blob_data = __blob_to_data(blob);
CU_ASSERT(blob_data->data_ro == true);
CU_ASSERT(blob_data->md_ro == true);
CU_ASSERT(blob_data->data_ro_flags & SPDK_BLOB_READ_ONLY);
CU_ASSERT(blob->data_ro == true);
CU_ASSERT(blob->md_ro == true);
CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
spdk_blob_close(blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
@ -601,10 +598,9 @@ blob_read_only(void)
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob = g_blob;
blob_data = __blob_to_data(blob);
CU_ASSERT(blob_data->data_ro == true);
CU_ASSERT(blob_data->md_ro == true);
CU_ASSERT(blob_data->data_ro_flags & SPDK_BLOB_READ_ONLY);
CU_ASSERT(blob->data_ro == true);
CU_ASSERT(blob->md_ro == true);
CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
spdk_blob_close(blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
@ -681,10 +677,10 @@ blob_write(void)
CU_ASSERT(rc == 0);
/* Confirm that write fails if blob is marked read-only. */
__blob_to_data(blob)->data_ro = true;
blob->data_ro = true;
spdk_bs_io_write_blob(blob, channel, payload, 0, 1, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == -EPERM);
__blob_to_data(blob)->data_ro = false;
blob->data_ro = false;
/* Write to the blob */
spdk_bs_io_write_blob(blob, channel, payload, 0, 1, blob_op_complete, NULL);
@ -753,10 +749,10 @@ blob_read(void)
CU_ASSERT(rc == 0);
/* Confirm that read passes if blob is marked read-only. */
__blob_to_data(blob)->data_ro = true;
blob->data_ro = true;
spdk_bs_io_read_blob(blob, channel, payload, 0, 1, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
__blob_to_data(blob)->data_ro = false;
blob->data_ro = false;
/* Read from the blob */
spdk_bs_io_read_blob(blob, channel, payload, 0, 1, blob_op_complete, NULL);
@ -881,9 +877,9 @@ blob_rw_verify_iov(void)
* that cross cluster boundaries. Start by asserting that the allocated
* clusters are where we expect before modifying the second cluster.
*/
CU_ASSERT(__blob_to_data(blob)->active.clusters[0] == 1 * 256);
CU_ASSERT(__blob_to_data(blob)->active.clusters[1] == 2 * 256);
__blob_to_data(blob)->active.clusters[1] = 3 * 256;
CU_ASSERT(blob->active.clusters[0] == 1 * 256);
CU_ASSERT(blob->active.clusters[1] == 2 * 256);
blob->active.clusters[1] = 3 * 256;
memset(payload_write, 0xE5, sizeof(payload_write));
iov_write[0].iov_base = payload_write;
@ -1043,7 +1039,7 @@ blob_rw_iov_read_only(void)
CU_ASSERT(rc == 0);
/* Verify that writev failed if read_only flag is set. */
__blob_to_data(blob)->data_ro = true;
blob->data_ro = true;
iov_write.iov_base = payload_write;
iov_write.iov_len = sizeof(payload_write);
spdk_bs_io_writev_blob(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
@ -1071,7 +1067,6 @@ blob_unmap(void)
struct spdk_blob_store *bs;
struct spdk_bs_dev *dev;
struct spdk_blob *blob;
struct spdk_blob_data *_blob;
struct spdk_io_channel *channel;
spdk_blob_id blobid;
struct spdk_blob_opts opts;
@ -1101,7 +1096,6 @@ blob_unmap(void)
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob = g_blob;
_blob = __blob_to_data(blob);
rc = spdk_blob_resize(blob, 10);
CU_ASSERT(rc == 0);
@ -1127,11 +1121,11 @@ blob_unmap(void)
}
/* Mark some clusters as unallocated */
_blob->active.clusters[1] = 0;
_blob->active.clusters[2] = 0;
_blob->active.clusters[3] = 0;
_blob->active.clusters[6] = 0;
_blob->active.clusters[8] = 0;
blob->active.clusters[1] = 0;
blob->active.clusters[2] = 0;
blob->active.clusters[3] = 0;
blob->active.clusters[6] = 0;
blob->active.clusters[8] = 0;
/* Unmap clusters by resizing to 0 */
rc = spdk_blob_resize(blob, 0);
@ -1238,11 +1232,11 @@ blob_xattr(void)
blob = g_blob;
/* Test that set_xattr fails if md_ro flag is set. */
__blob_to_data(blob)->md_ro = true;
blob->md_ro = true;
rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
CU_ASSERT(rc == -EPERM);
__blob_to_data(blob)->md_ro = false;
blob->md_ro = false;
rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
CU_ASSERT(rc == 0);
@ -1257,13 +1251,13 @@ blob_xattr(void)
/* get_xattr should still work even if md_ro flag is set. */
value = NULL;
__blob_to_data(blob)->md_ro = true;
blob->md_ro = true;
rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(value != NULL);
CU_ASSERT(*(uint64_t *)value == length);
CU_ASSERT(value_len == 8);
__blob_to_data(blob)->md_ro = false;
blob->md_ro = false;
rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
CU_ASSERT(rc == -ENOENT);
@ -1283,11 +1277,11 @@ blob_xattr(void)
spdk_xattr_names_free(names);
/* Confirm that remove_xattr fails if md_ro is set to true. */
__blob_to_data(blob)->md_ro = true;
blob->md_ro = true;
rc = spdk_blob_remove_xattr(blob, "name");
CU_ASSERT(rc == -EPERM);
__blob_to_data(blob)->md_ro = false;
blob->md_ro = false;
rc = spdk_blob_remove_xattr(blob, "name");
CU_ASSERT(rc == 0);
@ -1296,18 +1290,18 @@ blob_xattr(void)
/* Set internal xattr */
length = 7898;
rc = _spdk_blob_set_xattr(__blob_to_data(blob), "internal", &length, sizeof(length), true);
rc = _spdk_blob_set_xattr(blob, "internal", &length, sizeof(length), true);
CU_ASSERT(rc == 0);
rc = _spdk_blob_get_xattr_value(__blob_to_data(blob), "internal", &value, &value_len, true);
rc = _spdk_blob_get_xattr_value(blob, "internal", &value, &value_len, true);
CU_ASSERT(rc == 0);
CU_ASSERT(*(uint64_t *)value == length);
/* try to get public xattr with same name */
rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
CU_ASSERT(rc != 0);
rc = _spdk_blob_get_xattr_value(__blob_to_data(blob), "internal", &value, &value_len, false);
rc = _spdk_blob_get_xattr_value(blob, "internal", &value, &value_len, false);
CU_ASSERT(rc != 0);
/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
CU_ASSERT((__blob_to_data(blob)->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
SPDK_BLOB_INTERNAL_XATTR)
spdk_blob_close(blob, blob_op_complete, NULL);
@ -1328,7 +1322,7 @@ blob_xattr(void)
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob = g_blob;
rc = _spdk_blob_get_xattr_value(__blob_to_data(blob), "internal", &value, &value_len, true);
rc = _spdk_blob_get_xattr_value(blob, "internal", &value, &value_len, true);
CU_ASSERT(rc == 0);
CU_ASSERT(*(uint64_t *)value == length);
@ -1336,10 +1330,10 @@ blob_xattr(void)
rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
CU_ASSERT(rc != 0);
rc = _spdk_blob_remove_xattr(__blob_to_data(blob), "internal", true);
rc = _spdk_blob_remove_xattr(blob, "internal", true);
CU_ASSERT(rc == 0);
CU_ASSERT((__blob_to_data(blob)->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
CU_ASSERT(g_bserrno == 0);
g_bs = NULL;
@ -2448,12 +2442,12 @@ blob_flags(void)
rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
CU_ASSERT(rc == 0);
__blob_to_data(blob_invalid)->invalid_flags = (1ULL << 63);
__blob_to_data(blob_invalid)->state = SPDK_BLOB_STATE_DIRTY;
__blob_to_data(blob_data_ro)->data_ro_flags = (1ULL << 62);
__blob_to_data(blob_data_ro)->state = SPDK_BLOB_STATE_DIRTY;
__blob_to_data(blob_md_ro)->md_ro_flags = (1ULL << 61);
__blob_to_data(blob_md_ro)->state = SPDK_BLOB_STATE_DIRTY;
blob_invalid->invalid_flags = (1ULL << 63);
blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
blob_data_ro->data_ro_flags = (1ULL << 62);
blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
blob_md_ro->md_ro_flags = (1ULL << 61);
blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
g_bserrno = -1;
spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
@ -2505,8 +2499,8 @@ blob_flags(void)
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob_data_ro = g_blob;
/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
CU_ASSERT(__blob_to_data(blob_data_ro)->data_ro == true);
CU_ASSERT(__blob_to_data(blob_data_ro)->md_ro == true);
CU_ASSERT(blob_data_ro->data_ro == true);
CU_ASSERT(blob_data_ro->md_ro == true);
CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
g_blob = NULL;
@ -2515,8 +2509,8 @@ blob_flags(void)
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob_md_ro = g_blob;
CU_ASSERT(__blob_to_data(blob_md_ro)->data_ro == false);
CU_ASSERT(__blob_to_data(blob_md_ro)->md_ro == true);
CU_ASSERT(blob_md_ro->data_ro == false);
CU_ASSERT(blob_md_ro->md_ro == true);
g_bserrno = -1;
spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
@ -2752,7 +2746,6 @@ blob_thin_prov_alloc(void)
struct spdk_blob_store *bs;
struct spdk_bs_dev *dev;
struct spdk_blob *blob;
struct spdk_blob_data *blob_data;
struct spdk_blob_opts opts;
spdk_blob_id blobid;
uint64_t free_clusters;
@ -2780,30 +2773,29 @@ blob_thin_prov_alloc(void)
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob = g_blob;
blob_data = __blob_to_data(blob);
CU_ASSERT(blob_data->active.num_clusters == 0);
CU_ASSERT(blob->active.num_clusters == 0);
CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
rc = spdk_blob_resize(blob, 5);
CU_ASSERT(rc == 0);
CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
CU_ASSERT(blob_data->active.num_clusters == 5);
CU_ASSERT(blob->active.num_clusters == 5);
CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
/* Shrink the blob to 3 clusters - still unallocated */
rc = spdk_blob_resize(blob, 3);
CU_ASSERT(rc == 0);
CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
CU_ASSERT(blob_data->active.num_clusters == 3);
CU_ASSERT(blob->active.num_clusters == 3);
CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
spdk_blob_sync_md(blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
/* Sync must not change anything */
CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
CU_ASSERT(blob_data->active.num_clusters == 3);
CU_ASSERT(blob->active.num_clusters == 3);
CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
spdk_blob_close(blob, blob_op_complete, NULL);
@ -2828,11 +2820,10 @@ blob_thin_prov_alloc(void)
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob = g_blob;
blob_data = __blob_to_data(blob);
/* Check that clusters allocation and size is still the same */
CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
CU_ASSERT(blob_data->active.num_clusters == 3);
CU_ASSERT(blob->active.num_clusters == 3);
spdk_blob_close(blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
@ -2851,7 +2842,6 @@ blob_insert_cluster_msg(void)
struct spdk_blob_store *bs;
struct spdk_bs_dev *dev;
struct spdk_blob *blob;
struct spdk_blob_data *blob_data;
struct spdk_blob_opts opts;
spdk_blob_id blobid;
uint64_t free_clusters;
@ -2879,16 +2869,15 @@ blob_insert_cluster_msg(void)
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob = g_blob;
blob_data = __blob_to_data(blob);
CU_ASSERT(blob_data->active.num_clusters == 4);
CU_ASSERT(blob->active.num_clusters == 4);
CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
CU_ASSERT(blob_data->active.clusters[1] == 0);
CU_ASSERT(blob->active.clusters[1] == 0);
_spdk_bs_claim_cluster(bs, 0xF);
_spdk_blob_insert_cluster_on_md_thread(blob_data, 1, 0xF, blob_op_complete, NULL);
_spdk_blob_insert_cluster_on_md_thread(blob, 1, 0xF, blob_op_complete, NULL);
CU_ASSERT(blob_data->active.clusters[1] != 0);
CU_ASSERT(blob->active.clusters[1] != 0);
spdk_blob_close(blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
@ -2912,9 +2901,8 @@ blob_insert_cluster_msg(void)
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob = g_blob;
blob_data = __blob_to_data(blob);
CU_ASSERT(blob_data->active.clusters[1] != 0);
CU_ASSERT(blob->active.clusters[1] != 0);
spdk_blob_close(blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
@ -2934,7 +2922,6 @@ blob_thin_prov_rw(void)
struct spdk_blob_store *bs;
struct spdk_bs_dev *dev;
struct spdk_blob *blob;
struct spdk_blob_data *blob_data;
struct spdk_io_channel *channel;
struct spdk_blob_opts opts;
spdk_blob_id blobid;
@ -2967,21 +2954,20 @@ blob_thin_prov_rw(void)
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob = g_blob;
blob_data = __blob_to_data(blob);
CU_ASSERT(blob_data->active.num_clusters == 0);
CU_ASSERT(blob->active.num_clusters == 0);
/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
rc = spdk_blob_resize(blob, 5);
CU_ASSERT(rc == 0);
CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
CU_ASSERT(blob_data->active.num_clusters == 5);
CU_ASSERT(blob->active.num_clusters == 5);
spdk_blob_sync_md(blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
/* Sync must not change anything */
CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
CU_ASSERT(blob_data->active.num_clusters == 5);
CU_ASSERT(blob->active.num_clusters == 5);
/* Payload should be all zeros from unallocated clusters */
memset(payload_read, 0xFF, sizeof(payload_read));
@ -3022,7 +3008,6 @@ blob_thin_prov_rw_iov(void)
struct spdk_blob_store *bs;
struct spdk_bs_dev *dev;
struct spdk_blob *blob;
struct spdk_blob_data *blob_data;
struct spdk_io_channel *channel;
struct spdk_blob_opts opts;
spdk_blob_id blobid;
@ -3058,21 +3043,20 @@ blob_thin_prov_rw_iov(void)
CU_ASSERT(g_bserrno == 0);
SPDK_CU_ASSERT_FATAL(g_blob != NULL);
blob = g_blob;
blob_data = __blob_to_data(blob);
CU_ASSERT(blob_data->active.num_clusters == 0);
CU_ASSERT(blob->active.num_clusters == 0);
/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
rc = spdk_blob_resize(blob, 5);
CU_ASSERT(rc == 0);
CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
CU_ASSERT(blob_data->active.num_clusters == 5);
CU_ASSERT(blob->active.num_clusters == 5);
spdk_blob_sync_md(blob, blob_op_complete, NULL);
CU_ASSERT(g_bserrno == 0);
/* Sync must not change anything */
CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
CU_ASSERT(blob_data->active.num_clusters == 5);
CU_ASSERT(blob->active.num_clusters == 5);
/* Payload should be all zeros from unallocated clusters */
memset(payload_read, 0xAA, sizeof(payload_read));