diff --git a/lib/blob/blobstore.c b/lib/blob/blobstore.c
index c1b113b8cd..23bd1f905b 100644
--- a/lib/blob/blobstore.c
+++ b/lib/blob/blobstore.c
@@ -885,9 +885,8 @@ _spdk_blob_serialize_extent_table_entry(const struct spdk_blob *blob,
 	uint64_t i, et_idx;
 	uint32_t extent_page, ep_len;
 
-	/* The buffer must have room for at least one extent page */
-	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters) + sizeof(
-			 desc->extent_page[0]);
+	/* The buffer must have room for at least the num_clusters entry */
+	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
 	if (*remaining_sz < cur_sz) {
 		*next_ep = start_ep;
 		return;
@@ -898,14 +897,18 @@ _spdk_blob_serialize_extent_table_entry(const struct spdk_blob *blob,
 
 	desc->num_clusters = blob->active.num_clusters;
 
-	extent_page = blob->active.extent_pages[start_ep];
 	ep_len = 1;
 	et_idx = 0;
-	for (i = start_ep + 1; i < blob->active.num_extent_pages; i++) {
-		/* Extent table entries contain md page offsets for extent pages.
-		 * Zeroes represent unallocated extent pages, which are run-length-encoded.
-		 */
-		if (extent_page == 0 && blob->active.extent_pages[i] == 0) {
+	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
+		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
+			/* If we run out of buffer space, stop here */
+			break;
+		}
+
+		extent_page = blob->active.extent_pages[i];
+		/* Extend the run while this and the next extent_page are both unallocated (zero) */
+		if (extent_page == 0 &&
+		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
 			ep_len++;
 			continue;
 		}
@@ -913,24 +916,10 @@ _spdk_blob_serialize_extent_table_entry(const struct spdk_blob *blob,
 		desc->extent_page[et_idx].num_pages = ep_len;
 		et_idx++;
 
-		cur_sz += sizeof(desc->extent_page[et_idx]);
-
-		if (*remaining_sz < cur_sz) {
-			/* If we ran out of buffer space, return */
-			*next_ep = i;
-			break;
-		}
-		extent_page = blob->active.extent_pages[i];
 		ep_len = 1;
+		cur_sz += sizeof(desc->extent_page[et_idx]);
 	}
-
-	if (*remaining_sz >= cur_sz) {
-		desc->extent_page[et_idx].page_idx = extent_page;
-		desc->extent_page[et_idx].num_pages = ep_len;
-		et_idx++;
-
-		*next_ep = blob->active.num_extent_pages;
-	}
+	*next_ep = i;
 
 	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
 	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
@@ -948,7 +937,9 @@ _spdk_blob_serialize_extent_table(const struct spdk_blob *blob,
 	int rc;
 
 	last_extent_page = 0;
-	while (last_extent_page < blob->active.num_extent_pages) {
+	/* At least one extent table entry always has to be persisted.
+	 * Such a case occurs when num_extent_pages == 0. */
+	while (last_extent_page <= blob->active.num_extent_pages) {
 		_spdk_blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page,
 							buf, remaining_sz);
 
@@ -2007,6 +1998,7 @@ _spdk_blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int
 	/* Writing out new extent page for the first time. Either active extent pages is larger
 	 * than clean extent pages or there was no extent page assigned due to thin provisioning. */
 	if (i >= blob->clean.extent_pages_array_size || blob->clean.extent_pages[i] == 0) {
+		blob->state = SPDK_BLOB_STATE_DIRTY;
 		assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id));
 		ctx->next_extent_page = i + 1;
 		_spdk_blob_persist_write_extent_page(extent_page_id, i * SPDK_EXTENTS_PER_EP, ctx);
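
For reference, below is a minimal standalone sketch of the run-length encoding that the reworked loop performs: consecutive zeroes (unallocated extent pages, e.g. due to thin provisioning) collapse into a single extent table entry whose num_pages records the run length. The names et_entry and rle_encode_extent_pages are illustrative only, not SPDK definitions, and the buffer-space accounting from the patch is omitted.

/* Illustrative sketch only; et_entry and rle_encode_extent_pages are not
 * SPDK names. Buffer-space checks from the patch are left out. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct et_entry {
	uint32_t page_idx;	/* md page of the first extent page in the run; 0 = unallocated */
	uint32_t num_pages;	/* length of the run */
};

static size_t
rle_encode_extent_pages(const uint32_t *extent_pages, size_t num_extent_pages,
			struct et_entry *out)
{
	size_t i, et_idx = 0;
	uint32_t ep_len = 1;

	for (i = 0; i < num_extent_pages; i++) {
		uint32_t extent_page = extent_pages[i];

		/* Same lookahead as the patch: extend the run while this and
		 * the next extent_page are both unallocated. */
		if (extent_page == 0 &&
		    i + 1 < num_extent_pages && extent_pages[i + 1] == 0) {
			ep_len++;
			continue;
		}

		/* Emit the run at its last element: either an allocated page
		 * (run of one) or the final zero of a zero run. */
		out[et_idx].page_idx = extent_page;
		out[et_idx].num_pages = ep_len;
		et_idx++;
		ep_len = 1;
	}

	return et_idx;
}

int
main(void)
{
	/* md pages 37 and 42 are allocated; the zeroes are thin-provisioned holes. */
	const uint32_t pages[] = { 37, 0, 0, 0, 42, 0 };
	struct et_entry out[6];
	size_t n = rle_encode_extent_pages(pages, 6, out);

	for (size_t i = 0; i < n; i++) {
		printf("page_idx=%" PRIu32 " num_pages=%" PRIu32 "\n",
		       out[i].page_idx, out[i].num_pages);
	}
	/* Prints:
	 * page_idx=37 num_pages=1
	 * page_idx=0 num_pages=3
	 * page_idx=42 num_pages=1
	 * page_idx=0 num_pages=1
	 */
	return 0;
}

Emitting each run at its last element, rather than carrying extent_page across iterations as the old code did, is what lets the patched loop check remaining buffer space once at the top of each iteration instead of in the middle of the body and again after the loop.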