lib/blob: check bserrno on each step of bs_load

Before this change, writing out some of the used md pages
could fail without the bserrno from those operations ever
being checked.

This patch adds a bserrno check at every step.

With that, two functions no longer need (and never needed)
a bserrno parameter:
_spdk_bs_load_write_used_md()
_spdk_bs_load_complete()

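To illustrate the idea, here is a minimal standalone sketch of the
completion-callback pattern this patch applies at each step; load_ctx,
load_ctx_fail() and next_step() are made-up stand-ins used only for
illustration, not the actual blobstore symbols.

    #include <stdio.h>

    struct load_ctx {
        int failed;
    };

    static void
    load_ctx_fail(struct load_ctx *ctx, int bserrno)
    {
        /* Record the error and stop the multi-step load sequence. */
        ctx->failed = bserrno;
        fprintf(stderr, "load failed: %d\n", bserrno);
    }

    static void
    next_step(struct load_ctx *ctx)
    {
        /* The next stage takes no bserrno: it only runs on success. */
        (void)ctx;
        printf("continuing load\n");
    }

    static void
    step_cpl(void *cb_arg, int bserrno)
    {
        struct load_ctx *ctx = cb_arg;

        /* Check the completion status of the previous step before moving on. */
        if (bserrno != 0) {
            load_ctx_fail(ctx, bserrno);
            return;
        }

        next_step(ctx);
    }

    int
    main(void)
    {
        struct load_ctx ctx = { 0 };

        step_cpl(&ctx, 0);   /* success: continue to the next step */
        step_cpl(&ctx, -5);  /* failure (e.g. -EIO): abort the load instead */
        return 0;
    }
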
Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Change-Id: I1a61763f03665ba1b00e5949ef0cf37eefaaf08f
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/482008
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>

@@ -2907,7 +2907,7 @@ _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
 }

 static void
-_spdk_bs_load_complete(struct spdk_bs_load_ctx *ctx, int bserrno)
+_spdk_bs_load_complete(struct spdk_bs_load_ctx *ctx)
 {
     spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx);
 }
@@ -2936,7 +2936,7 @@ _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
         return;
     }

-    _spdk_bs_load_complete(ctx, bserrno);
+    _spdk_bs_load_complete(ctx);
 }

 static void
@@ -2946,6 +2946,11 @@ _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
     uint64_t lba, lba_count, mask_size;
     int rc;

+    if (bserrno != 0) {
+        _spdk_bs_load_ctx_fail(ctx, bserrno);
+        return;
+    }
+
     /* The type must be correct */
     assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
     /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
@@ -2987,6 +2992,11 @@ _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
     uint64_t lba, lba_count, mask_size;
     int rc;

+    if (bserrno != 0) {
+        _spdk_bs_load_ctx_fail(ctx, bserrno);
+        return;
+    }
+
     /* The type must be correct */
     assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
     /* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
@@ -3123,7 +3133,12 @@ _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 {
     struct spdk_bs_load_ctx *ctx = cb_arg;

-    _spdk_bs_load_complete(ctx, bserrno);
+    if (bserrno != 0) {
+        _spdk_bs_load_ctx_fail(ctx, bserrno);
+        return;
+    }
+
+    _spdk_bs_load_complete(ctx);
 }

 static void
@@ -3134,6 +3149,11 @@ _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
     spdk_free(ctx->mask);
     ctx->mask = NULL;

+    if (bserrno != 0) {
+        _spdk_bs_load_ctx_fail(ctx, bserrno);
+        return;
+    }
+
     _spdk_bs_write_used_clusters(seq, ctx, _spdk_bs_load_write_used_clusters_cpl);
 }
@@ -3145,11 +3165,16 @@ _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
     spdk_free(ctx->mask);
     ctx->mask = NULL;

+    if (bserrno != 0) {
+        _spdk_bs_load_ctx_fail(ctx, bserrno);
+        return;
+    }
+
     _spdk_bs_write_used_blobids(seq, ctx, _spdk_bs_load_write_used_blobids_cpl);
 }

 static void
-_spdk_bs_load_write_used_md(struct spdk_bs_load_ctx *ctx, int bserrno)
+_spdk_bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
 {
     _spdk_bs_write_used_md(ctx->seq, ctx, _spdk_bs_load_write_used_pages_cpl);
 }
@@ -3203,7 +3228,7 @@ _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
             _spdk_bs_claim_cluster(ctx->bs, i);
         }
         spdk_free(ctx->page);
-        _spdk_bs_load_write_used_md(ctx, bserrno);
+        _spdk_bs_load_write_used_md(ctx);
     }
 }