blob: add return codes to bs_user_op_abort

Prior to this patch, bs_user_op_abort() always
returned -EIO back to the bdev layer.

This is not sufficient for ENOMEM cases where
the I/O should be resubmitted by the bdev layer.

Propagating ENOMEM from bs_sequence_start() in
bs_allocate_and_copy_cluster() specifically addresses issue #2306.
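
For illustration only (not part of this patch): with the errno propagated, a
bdev module layered on top of blobstore can map the value onto the matching
bdev I/O status, so that -ENOMEM completions are requeued and resubmitted by
the bdev layer instead of being failed. The callback below is a hypothetical
sketch; spdk_bdev_io_complete() and the SPDK_BDEV_IO_STATUS_* values are
existing bdev API.

#include "spdk/stdinc.h"
#include "spdk/bdev_module.h"

/* Hypothetical completion callback a bdev module could register for a
 * blobstore I/O; cb_arg carries the originating bdev_io. */
static void
my_blob_io_done(void *cb_arg, int bserrno)
{
	struct spdk_bdev_io *bdev_io = cb_arg;

	if (bserrno == 0) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
	} else if (bserrno == -ENOMEM) {
		/* The bdev layer queues the I/O and retries it later. */
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}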

Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Change-Id: Icfb0ce9ca20e1c4dd1668ba77d121f7091acb044
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11764
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Author: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Date:   2022-02-28 15:05:48 +01:00
Parent: a7d174e2ef
Commit: 0b034da148
3 changed files with 10 additions and 10 deletions

@@ -2349,7 +2349,7 @@ blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
 		if (bserrno == 0) {
 			bs_user_op_execute(op);
 		} else {
-			bs_user_op_abort(op);
+			bs_user_op_abort(op, bserrno);
 		}
 	}
@@ -2447,7 +2447,7 @@ bs_allocate_and_copy_cluster(struct spdk_blob *blob,
 	ctx = calloc(1, sizeof(*ctx));
 	if (!ctx) {
-		bs_user_op_abort(op);
+		bs_user_op_abort(op, -ENOMEM);
 		return;
 	}
@@ -2463,7 +2463,7 @@ bs_allocate_and_copy_cluster(struct spdk_blob *blob,
 			SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
 				    blob->bs->cluster_sz);
 			free(ctx);
-			bs_user_op_abort(op);
+			bs_user_op_abort(op, -ENOMEM);
 			return;
 		}
 	}
@@ -2475,7 +2475,7 @@ bs_allocate_and_copy_cluster(struct spdk_blob *blob,
 	if (rc != 0) {
 		spdk_free(ctx->buf);
 		free(ctx);
-		bs_user_op_abort(op);
+		bs_user_op_abort(op, rc);
 		return;
 	}
@@ -2490,7 +2490,7 @@ bs_allocate_and_copy_cluster(struct spdk_blob *blob,
 		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
 		spdk_free(ctx->buf);
 		free(ctx);
-		bs_user_op_abort(op);
+		bs_user_op_abort(op, -ENOMEM);
 		return;
 	}
@@ -3141,13 +3141,13 @@ bs_channel_destroy(void *io_device, void *ctx_buf)
 	while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) {
 		op = TAILQ_FIRST(&channel->need_cluster_alloc);
 		TAILQ_REMOVE(&channel->need_cluster_alloc, op, link);
-		bs_user_op_abort(op);
+		bs_user_op_abort(op, -EIO);
 	}
 
 	while (!TAILQ_EMPTY(&channel->queued_io)) {
 		op = TAILQ_FIRST(&channel->queued_io);
 		TAILQ_REMOVE(&channel->queued_io, op, link);
-		bs_user_op_abort(op);
+		bs_user_op_abort(op, -EIO);
 	}
 
 	free(channel->req_mem);

@@ -494,13 +494,13 @@ bs_user_op_execute(spdk_bs_user_op_t *op)
 }
 
 void
-bs_user_op_abort(spdk_bs_user_op_t *op)
+bs_user_op_abort(spdk_bs_user_op_t *op, int bserrno)
 {
 	struct spdk_bs_request_set *set;
 
 	set = (struct spdk_bs_request_set *)op;
 
-	set->cpl.u.blob_basic.cb_fn(set->cpl.u.blob_basic.cb_arg, -EIO);
+	set->cpl.u.blob_basic.cb_fn(set->cpl.u.blob_basic.cb_arg, bserrno);
 
 	TAILQ_INSERT_TAIL(&set->channel->reqs, set, link);
 }
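
Also for illustration (assumed caller-side code, not from this change): since
the queued user op is now aborted with the real reason, a blobstore consumer's
completion callback can distinguish a transient -ENOMEM from a hard failure
such as -EIO and resubmit only the former. The write_ctx structure and
retry_write() helper are hypothetical; spdk_blob_io_write() is existing
public blobstore API.

#include "spdk/stdinc.h"
#include "spdk/blob.h"

/* Hypothetical per-I/O context kept by the caller so the request can be
 * reissued with the same parameters. */
struct write_ctx {
	struct spdk_blob	*blob;
	struct spdk_io_channel	*channel;
	void			*payload;
	uint64_t		offset;		/* in io units */
	uint64_t		length;		/* in io units */
};

static void write_done(void *cb_arg, int bserrno);

static void
retry_write(struct write_ctx *ctx)
{
	spdk_blob_io_write(ctx->blob, ctx->channel, ctx->payload,
			   ctx->offset, ctx->length, write_done, ctx);
}

static void
write_done(void *cb_arg, int bserrno)
{
	struct write_ctx *ctx = cb_arg;

	if (bserrno == -ENOMEM) {
		/* Transient: resubmit. A real application would typically
		 * defer this, e.g. from a poller, rather than retry inline. */
		retry_write(ctx);
	} else if (bserrno != 0) {
		/* Hard failure such as -EIO: report it to the upper layer. */
	}
}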

@@ -210,6 +210,6 @@ spdk_bs_user_op_t *bs_user_op_alloc(struct spdk_io_channel *channel, struct spdk
 void bs_user_op_execute(spdk_bs_user_op_t *op);
 
-void bs_user_op_abort(spdk_bs_user_op_t *op);
+void bs_user_op_abort(spdk_bs_user_op_t *op, int bserrno);
 
 #endif