idxd: fix busy handling

Fix busy handling in several functions. Busy handling also means
paying attention to the rc when submitting a batch and not clearing
chan->batch unless the call was a success.

Signed-off-by: paul luse <paul.e.luse@intel.com>
Change-Id: Ic45b10ade2ebdcd845dc33e54dd9c93068ceb98c
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11221
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
paul luse 2022-01-24 12:49:36 -07:00 committed by Tomasz Zawadzki
parent 234a9ad9e2
commit d5e63730ae
2 changed files with 122 additions and 51 deletions
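
The hunks below apply the same pattern at every submit entry point: check the rc from idxd_batch_submit() and leave chan->batch in place when the submit fails, so the operation can be retried later. A minimal sketch of that caller-side pattern, condensed from the hunks that follow (idxd_batch_submit() and struct spdk_idxd_io_channel come from the patched code; the helper name is invented for the sketch, which is illustrative, not a verbatim excerpt):

#include <assert.h>
#include <errno.h>

static int
close_out_transparent_batch(struct spdk_idxd_io_channel *chan)
{
	int rc;

	if (chan->batch) {
		/* Close out the existing transparent batch before starting a new op. */
		rc = idxd_batch_submit(chan, chan->batch, NULL, NULL);
		if (rc) {
			/* Submit failed (expected only when the device is busy):
			 * keep chan->batch so nothing is lost, and report busy to
			 * the incoming operation so the caller retries. */
			assert(rc == -EBUSY);
			return -EBUSY;
		}
		/* On success, idxd_batch_submit() itself clears chan->batch for
		 * transparent batches, so it is not reset here. */
	}

	return 0;
}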


@ -259,8 +259,8 @@ spdk_idxd_put_channel(struct spdk_idxd_io_channel *chan)
assert(chan != NULL);
if (chan->batch) {
assert(chan->batch->transparent);
idxd_batch_cancel(chan, chan->batch);
chan->batch = NULL;
}
pthread_mutex_lock(&chan->idxd->num_channels_lock);
@ -401,8 +401,7 @@ _idxd_prep_batch_cmd(struct spdk_idxd_io_channel *chan, spdk_idxd_req_cb cb_fn,
assert(batch != NULL); /* suppress scan-build warning. */
if (batch->index == DESC_PER_BATCH) {
SPDK_ERRLOG("Attempt to add to a batch that is already full.\n");
return -EINVAL;
return -EBUSY;
}
desc = *_desc = &batch->user_desc[batch->index];
@ -422,7 +421,7 @@ _idxd_prep_batch_cmd(struct spdk_idxd_io_channel *chan, spdk_idxd_req_cb cb_fn,
}
static struct idxd_batch *
idxd_batch_create(struct spdk_idxd_io_channel *chan)
idxd_batch_create(struct spdk_idxd_io_channel *chan, bool transparent)
{
struct idxd_batch *batch;
@ -432,6 +431,11 @@ idxd_batch_create(struct spdk_idxd_io_channel *chan)
batch = TAILQ_FIRST(&chan->batch_pool);
batch->index = 0;
batch->chan = chan;
batch->transparent = transparent;
if (transparent) {
/* this is the active transparent batch */
chan->batch = batch;
}
TAILQ_REMOVE(&chan->batch_pool, batch, link);
} else {
/* The application needs to handle this. */
@ -461,11 +465,15 @@ idxd_batch_cancel(struct spdk_idxd_io_channel *chan, struct idxd_batch *batch)
return -EINVAL;
}
if (batch->index > 0) {
if (batch->index == UINT8_MAX) {
SPDK_ERRLOG("Cannot cancel batch, already submitted to HW.\n");
return -EINVAL;
}
if (batch->transparent) {
chan->batch = NULL;
}
_free_batch(batch, chan);
return 0;
@ -507,7 +515,8 @@ idxd_batch_submit(struct spdk_idxd_io_channel *chan, struct idxd_batch *batch,
op->cb_fn = batch->user_ops[0].cb_fn;
op->cb_arg = batch->user_ops[0].cb_arg;
op->crc_dst = batch->user_ops[0].crc_dst;
_free_batch(batch, chan);
batch->index = 0;
idxd_batch_cancel(chan, batch);
} else {
/* Command specific. */
desc->opcode = IDXD_OPCODE_BATCH;
@ -521,6 +530,11 @@ idxd_batch_submit(struct spdk_idxd_io_channel *chan, struct idxd_batch *batch,
TAILQ_INSERT_TAIL(&chan->ops_outstanding, (struct idxd_ops *)&batch->user_ops[i],
link);
}
batch->index = UINT8_MAX;
if (batch->transparent == true) {
/* for transparent batching, once we submit this batch it is no longer available */
chan->batch = NULL;
}
}
/* Submit operation. */
@ -536,13 +550,12 @@ _idxd_setup_batch(struct spdk_idxd_io_channel *chan)
struct idxd_batch *batch;
if (chan->batch == NULL) {
/* Open a new batch */
batch = idxd_batch_create(chan);
/* Open a new transparent batch */
batch = idxd_batch_create(chan, true);
if (batch == NULL) {
return -EBUSY;
}
chan->batch = batch;
}
} /* else use the existing one */
return 0;
}
@ -553,12 +566,17 @@ _idxd_flush_batch(struct spdk_idxd_io_channel *chan)
int rc;
if (chan->batch != NULL && chan->batch->index >= DESC_PER_BATCH) {
assert(chan->batch->transparent);
/* Close out the full batch */
rc = idxd_batch_submit(chan, chan->batch, NULL, NULL);
if (rc < 0) {
return rc;
if (rc) {
assert(rc == -EBUSY);
/* return 0 here as this is a transparent batch and we want to try again
* internally.
*/
return 0;
}
chan->batch = NULL;
}
return 0;
@ -585,7 +603,7 @@ _idxd_submit_copy_single(struct spdk_idxd_io_channel *chan, void *dst, const voi
/* Common prep. */
rc = _idxd_prep_batch_cmd(chan, cb_fn, cb_arg, chan->batch, &desc, &op);
if (rc) {
goto error;
return rc;
}
rc = _vtophys(src, &src_addr, nbytes);
@ -608,8 +626,7 @@ _idxd_submit_copy_single(struct spdk_idxd_io_channel *chan, void *dst, const voi
return _idxd_flush_batch(chan);
error:
idxd_batch_cancel(chan, chan->batch);
chan->batch = NULL;
chan->batch->index--;
return rc;
}
@ -645,11 +662,17 @@ spdk_idxd_submit_copy(struct spdk_idxd_io_channel *chan,
if (chan->batch) {
/* Close out existing batch */
idxd_batch_submit(chan, chan->batch, NULL, NULL);
chan->batch = NULL;
rc = idxd_batch_submit(chan, chan->batch, NULL, NULL);
if (rc) {
assert(rc == -EBUSY);
/* we can't submit the existing transparent batch so reply to the incoming
* op that we are busy so it will try again.
*/
return -EBUSY;
}
}
batch = idxd_batch_create(chan);
batch = idxd_batch_create(chan, false);
if (!batch) {
return -EBUSY;
}
@ -678,8 +701,13 @@ spdk_idxd_submit_copy(struct spdk_idxd_io_channel *chan,
desc->xfer_size = len;
}
return idxd_batch_submit(chan, batch, cb_fn, cb_arg);
rc = idxd_batch_submit(chan, batch, cb_fn, cb_arg);
if (rc) {
assert(rc == -EBUSY);
goto err;
}
return 0;
err:
idxd_batch_cancel(chan, batch);
return rc;
@ -764,7 +792,7 @@ _idxd_submit_compare_single(struct spdk_idxd_io_channel *chan, void *src1, const
/* Common prep. */
rc = _idxd_prep_batch_cmd(chan, cb_fn, cb_arg, chan->batch, &desc, &op);
if (rc) {
goto error;
return rc;
}
rc = _vtophys(src1, &src1_addr, nbytes);
@ -786,8 +814,7 @@ _idxd_submit_compare_single(struct spdk_idxd_io_channel *chan, void *src1, const
return _idxd_flush_batch(chan);
error:
idxd_batch_cancel(chan, chan->batch);
chan->batch = NULL;
chan->batch->index--;
return rc;
}
@ -818,11 +845,17 @@ spdk_idxd_submit_compare(struct spdk_idxd_io_channel *chan,
if (chan->batch) {
/* Close out existing batch */
idxd_batch_submit(chan, chan->batch, NULL, NULL);
chan->batch = NULL;
rc = idxd_batch_submit(chan, chan->batch, NULL, NULL);
if (rc) {
assert(rc == -EBUSY);
/* we can't submit the existing transparent batch so reply to the incoming
* op that we are busy so it will try again.
*/
return -EBUSY;
}
}
batch = idxd_batch_create(chan);
batch = idxd_batch_create(chan, false);
if (!batch) {
return -EBUSY;
}
@ -851,8 +884,13 @@ spdk_idxd_submit_compare(struct spdk_idxd_io_channel *chan,
desc->xfer_size = len;
}
return idxd_batch_submit(chan, batch, cb_fn, cb_arg);
rc = idxd_batch_submit(chan, batch, cb_fn, cb_arg);
if (rc) {
assert(rc == -EBUSY);
goto err;
}
return 0;
err:
idxd_batch_cancel(chan, batch);
return rc;
@ -878,7 +916,7 @@ _idxd_submit_fill_single(struct spdk_idxd_io_channel *chan, void *dst, uint64_t
/* Common prep. */
rc = _idxd_prep_batch_cmd(chan, cb_fn, cb_arg, chan->batch, &desc, &op);
if (rc) {
goto error;
return rc;
}
rc = _vtophys(dst, &dst_addr, nbytes);
@ -896,8 +934,7 @@ _idxd_submit_fill_single(struct spdk_idxd_io_channel *chan, void *dst, uint64_t
return _idxd_flush_batch(chan);
error:
idxd_batch_cancel(chan, chan->batch);
chan->batch = NULL;
chan->batch->index--;
return rc;
}
@ -921,11 +958,17 @@ spdk_idxd_submit_fill(struct spdk_idxd_io_channel *chan,
if (chan->batch) {
/* Close out existing batch */
idxd_batch_submit(chan, chan->batch, NULL, NULL);
chan->batch = NULL;
rc = idxd_batch_submit(chan, chan->batch, NULL, NULL);
if (rc) {
assert(rc == -EBUSY);
/* we can't submit the existing transparent batch so reply to the incoming
* op that we are busy so it will try again.
*/
return -EBUSY;
}
}
batch = idxd_batch_create(chan);
batch = idxd_batch_create(chan, false);
if (!batch) {
return -EBUSY;
}
@ -948,8 +991,13 @@ spdk_idxd_submit_fill(struct spdk_idxd_io_channel *chan,
desc->flags |= IDXD_FLAG_CACHE_CONTROL; /* direct IO to CPU cache instead of mem */
}
return idxd_batch_submit(chan, batch, cb_fn, cb_arg);
rc = idxd_batch_submit(chan, batch, cb_fn, cb_arg);
if (rc) {
assert(rc == -EBUSY);
goto err;
}
return 0;
err:
idxd_batch_cancel(chan, batch);
return rc;
@ -977,7 +1025,7 @@ _idxd_submit_crc32c_single(struct spdk_idxd_io_channel *chan, uint32_t *crc_dst,
/* Common prep. */
rc = _idxd_prep_batch_cmd(chan, cb_fn, cb_arg, chan->batch, &desc, &op);
if (rc) {
goto error;
return rc;
}
rc = _vtophys(src, &src_addr, nbytes);
@ -996,8 +1044,7 @@ _idxd_submit_crc32c_single(struct spdk_idxd_io_channel *chan, uint32_t *crc_dst,
return _idxd_flush_batch(chan);
error:
idxd_batch_cancel(chan, chan->batch);
chan->batch = NULL;
chan->batch->index--;
return rc;
}
@ -1023,11 +1070,17 @@ spdk_idxd_submit_crc32c(struct spdk_idxd_io_channel *chan,
if (chan->batch) {
/* Close out existing batch */
idxd_batch_submit(chan, chan->batch, NULL, NULL);
chan->batch = NULL;
rc = idxd_batch_submit(chan, chan->batch, NULL, NULL);
if (rc) {
assert(rc == -EBUSY);
/* we can't submit the existing transparent batch so reply to the incoming
* op that we are busy so it will try again.
*/
return -EBUSY;
}
}
batch = idxd_batch_create(chan);
batch = idxd_batch_create(chan, false);
if (!batch) {
return -EBUSY;
}
@ -1062,8 +1115,13 @@ spdk_idxd_submit_crc32c(struct spdk_idxd_io_channel *chan,
op->crc_dst = crc_dst;
}
return idxd_batch_submit(chan, batch, cb_fn, cb_arg);
rc = idxd_batch_submit(chan, batch, cb_fn, cb_arg);
if (rc) {
assert(rc == -EBUSY);
goto err;
}
return 0;
err:
idxd_batch_cancel(chan, batch);
return rc;
@ -1092,7 +1150,7 @@ _idxd_submit_copy_crc32c_single(struct spdk_idxd_io_channel *chan, void *dst, vo
/* Common prep. */
rc = _idxd_prep_batch_cmd(chan, cb_fn, cb_arg, chan->batch, &desc, &op);
if (rc) {
goto error;
return rc;
}
rc = _vtophys(src, &src_addr, nbytes);
@ -1117,8 +1175,7 @@ _idxd_submit_copy_crc32c_single(struct spdk_idxd_io_channel *chan, void *dst, vo
return _idxd_flush_batch(chan);
error:
idxd_batch_cancel(chan, chan->batch);
chan->batch = NULL;
chan->batch->index--;
return rc;
}
@ -1151,11 +1208,17 @@ spdk_idxd_submit_copy_crc32c(struct spdk_idxd_io_channel *chan,
if (chan->batch) {
/* Close out existing batch */
idxd_batch_submit(chan, chan->batch, NULL, NULL);
chan->batch = NULL;
rc = idxd_batch_submit(chan, chan->batch, NULL, NULL);
if (rc) {
assert(rc == -EBUSY);
/* we can't submit the existing transparent batch so reply to the incoming
* op that we are busy so it will try again.
*/
return -EBUSY;
}
}
batch = idxd_batch_create(chan);
batch = idxd_batch_create(chan, false);
if (!batch) {
return -EBUSY;
}
@ -1198,8 +1261,13 @@ spdk_idxd_submit_copy_crc32c(struct spdk_idxd_io_channel *chan,
op->crc_dst = crc_dst;
}
return idxd_batch_submit(chan, batch, cb_fn, cb_arg);
rc = idxd_batch_submit(chan, batch, cb_fn, cb_arg);
if (rc) {
assert(rc == -EBUSY);
goto err;
}
return 0;
err:
idxd_batch_cancel(chan, batch);
return rc;
@ -1223,7 +1291,7 @@ spdk_idxd_process_events(struct spdk_idxd_io_channel *chan)
{
struct idxd_ops *op, *tmp;
int status = 0;
int rc = 0;
int rc2, rc = 0;
void *cb_arg;
spdk_idxd_req_cb cb_fn;
@ -1286,8 +1354,10 @@ spdk_idxd_process_events(struct spdk_idxd_io_channel *chan)
/* Submit any built-up batch */
if (chan->batch) {
idxd_batch_submit(chan, chan->batch, NULL, NULL);
chan->batch = NULL;
rc2 = idxd_batch_submit(chan, chan->batch, NULL, NULL);
if (rc2) {
assert(rc2 == -EBUSY);
}
}
return rc;


@ -78,6 +78,7 @@ struct idxd_batch {
uint64_t user_desc_addr;
uint8_t index;
struct spdk_idxd_io_channel *chan;
bool transparent;
TAILQ_ENTRY(idxd_batch) link;
};
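
Two smaller conventions back this up: idxd_batch_submit() now marks a submitted batch with index == UINT8_MAX so idxd_batch_cancel() can refuse to cancel work already handed to hardware, and only a transparent batch (the new flag above) clears chan->batch when it is torn down. A rough sketch of that cancel-side logic, following the names in the diff (the helper name is invented; illustrative only):

#include <errno.h>
#include <stdint.h>

static int
cancel_if_not_submitted(struct spdk_idxd_io_channel *chan, struct idxd_batch *batch)
{
	/* idxd_batch_submit() sets index to UINT8_MAX once the batch has been
	 * handed to hardware; it can no longer be cancelled after that. */
	if (batch->index == UINT8_MAX) {
		return -EINVAL;
	}

	/* Only the channel's internal (transparent) batch is tracked in
	 * chan->batch, so only that kind resets it here. */
	if (batch->transparent) {
		chan->batch = NULL;
	}

	_free_batch(batch, chan);
	return 0;
}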