nvme: asynchronous register operations

This patch introduces asynchronous versions of the ctrlr_(get|set)_reg
functions.  Not all transports need to define them; for those where
asynchronous register access doesn't make sense (e.g. PCIe, whose registers
are memory-mapped), the transport layer falls back to the synchronous API and
queues the callback to be executed during the next process_completions call
on the admin queue.

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I2e78e72b5eba58340885381cb279f3c28e7995ec
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8607
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Author: Konrad Sztyber, 2021-06-22 12:12:50 +02:00 (committed by Tomasz Zawadzki)
Parent: e72453b39f
Commit: 1bea880598
5 changed files with 174 additions and 0 deletions


@@ -3804,6 +3804,15 @@ struct spdk_nvme_transport_poll_group;
*/
void spdk_nvme_cuse_update_namespaces(struct spdk_nvme_ctrlr *ctrlr);
/**
 * Signature for the callback invoked after a register read/write operation
 * completes.
 *
 * \param ctx Context passed by the user.
 * \param value Value of the register, undefined in case of a failure.
 * \param cpl Completion queue entry that contains the status of the command.
 */
typedef void (*spdk_nvme_reg_cb)(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl);
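A minimal callback matching this signature might look as follows (a sketch;
the names are illustrative, not part of this patch):

	#include <inttypes.h>
	#include <stdio.h>

	static void
	reg_read_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
	{
		if (spdk_nvme_cpl_is_error(cpl)) {
			/* value is undefined here; inspect cpl->status instead. */
			return;
		}

		/* For 4-byte operations only the low 32 bits are meaningful. */
		printf("register value: 0x%" PRIx64 "\n", value);
	}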
struct nvme_request;
struct spdk_nvme_transport;
@@ -3831,6 +3840,18 @@ struct spdk_nvme_transport_ops {
	int (*ctrlr_get_reg_8)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
	int (*ctrlr_set_reg_4_async)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg);
	int (*ctrlr_set_reg_8_async)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg);
	int (*ctrlr_get_reg_4_async)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg);
	int (*ctrlr_get_reg_8_async)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg);
	uint32_t (*ctrlr_get_max_xfer_size)(struct spdk_nvme_ctrlr *ctrlr);
	uint16_t (*ctrlr_get_max_sges)(struct spdk_nvme_ctrlr *ctrlr);
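An out-of-tree transport can opt into the native asynchronous path by filling
in the new function pointers; a heavily abbreviated sketch (names are
hypothetical and the mandatory synchronous callbacks are omitted):

	static int
	mytr_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
			spdk_nvme_reg_cb cb_fn, void *cb_arg)
	{
		/* e.g. submit a fabrics Property Get and invoke cb_fn from its
		 * completion handler */
		return 0;
	}

	static const struct spdk_nvme_transport_ops mytr_ops = {
		.name = "MYTR",
		/* ...required callbacks elided for brevity... */
		.ctrlr_get_reg_4_async = mytr_ctrlr_get_reg_4_async,
	};

	SPDK_NVME_TRANSPORT_REGISTER(mytr, &mytr_ops);

Transports that leave these pointers NULL (such as PCIe) automatically get the
synchronous fallback plus a deferred completion.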


@@ -3712,6 +3712,7 @@ nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
	}

	TAILQ_INIT(&ctrlr->active_procs);
	STAILQ_INIT(&ctrlr->register_operations);

	return rc;
}


@@ -787,6 +787,13 @@ struct spdk_nvme_ctrlr_process {
	STAILQ_HEAD(, spdk_nvme_ctrlr_aer_completion_list) async_events;
};

struct nvme_register_completion {
	struct spdk_nvme_cpl cpl;
	uint64_t value;
	spdk_nvme_reg_cb cb_fn;
	void *cb_ctx;
	STAILQ_ENTRY(nvme_register_completion) stailq;
};
/*
* One of these per allocated PCI device.
@@ -938,6 +945,9 @@ struct spdk_nvme_ctrlr {
	unsigned int fw_size_remaining;
	unsigned int fw_offset;
	unsigned int fw_transfer_size;

	/* Completed register operations */
	STAILQ_HEAD(, nvme_register_completion) register_operations;
};
struct spdk_nvme_probe_ctx {
@@ -1392,6 +1402,14 @@ int nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
int nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
int nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
int nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
int nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
		uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
int nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
		uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
int nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
		spdk_nvme_reg_cb cb_fn, void *cb_arg);
int nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
		spdk_nvme_reg_cb cb_fn, void *cb_arg);
uint32_t nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr);
uint16_t nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);
struct spdk_nvme_qpair *nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,


@@ -689,6 +689,28 @@ nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_requests)
	_nvme_qpair_complete_abort_queued_reqs(qpair);
}
static void
nvme_complete_register_operations(struct spdk_nvme_qpair *qpair)
{
	struct nvme_register_completion *ctx;
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
	STAILQ_HEAD(, nvme_register_completion) operations;

	/* Move the completed operations to a local list under the lock, so the
	 * callbacks can be invoked without holding ctrlr_lock. */
	STAILQ_INIT(&operations);
	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	STAILQ_SWAP(&ctrlr->register_operations, &operations, nvme_register_completion);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	while (!STAILQ_EMPTY(&operations)) {
		ctx = STAILQ_FIRST(&operations);
		STAILQ_REMOVE_HEAD(&operations, stailq);
		if (ctx->cb_fn != NULL) {
			ctx->cb_fn(ctx->cb_ctx, ctx->value, &ctx->cpl);
		}
		free(ctx);
	}
}
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
@@ -750,6 +772,11 @@ spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
		nvme_qpair_resubmit_requests(qpair, ret);
	}

	/* Complete any pending register operations */
	if (nvme_qpair_is_admin_queue(qpair)) {
		nvme_complete_register_operations(qpair);
	}

	return ret;
}
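Because the fallback completions are drained from the admin queue's
process_completions path, callers relying on them must keep polling the admin
queue.  A sketch building on the hypothetical check_ready() helper above (the
flag-based loop and direct use of ctrlr->adminq are assumptions, not part of
this patch):

	bool ready = false;

	/* csts_done() runs on this thread, from within this call. */
	if (check_ready(ctrlr, &ready) == 0) {
		while (!ready) {
			spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		}
	}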


@@ -204,6 +204,113 @@ nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
	return transport->ops.ctrlr_get_reg_8(ctrlr, offset, value);
}
static int
nvme_queue_register_operation_completion(struct spdk_nvme_ctrlr *ctrlr, uint64_t value,
		spdk_nvme_reg_cb cb_fn, void *cb_ctx)
{
	struct nvme_register_completion *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	ctx->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	ctx->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	ctx->cb_fn = cb_fn;
	ctx->cb_ctx = cb_ctx;
	ctx->value = value;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	STAILQ_INSERT_TAIL(&ctrlr->register_operations, ctx, stailq);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return 0;
}
int
nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value,
		spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_4_async(ctrlr, offset, value, cb_fn, cb_arg);
}
int
nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value,
		spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_8_async(ctrlr, offset, value, cb_fn, cb_arg);
}
int
nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
		spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint32_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_4(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_4_async(ctrlr, offset, cb_fn, cb_arg);
}
int
nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
		spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint64_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_8(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_8_async(ctrlr, offset, cb_fn, cb_arg);
}
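Note that the emulated path reports failures in two distinct ways: an error
from the underlying synchronous call is returned directly and the callback
never fires, while a completion queued by the fallback always carries
SPDK_NVME_SC_SUCCESS (transports with native async support report errors
through cpl instead).  Callers therefore need to handle both, e.g. (a sketch;
reg_write_done is hypothetical):

	rc = nvme_transport_ctrlr_set_reg_4_async(ctrlr, offset, value,
			reg_write_done, NULL);
	if (rc != 0) {
		/* Synchronous failure: reg_write_done() will never be called. */
	}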
uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{