From 1bea88059893b94c4bbf5e076a2dea43acc80a1c Mon Sep 17 00:00:00 2001
From: Konrad Sztyber
Date: Tue, 22 Jun 2021 12:12:50 +0200
Subject: [PATCH] nvme: asynchronous register operations

This patch introduces asynchronous versions of the ctrlr_(get|set)_reg
functions.  Not all transports need to define them; for those where it
doesn't make sense (e.g. PCIe), the transport layer will call the
synchronous API and queue the callback to be executed during the next
process_completions call.

Signed-off-by: Konrad Sztyber
Change-Id: I2e78e72b5eba58340885381cb279f3c28e7995ec
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8607
Tested-by: SPDK CI Jenkins
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Reviewed-by: Aleksey Marchuk
Reviewed-by: Ben Walker
Reviewed-by: Shuhei Matsumoto
---
 include/spdk/nvme.h       |  21 ++++++++
 lib/nvme/nvme_ctrlr.c     |   1 +
 lib/nvme/nvme_internal.h  |  18 +++++++
 lib/nvme/nvme_qpair.c     |  27 ++++++++++
 lib/nvme/nvme_transport.c | 107 ++++++++++++++++++++++++++++++++++++++
 5 files changed, 174 insertions(+)

diff --git a/include/spdk/nvme.h b/include/spdk/nvme.h
index 0c48640672..2ff8dfc84c 100644
--- a/include/spdk/nvme.h
+++ b/include/spdk/nvme.h
@@ -3804,6 +3804,15 @@ struct spdk_nvme_transport_poll_group;
  */
 void spdk_nvme_cuse_update_namespaces(struct spdk_nvme_ctrlr *ctrlr);
 
+/**
+ * Signature for callback invoked after completing a register read/write operation.
+ *
+ * \param ctx Context passed by the user.
+ * \param value Value of the register, undefined in case of a failure.
+ * \param cpl Completion queue entry that contains the status of the command.
+ */
+typedef void (*spdk_nvme_reg_cb)(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl);
+
 struct nvme_request;
 
 struct spdk_nvme_transport;
@@ -3831,6 +3840,18 @@ struct spdk_nvme_transport_ops {
 	int (*ctrlr_get_reg_8)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
 
+	int (*ctrlr_set_reg_4_async)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value,
+				     spdk_nvme_reg_cb cb_fn, void *cb_arg);
+
+	int (*ctrlr_set_reg_8_async)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value,
+				     spdk_nvme_reg_cb cb_fn, void *cb_arg);
+
+	int (*ctrlr_get_reg_4_async)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+				     spdk_nvme_reg_cb cb_fn, void *cb_arg);
+
+	int (*ctrlr_get_reg_8_async)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+				     spdk_nvme_reg_cb cb_fn, void *cb_arg);
+
 	uint32_t (*ctrlr_get_max_xfer_size)(struct spdk_nvme_ctrlr *ctrlr);
 
 	uint16_t (*ctrlr_get_max_sges)(struct spdk_nvme_ctrlr *ctrlr);
diff --git a/lib/nvme/nvme_ctrlr.c b/lib/nvme/nvme_ctrlr.c
index 55e6c8368f..b8f17b1128 100644
--- a/lib/nvme/nvme_ctrlr.c
+++ b/lib/nvme/nvme_ctrlr.c
@@ -3712,6 +3712,7 @@ nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
 	}
 
 	TAILQ_INIT(&ctrlr->active_procs);
+	STAILQ_INIT(&ctrlr->register_operations);
 
 	return rc;
 }
diff --git a/lib/nvme/nvme_internal.h b/lib/nvme/nvme_internal.h
index f1b37e7819..792924f27c 100644
--- a/lib/nvme/nvme_internal.h
+++ b/lib/nvme/nvme_internal.h
@@ -787,6 +787,13 @@ struct spdk_nvme_ctrlr_process {
 	STAILQ_HEAD(, spdk_nvme_ctrlr_aer_completion_list) async_events;
 };
 
+struct nvme_register_completion {
+	struct spdk_nvme_cpl	cpl;
+	uint64_t		value;
+	spdk_nvme_reg_cb	cb_fn;
+	void			*cb_ctx;
+	STAILQ_ENTRY(nvme_register_completion) stailq;
+};
+
 /*
  * One of these per allocated PCI device.
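As a usage sketch (illustrative only, not part of the patch): once this change
is in place, driver-internal code can read CSTS without blocking and pick the
value up in its spdk_nvme_reg_cb.  The names read_csts_done and
start_csts_read are hypothetical, and the sketch assumes the usual
driver-internal context (nvme_internal.h plus stdio.h/stddef.h):

static void
read_csts_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
{
        union spdk_nvme_csts_register csts;

        if (spdk_nvme_cpl_is_error(cpl)) {
                /* Per the typedef's contract, value is undefined on failure. */
                return;
        }

        csts.raw = (uint32_t)value;
        printf("CSTS.RDY = %u\n", csts.bits.rdy);
}

static int
start_csts_read(struct spdk_nvme_ctrlr *ctrlr)
{
        /* The callback fires from a later admin-queue poll, never from here. */
        return nvme_transport_ctrlr_get_reg_4_async(ctrlr,
                        offsetof(struct spdk_nvme_registers, csts.raw),
                        read_csts_done, NULL);
}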
@@ -938,6 +945,9 @@ struct spdk_nvme_ctrlr {
 	unsigned int fw_size_remaining;
 	unsigned int fw_offset;
 	unsigned int fw_transfer_size;
+
+	/* Completed register operations */
+	STAILQ_HEAD(, nvme_register_completion) register_operations;
 };
 
 struct spdk_nvme_probe_ctx {
@@ -1392,6 +1402,14 @@ int nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offse
 int nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
 int nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
 int nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
+int nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+		uint32_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
+int nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+		uint64_t value, spdk_nvme_reg_cb cb_fn, void *cb_arg);
+int nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+		spdk_nvme_reg_cb cb_fn, void *cb_arg);
+int nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+		spdk_nvme_reg_cb cb_fn, void *cb_arg);
 uint32_t nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr);
 uint16_t nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr);
 struct spdk_nvme_qpair *nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
diff --git a/lib/nvme/nvme_qpair.c b/lib/nvme/nvme_qpair.c
index 0fc870155a..25c44df41a 100644
--- a/lib/nvme/nvme_qpair.c
+++ b/lib/nvme/nvme_qpair.c
@@ -689,6 +689,28 @@ nvme_qpair_resubmit_requests(struct spdk_nvme_qpair *qpair, uint32_t num_request
 	_nvme_qpair_complete_abort_queued_reqs(qpair);
 }
 
+static void
+nvme_complete_register_operations(struct spdk_nvme_qpair *qpair)
+{
+	struct nvme_register_completion *ctx;
+	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
+	STAILQ_HEAD(, nvme_register_completion) operations;
+
+	STAILQ_INIT(&operations);
+	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
+	STAILQ_SWAP(&ctrlr->register_operations, &operations, nvme_register_completion);
+	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
+
+	while (!STAILQ_EMPTY(&operations)) {
+		ctx = STAILQ_FIRST(&operations);
+		STAILQ_REMOVE_HEAD(&operations, stailq);
+		if (ctx->cb_fn != NULL) {
+			ctx->cb_fn(ctx->cb_ctx, ctx->value, &ctx->cpl);
+		}
+		free(ctx);
+	}
+}
+
 int32_t
 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
 {
@@ -750,6 +772,11 @@ spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
 		nvme_qpair_resubmit_requests(qpair, ret);
 	}
 
+	/* Complete any pending register operations */
+	if (nvme_qpair_is_admin_queue(qpair)) {
+		nvme_complete_register_operations(qpair);
+	}
+
 	return ret;
 }
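The swap-and-drain in nvme_complete_register_operations() above is what makes
the deferred callbacks safe: the list is detached while ctrlr_lock is held,
but the callbacks run only after the swap, so a callback may itself queue
another register operation; it simply lands on the now-empty per-controller
list and is drained by a later poll.  A standalone restatement of the pattern
(all names here are hypothetical; assumes a BSD-complete sys/queue.h that
provides STAILQ_SWAP, which SPDK itself carries in spdk/queue.h):

#include <pthread.h>
#include <stdlib.h>
#include <sys/queue.h>

struct reg_op {
        void (*cb)(void *ctx);
        void *ctx;
        STAILQ_ENTRY(reg_op) link;
};

static STAILQ_HEAD(reg_op_list, reg_op) g_pending = STAILQ_HEAD_INITIALIZER(g_pending);
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

static void
drain_reg_ops(void)
{
        struct reg_op_list local = STAILQ_HEAD_INITIALIZER(local);
        struct reg_op *op;

        /* Detach the whole list while holding the lock... */
        pthread_mutex_lock(&g_lock);
        STAILQ_SWAP(&g_pending, &local, reg_op);
        pthread_mutex_unlock(&g_lock);

        /* ...then run callbacks outside the critical section, so a callback
         * may safely enqueue a new operation onto g_pending. */
        while (!STAILQ_EMPTY(&local)) {
                op = STAILQ_FIRST(&local);
                STAILQ_REMOVE_HEAD(&local, link);
                op->cb(op->ctx);
                free(op);
        }
}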
diff --git a/lib/nvme/nvme_transport.c b/lib/nvme/nvme_transport.c
index d5f9aae494..2a96d3a9a2 100644
--- a/lib/nvme/nvme_transport.c
+++ b/lib/nvme/nvme_transport.c
@@ -204,6 +204,113 @@ nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, u
 	return transport->ops.ctrlr_get_reg_8(ctrlr, offset, value);
 }
 
+static int
+nvme_queue_register_operation_completion(struct spdk_nvme_ctrlr *ctrlr, uint64_t value,
+		spdk_nvme_reg_cb cb_fn, void *cb_ctx)
+{
+	struct nvme_register_completion *ctx;
+
+	ctx = calloc(1, sizeof(*ctx));
+	if (ctx == NULL) {
+		return -ENOMEM;
+	}
+
+	ctx->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
+	ctx->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
+	ctx->cb_fn = cb_fn;
+	ctx->cb_ctx = cb_ctx;
+	ctx->value = value;
+
+	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
+	STAILQ_INSERT_TAIL(&ctrlr->register_operations, ctx, stailq);
+	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
+
+	return 0;
+}
+
+int
+nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value,
+				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
+{
+	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+	int rc;
+
+	assert(transport != NULL);
+	if (transport->ops.ctrlr_set_reg_4_async == NULL) {
+		rc = transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
+		if (rc != 0) {
+			return rc;
+		}
+
+		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
+	}
+
+	return transport->ops.ctrlr_set_reg_4_async(ctrlr, offset, value, cb_fn, cb_arg);
+}
+
+int
+nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value,
+				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
+
+{
+	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+	int rc;
+
+	assert(transport != NULL);
+	if (transport->ops.ctrlr_set_reg_8_async == NULL) {
+		rc = transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
+		if (rc != 0) {
+			return rc;
+		}
+
+		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
+	}
+
+	return transport->ops.ctrlr_set_reg_8_async(ctrlr, offset, value, cb_fn, cb_arg);
+}
+
+int
+nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
+{
+	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+	uint32_t value;
+	int rc;
+
+	assert(transport != NULL);
+	if (transport->ops.ctrlr_get_reg_4_async == NULL) {
+		rc = transport->ops.ctrlr_get_reg_4(ctrlr, offset, &value);
+		if (rc != 0) {
+			return rc;
+		}
+
+		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
+	}
+
+	return transport->ops.ctrlr_get_reg_4_async(ctrlr, offset, cb_fn, cb_arg);
+}
+
+int
+nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
+{
+	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
+	uint64_t value;
+	int rc;
+
+	assert(transport != NULL);
+	if (transport->ops.ctrlr_get_reg_8_async == NULL) {
+		rc = transport->ops.ctrlr_get_reg_8(ctrlr, offset, &value);
+		if (rc != 0) {
+			return rc;
+		}
+
+		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
+	}
+
+	return transport->ops.ctrlr_get_reg_8_async(ctrlr, offset, cb_fn, cb_arg);
+}
+
 uint32_t
 nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
 {
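Completing the earlier CSTS sketch: for a transport that leaves the async ops
NULL (e.g. PCIe), the wrappers above perform the register access synchronously
and only defer the callback, which then runs from the next admin completion
poll.  A hypothetical caller driving that completion through the public
spdk_nvme_ctrlr_process_admin_completions(), which polls the admin queue pair:

if (start_csts_read(ctrlr) == 0) {
        /* On the fallback path the register has already been read at this
         * point; the callback was merely queued and fires from this poll. */
        spdk_nvme_ctrlr_process_admin_completions(ctrlr);
}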