nvme: add API for qpair poll groups.

This API will allow us to simplify the polling mechanism for qpairs on a single
thread. It will also pave the way for transport-specific aggregation of qpair
polling to increase performance.

The generic implementation is included. The transport-specific calls
have yet to be implemented.

Change-Id: If07b4170b2be61e4690847c993ec3bde9560b0f0
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/579
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Author: Seth Howell, 2020-02-05 13:25:05 -07:00 (committed by Tomasz Zawadzki)
commit c998c6c69e, parent 5d9d52fd3c
8 changed files with 575 additions and 2 deletions

include/spdk/nvme.h

@ -1948,6 +1948,89 @@ void spdk_nvme_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf
const struct spdk_nvme_transport_id *spdk_nvme_ctrlr_get_transport_id(
struct spdk_nvme_ctrlr *ctrlr);

/**
 * Opaque handle for a poll group. A poll group is a collection of spdk_nvme_qpair
 * objects that are polled for completions as a unit.
 *
 * Returned by spdk_nvme_poll_group_create().
 */
struct spdk_nvme_poll_group;

/**
 * Callback invoked to alert the user to failed qpairs when calling
 * spdk_nvme_poll_group_process_completions.
 */
typedef void (*spdk_nvme_failed_qpair_cb)(struct spdk_nvme_qpair *qpair, void *poll_group_ctx);

/**
 * Create a new poll group.
 *
 * \param ctx A user-supplied context that can be retrieved later with
 * spdk_nvme_poll_group_get_ctx().
 *
 * \return Pointer to the new poll group, or NULL on error.
 */
struct spdk_nvme_poll_group *spdk_nvme_poll_group_create(void *ctx);

/**
 * Add an spdk_nvme_qpair to a poll group. A qpair may only be added to
 * a poll group if it is in the disconnected state, i.e. it was either just
 * allocated and not yet connected, or it has been disconnected with a call
 * to spdk_nvme_ctrlr_disconnect_io_qpair.
 *
 * \param group The group to which the qpair will be added.
 * \param qpair The qpair to add to the poll group.
 *
 * \return 0 on success, -EINVAL if the qpair is not in the disconnected state,
 * -ENODEV if the transport doesn't exist, or -ENOMEM on memory allocation failure.
 */
int spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair);

/**
 * Remove an spdk_nvme_qpair from a poll group.
 *
 * \param group The group from which to remove the qpair.
 * \param qpair The qpair to remove from the poll group.
 *
 * \return 0 on success, -ENOENT if the qpair is not found in the group.
 */
int spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair);

/**
 * Destroy an empty poll group.
 *
 * \param group The group to destroy.
 *
 * \return 0 on success, -EBUSY if the poll group is not empty.
 */
int spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group);

/**
 * Poll for completions on all qpairs in this poll group.
 *
 * The failed_qpair_cb will be called for all failed qpairs in the poll group,
 * including qpairs that fail within the context of this call.
 * The user is responsible for trying to reconnect or destroy those qpairs.
 *
 * \param group The group on which to poll for completions.
 * \param completions_per_qpair The maximum number of completions per qpair.
 * \param failed_qpair_cb A callback function of type spdk_nvme_failed_qpair_cb. Must be non-NULL.
 *
 * \return the number of completions across all qpairs, -EINVAL if no failed_qpair_cb
 * is passed, or -EIO if the shared completion queue cannot be polled for the RDMA transport.
 */
int64_t spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb);

/**
 * Retrieve the user context for this specific poll group.
 *
 * \param group The poll group from which to retrieve the context.
 *
 * \return A pointer to the user provided poll group context.
 */
void *spdk_nvme_poll_group_get_ctx(struct spdk_nvme_poll_group *group);
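
/*
 * Example: a minimal single-threaded usage sketch of the poll group API.
 * The application-side names (app_failed_qpair_cb, app_poll_thread,
 * app_running, app_ctx and the qpair itself) are hypothetical, and error
 * handling is elided.
 *
 * \code
 * static void
 * app_failed_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
 * {
 *	// The application must reconnect or destroy the failed qpair.
 * }
 *
 * static void
 * app_poll_thread(struct spdk_nvme_qpair *qpair, void *app_ctx)
 * {
 *	struct spdk_nvme_poll_group *group;
 *
 *	group = spdk_nvme_poll_group_create(app_ctx);
 *
 *	// The qpair must be disconnected, e.g. freshly allocated or
 *	// disconnected via spdk_nvme_ctrlr_disconnect_io_qpair().
 *	spdk_nvme_poll_group_add(group, qpair);
 *
 *	while (app_running()) {
 *		spdk_nvme_poll_group_process_completions(group, 32, app_failed_qpair_cb);
 *	}
 *
 *	spdk_nvme_poll_group_remove(group, qpair);
 *	spdk_nvme_poll_group_destroy(group);
 * }
 * \endcode
 */
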
/**
 * Get the identify namespace data as defined by the NVMe specification.
 *
@ -2963,6 +3046,11 @@ int spdk_nvme_map_prps(void *prv, struct spdk_nvme_cmd *cmd, struct iovec *iovs,
		uint32_t len, size_t mps,
		void *(*gpa_to_vva)(void *prv, uint64_t addr, uint64_t len));

/**
 * Opaque handle for a transport poll group. Used by the transport function table.
 */
struct spdk_nvme_transport_poll_group;

struct nvme_request;
struct spdk_nvme_transport;
@ -3016,6 +3104,22 @@ struct spdk_nvme_transport_ops {
	int32_t (*qpair_process_completions)(struct spdk_nvme_qpair *qpair, uint32_t max_completions);
	void (*admin_qpair_abort_aers)(struct spdk_nvme_qpair *qpair);

	struct spdk_nvme_transport_poll_group *(*poll_group_create)(void);
	int (*poll_group_add)(struct spdk_nvme_transport_poll_group *tgroup, struct spdk_nvme_qpair *qpair);
	int (*poll_group_remove)(struct spdk_nvme_transport_poll_group *tgroup,
				 struct spdk_nvme_qpair *qpair);
	int (*poll_group_activate_qpair)(struct spdk_nvme_qpair *qpair);
	int (*poll_group_deactivate_qpair)(struct spdk_nvme_qpair *qpair);
	int64_t (*poll_group_process_completions)(struct spdk_nvme_transport_poll_group *tgroup,
			uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb);
	int (*poll_group_destroy)(struct spdk_nvme_transport_poll_group *tgroup);
};
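
/*
 * A sketch of how a transport might implement these hooks once the
 * transport-specific code lands (this patch only adds stubs). The "xyz"
 * transport and its types are hypothetical: the generic
 * spdk_nvme_transport_poll_group is embedded in a transport-specific
 * structure so that shared resources (e.g. a shared completion queue)
 * can live alongside it.
 *
 * \code
 * struct nvme_xyz_poll_group {
 *	struct spdk_nvme_transport_poll_group group;
 *	// Transport-specific shared state, e.g. a shared completion queue.
 * };
 *
 * static struct spdk_nvme_transport_poll_group *
 * nvme_xyz_poll_group_create(void)
 * {
 *	struct nvme_xyz_poll_group *xyz_group = calloc(1, sizeof(*xyz_group));
 *
 *	return xyz_group ? &xyz_group->group : NULL;
 * }
 * \endcode
 */
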
/**

lib/nvme/Makefile

@ -39,7 +39,7 @@ SO_MINOR := 0
SO_SUFFIX := $(SO_VER).$(SO_MINOR)
C_SRCS = nvme_ctrlr_cmd.c nvme_ctrlr.c nvme_fabric.c nvme_ns_cmd.c nvme_ns.c nvme_pcie.c nvme_qpair.c nvme.c nvme_quirks.c nvme_transport.c nvme_uevent.c nvme_ctrlr_ocssd_cmd.c \
nvme_ns_ocssd_cmd.c nvme_tcp.c nvme_opal.c nvme_io_msg.c
nvme_ns_ocssd_cmd.c nvme_tcp.c nvme_opal.c nvme_io_msg.c nvme_poll_group.c
C_SRCS-$(CONFIG_RDMA) += nvme_rdma.c
C_SRCS-$(CONFIG_NVME_CUSE) += nvme_cuse.c

lib/nvme/nvme_internal.h

@ -393,6 +393,9 @@ struct spdk_nvme_qpair {
	STAILQ_HEAD(, nvme_request)	free_req;
	STAILQ_HEAD(, nvme_request)	queued_req;

	/* List entry for spdk_nvme_transport_poll_group's active_qpairs or failed_qpairs list */
	STAILQ_ENTRY(spdk_nvme_qpair)	poll_group_stailq;

	/** Commands opcode in this list will return error */
	TAILQ_HEAD(, nvme_error_cmd)	err_cmd_head;
	/** Requests in this list will return error */
@ -406,6 +409,10 @@ struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr_process *active_proc;

	struct spdk_nvme_transport_poll_group *poll_group;
	void *poll_group_tailq_head;

	void *req_buf;

	const struct spdk_nvme_transport *transport;
@ -413,6 +420,19 @@ struct spdk_nvme_qpair {
	uint8_t transport_failure_reason: 2;
};

struct spdk_nvme_poll_group {
	void *ctx;
	STAILQ_HEAD(, spdk_nvme_transport_poll_group) tgroups;
};

struct spdk_nvme_transport_poll_group {
	struct spdk_nvme_poll_group *group;
	const struct spdk_nvme_transport *transport;
	STAILQ_HEAD(, spdk_nvme_qpair) active_qpairs;
	STAILQ_HEAD(, spdk_nvme_qpair) failed_qpairs;
	STAILQ_ENTRY(spdk_nvme_transport_poll_group) link;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t sector_size;
@ -802,6 +822,10 @@ nvme_robust_mutex_unlock(pthread_mutex_t *mtx)
	return pthread_mutex_unlock(mtx);
}

/* Poll group management functions. */
int nvme_poll_group_activate_qpair(struct spdk_nvme_qpair *qpair);
int nvme_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair);

/* Admin functions */
int nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr,
			    uint8_t cns, uint16_t cntid, uint32_t nsid,
@ -1146,7 +1170,17 @@ int nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nv
int32_t nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair,
		uint32_t max_completions);
void nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair);

struct spdk_nvme_transport_poll_group *nvme_transport_poll_group_create(
		const struct spdk_nvme_transport *transport);
int nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
				  struct spdk_nvme_qpair *qpair);
int nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
				     struct spdk_nvme_qpair *qpair);
int nvme_transport_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair);
int nvme_transport_poll_group_activate_qpair(struct spdk_nvme_qpair *qpair);
int64_t nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb);
int nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup);

/*
 * Below ref related functions must be called with the global
 * driver lock held for the multi-process condition.

lib/nvme/nvme_pcie.c

@ -2355,6 +2355,51 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
	return num_completions;
}

static struct spdk_nvme_transport_poll_group *
nvme_pcie_poll_group_create(void)
{
	return NULL;
}

static int
nvme_pcie_poll_group_activate_qpair(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

static int
nvme_pcie_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

static int
nvme_pcie_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			 struct spdk_nvme_qpair *qpair)
{
	return -ENOTSUP;
}

static int
nvme_pcie_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
			    struct spdk_nvme_qpair *qpair)
{
	return -ENOTSUP;
}

static int64_t
nvme_pcie_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb)
{
	return -ENOTSUP;
}

static int
nvme_pcie_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	return -ENOTSUP;
}

const struct spdk_nvme_transport_ops pcie_ops = {
	.name = "PCIE",
	.type = SPDK_NVME_TRANSPORT_PCIE,
@ -2384,6 +2429,14 @@ const struct spdk_nvme_transport_ops pcie_ops = {
	.qpair_submit_request = nvme_pcie_qpair_submit_request,
	.qpair_process_completions = nvme_pcie_qpair_process_completions,
	.admin_qpair_abort_aers = nvme_pcie_admin_qpair_abort_aers,

	.poll_group_create = nvme_pcie_poll_group_create,
	.poll_group_activate_qpair = nvme_pcie_poll_group_activate_qpair,
	.poll_group_deactivate_qpair = nvme_pcie_poll_group_deactivate_qpair,
	.poll_group_add = nvme_pcie_poll_group_add,
	.poll_group_remove = nvme_pcie_poll_group_remove,
	.poll_group_process_completions = nvme_pcie_poll_group_process_completions,
	.poll_group_destroy = nvme_pcie_poll_group_destroy,
};
SPDK_NVME_TRANSPORT_REGISTER(pcie, &pcie_ops);

lib/nvme/nvme_poll_group.c (new file, 164 lines)

@ -0,0 +1,164 @@
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "nvme_internal.h"

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	STAILQ_INIT(&group->tgroups);

	return group;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	const struct spdk_nvme_transport *transport;

	if (nvme_qpair_get_state(qpair) != NVME_QPAIR_DISABLED) {
		return -EINVAL;
	}

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (tgroup->transport == qpair->transport) {
			break;
		}
	}

	/* See if a new transport has been added (dlopen style) and we need to update the poll group */
	if (!tgroup) {
		transport = nvme_get_first_transport();
		while (transport != NULL) {
			if (transport == qpair->transport) {
				tgroup = nvme_transport_poll_group_create(transport);
				if (tgroup == NULL) {
					return -ENOMEM;
				}
				tgroup->group = group;
				STAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
				break;
			}
			transport = nvme_get_next_transport(transport);
		}
	}

	return tgroup ? nvme_transport_poll_group_add(tgroup, qpair) : -ENODEV;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (tgroup->transport == qpair->transport) {
			return nvme_transport_poll_group_remove(tgroup, qpair);
		}
	}

	return -ENODEV;
}

int
nvme_poll_group_activate_qpair(struct spdk_nvme_qpair *qpair)
{
	return nvme_transport_poll_group_activate_qpair(qpair);
}

int
nvme_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair)
{
	return nvme_transport_poll_group_deactivate_qpair(qpair);
}

int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	if (failed_qpair_cb == NULL) {
		return -EINVAL;
	}

	STAILQ_FOREACH(tgroup, &group->tgroups, link) {
		local_completions = nvme_transport_poll_group_process_completions(tgroup, completions_per_qpair,
				    failed_qpair_cb);
		if (local_completions < 0) {
			/* Remember the first error, but keep polling the remaining transport groups. */
			if (error_reason == 0) {
				error_reason = local_completions;
			}
			continue;
		}
		num_completions += local_completions;
		/* Just to be safe */
		assert(num_completions >= 0);
	}

	return error_reason ? error_reason : num_completions;
}

void *
spdk_nvme_poll_group_get_ctx(struct spdk_nvme_poll_group *group)
{
	return group->ctx;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	struct spdk_nvme_transport_poll_group *tgroup, *tmp_tgroup;

	STAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp_tgroup) {
		STAILQ_REMOVE(&group->tgroups, tgroup, spdk_nvme_transport_poll_group, link);
		if (nvme_transport_poll_group_destroy(tgroup) != 0) {
			STAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
			return -EBUSY;
		}
	}

	free(group);

	return 0;
}

lib/nvme/nvme_rdma.c

@ -2138,6 +2138,51 @@ nvme_rdma_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
	}
}

static struct spdk_nvme_transport_poll_group *
nvme_rdma_poll_group_create(void)
{
	return NULL;
}

static int
nvme_rdma_poll_group_activate_qpair(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

static int
nvme_rdma_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

static int
nvme_rdma_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			 struct spdk_nvme_qpair *qpair)
{
	return -ENOTSUP;
}

static int
nvme_rdma_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
			    struct spdk_nvme_qpair *qpair)
{
	return -ENOTSUP;
}

static int64_t
nvme_rdma_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb)
{
	return -ENOTSUP;
}

static int
nvme_rdma_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	return -ENOTSUP;
}
void
spdk_nvme_rdma_init_hooks(struct spdk_nvme_rdma_hooks *hooks)
{
@ -2170,6 +2215,15 @@ const struct spdk_nvme_transport_ops rdma_ops = {
	.qpair_submit_request = nvme_rdma_qpair_submit_request,
	.qpair_process_completions = nvme_rdma_qpair_process_completions,
	.admin_qpair_abort_aers = nvme_rdma_admin_qpair_abort_aers,

	.poll_group_create = nvme_rdma_poll_group_create,
	.poll_group_activate_qpair = nvme_rdma_poll_group_activate_qpair,
	.poll_group_deactivate_qpair = nvme_rdma_poll_group_deactivate_qpair,
	.poll_group_add = nvme_rdma_poll_group_add,
	.poll_group_remove = nvme_rdma_poll_group_remove,
	.poll_group_process_completions = nvme_rdma_poll_group_process_completions,
	.poll_group_destroy = nvme_rdma_poll_group_destroy,
};
SPDK_NVME_TRANSPORT_REGISTER(rdma, &rdma_ops);

lib/nvme/nvme_tcp.c

@ -1688,6 +1688,51 @@ nvme_tcp_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
	}
}

static struct spdk_nvme_transport_poll_group *
nvme_tcp_poll_group_create(void)
{
	return NULL;
}

static int
nvme_tcp_poll_group_activate_qpair(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

static int
nvme_tcp_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

static int
nvme_tcp_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			struct spdk_nvme_qpair *qpair)
{
	return -ENOTSUP;
}

static int
nvme_tcp_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
			   struct spdk_nvme_qpair *qpair)
{
	return -ENOTSUP;
}

static int64_t
nvme_tcp_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb)
{
	return -ENOTSUP;
}

static int
nvme_tcp_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	return -ENOTSUP;
}

const struct spdk_nvme_transport_ops tcp_ops = {
	.name = "TCP",
	.type = SPDK_NVME_TRANSPORT_TCP,
@ -1714,6 +1759,14 @@ const struct spdk_nvme_transport_ops tcp_ops = {
	.qpair_submit_request = nvme_tcp_qpair_submit_request,
	.qpair_process_completions = nvme_tcp_qpair_process_completions,
	.admin_qpair_abort_aers = nvme_tcp_admin_qpair_abort_aers,

	.poll_group_create = nvme_tcp_poll_group_create,
	.poll_group_activate_qpair = nvme_tcp_poll_group_activate_qpair,
	.poll_group_deactivate_qpair = nvme_tcp_poll_group_deactivate_qpair,
	.poll_group_add = nvme_tcp_poll_group_add,
	.poll_group_remove = nvme_tcp_poll_group_remove,
	.poll_group_process_completions = nvme_tcp_poll_group_process_completions,
	.poll_group_destroy = nvme_tcp_poll_group_destroy,
};
SPDK_NVME_TRANSPORT_REGISTER(tcp, &tcp_ops);

lib/nvme/nvme_transport.c

@ -359,3 +359,114 @@ nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
	assert(transport != NULL);
	transport->ops.admin_qpair_abort_aers(qpair);
}

struct spdk_nvme_transport_poll_group *
nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
{
	struct spdk_nvme_transport_poll_group *group = NULL;

	group = transport->ops.poll_group_create();
	if (group) {
		group->transport = transport;
		STAILQ_INIT(&group->active_qpairs);
		STAILQ_INIT(&group->failed_qpairs);
	}

	return group;
}

int
nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			      struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = tgroup->transport->ops.poll_group_add(tgroup, qpair);
	if (rc == 0) {
		qpair->poll_group = tgroup;
		assert(nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTED);
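		/*
		 * Newly added qpairs start out on the failed list; they are
		 * moved to active_qpairs by
		 * nvme_transport_poll_group_activate_qpair().
		 */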
		qpair->poll_group_tailq_head = &tgroup->failed_qpairs;
		STAILQ_INSERT_TAIL(&tgroup->failed_qpairs, qpair, poll_group_stailq);
	}

	return rc;
}

int
nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
				 struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = tgroup->transport->ops.poll_group_remove(tgroup, qpair);
	if (rc == 0) {
		qpair->poll_group = NULL;
	}

	return rc;
}

int64_t
nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb)
{
	return tgroup->transport->ops.poll_group_process_completions(tgroup, completions_per_qpair,
			failed_qpair_cb);
}

int
nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	return tgroup->transport->ops.poll_group_destroy(tgroup);
}

int
nvme_transport_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc;

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->failed_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->active_qpairs) {
		rc = tgroup->transport->ops.poll_group_deactivate_qpair(qpair);
		if (rc == 0) {
			qpair->poll_group_tailq_head = &tgroup->failed_qpairs;
			STAILQ_REMOVE(&tgroup->active_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
			STAILQ_INSERT_TAIL(&tgroup->failed_qpairs, qpair, poll_group_stailq);
		}

		return rc;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_activate_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc;

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->active_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->failed_qpairs) {
		rc = tgroup->transport->ops.poll_group_activate_qpair(qpair);
		if (rc == 0) {
			qpair->poll_group_tailq_head = &tgroup->active_qpairs;
			STAILQ_REMOVE(&tgroup->failed_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
			STAILQ_INSERT_TAIL(&tgroup->active_qpairs, qpair, poll_group_stailq);
		}

		return rc;
	}

	return -EINVAL;
}