nvme/tcp: add naive implementation of poll_group api

This implementation simply loops over qpairs calling process_completions.

Signed-off-by: Seth Howell <seth.howell@intel.com>
Change-Id: Ia1f59c13444703e00c6b769d378874f48b9ef03e
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/627
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
This commit is contained in:
Seth Howell 2020-02-06 12:43:31 -07:00 committed by Jim Harris
parent a8f18b0da8
commit fe5e1db68e
3 changed files with 50 additions and 5 deletions

View File

@ -64,6 +64,10 @@ struct nvme_tcp_ctrlr {
struct spdk_nvme_ctrlr ctrlr;
};
/* TCP transport poll group. The generic transport poll group is embedded
 * as the first member so a struct spdk_nvme_transport_poll_group pointer
 * handed back to the generic layer also addresses this containing struct
 * (see nvme_tcp_poll_group_create/destroy).
 */
struct nvme_tcp_poll_group {
struct spdk_nvme_transport_poll_group group;
};
/* NVMe TCP qpair extensions for spdk_nvme_qpair */
struct nvme_tcp_qpair {
struct spdk_nvme_qpair qpair;
@ -1709,7 +1713,14 @@ nvme_tcp_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
/* Allocate a TCP transport poll group.
 *
 * Returns a pointer to the embedded generic poll group (the first member,
 * so the address is also that of the nvme_tcp_poll_group), or NULL if
 * allocation fails.
 */
static struct spdk_nvme_transport_poll_group *
nvme_tcp_poll_group_create(void)
{
	struct nvme_tcp_poll_group *group = calloc(1, sizeof(*group));

	if (group == NULL) {
		SPDK_ERRLOG("Unable to allocate poll group.\n");
		return NULL;
	}

	return &group->group;
}
/* Add a qpair to the poll group.
 *
 * No transport-private bookkeeping is needed: the generic transport layer
 * tracks the qpair on the group's connected/disconnected lists, and the
 * process_completions loop below iterates those lists directly.
 *
 * Returns 0 (success) unconditionally.
 */
static int
nvme_tcp_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			struct spdk_nvme_qpair *qpair)
{
	return 0;
}
/* Remove a qpair from the poll group.
 *
 * Mirror of nvme_tcp_poll_group_add: nothing transport-private to undo,
 * so simply report success and let the generic layer unlink the qpair.
 *
 * Returns 0 (success) unconditionally.
 */
static int
nvme_tcp_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
			   struct spdk_nvme_qpair *qpair)
{
	return 0;
}
/* Poll every qpair in the group once.
 *
 * First, fire the user's disconnect callback for qpairs already on the
 * disconnected list. Then poll each connected qpair for up to
 * completions_per_qpair completions; a negative return from
 * spdk_nvme_qpair_process_completions is treated as a disconnect (the
 * callback is invoked) and contributes nothing to the total.
 *
 * SAFE iteration is used because the callbacks may unlink qpairs from the
 * lists while we walk them.
 *
 * Returns the total number of completions processed across the group.
 */
static int64_t
nvme_tcp_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int32_t local_completions = 0;
	int64_t total_completions = 0;

	STAILQ_FOREACH_SAFE(qpair, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, tgroup->group->ctx);
	}

	STAILQ_FOREACH_SAFE(qpair, &tgroup->connected_qpairs, poll_group_stailq, tmp_qpair) {
		local_completions = spdk_nvme_qpair_process_completions(qpair, completions_per_qpair);
		if (local_completions < 0) {
			disconnected_qpair_cb(qpair, tgroup->group->ctx);
			local_completions = 0;
		}
		total_completions += local_completions;
	}

	return total_completions;
}
/* Destroy the poll group.
 *
 * Refuses (-EBUSY) while any qpair — connected or disconnected — is still
 * attached; the caller must remove all qpairs first. On success frees the
 * group allocation (tgroup is the first member of nvme_tcp_poll_group, so
 * freeing it releases the whole struct) and returns 0.
 */
static int
nvme_tcp_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	if (!STAILQ_EMPTY(&tgroup->connected_qpairs) || !STAILQ_EMPTY(&tgroup->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(tgroup);
	return 0;
}
const struct spdk_nvme_transport_ops tcp_ops = {

View File

@ -443,7 +443,16 @@ nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
rc = tgroup->transport->ops.poll_group_remove(tgroup, qpair);
if (rc == 0) {
if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
} else if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
} else {
return -ENOENT;
}
qpair->poll_group = NULL;
qpair->poll_group_tailq_head = NULL;
}
return rc;

View File

@ -44,6 +44,8 @@ SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME);
/* Unit-test stub: request submission always reports success (0);
 * the tests here do not exercise the submission path. */
DEFINE_STUB(nvme_qpair_submit_request,
int, (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
/* Unit-test stub: per-qpair polling reports 0 completions, letting the
 * new poll_group_process_completions loop run without a real transport. */
DEFINE_STUB(spdk_nvme_qpair_process_completions, int32_t, (struct spdk_nvme_qpair *qpair,
uint32_t max_completions), 0);
/* Unit-test stub: setting socket priority is a no-op (returns 0). */
DEFINE_STUB(spdk_sock_set_priority,
int, (struct spdk_sock *sock, int priority), 0);