nvme: Add transport interface to get/free stats

Two new API functions allow getting and freeing statistics
per poll group. A new function that returns the transport
name has been added so that not only the transport type but
also the name can be reported.
For now only the RDMA transport reports statistics; support
for other transports will be added later.

Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Change-Id: I2824cb474fde5fa859cf8196dabac2c48c05709c
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/6299
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
This commit is contained in:
Alexey Marchuk 2021-02-08 13:53:23 +03:00 committed by Jim Harris
parent 50569293ef
commit 3fcda8e779
4 changed files with 115 additions and 3 deletions

View File

@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -450,6 +450,28 @@ struct spdk_nvme_host_id {
char hostsvcid[SPDK_NVMF_TRSVCID_MAX_LEN + 1];
};
/* Per-RDMA-device statistics gathered by a poll group poller.
 * All counters are cumulative since the poller was created.
 */
struct spdk_nvme_rdma_device_stat {
/* Name of the RDMA device these counters belong to (not owned; points
 * into the device's own storage — valid only while the device exists). */
const char *name;
/* Total number of poll invocations on this device's completion queue. */
uint64_t polls;
/* Polls that completed no work. */
uint64_t idle_polls;
/* Total completions reaped from the completion queue. */
uint64_t completions;
/* Requests that could not be submitted immediately and were queued. */
uint64_t queued_requests;
/* Total send work requests submitted. */
uint64_t total_send_wrs;
/* Number of send-queue doorbell rings (batched submissions). */
uint64_t send_doorbell_updates;
/* Total receive work requests submitted. */
uint64_t total_recv_wrs;
/* Number of receive-queue doorbell rings. */
uint64_t recv_doorbell_updates;
};
/* Statistics for one transport within a poll group.
 * trtype selects the active member of the anonymous union;
 * currently only RDMA statistics are defined.
 */
struct spdk_nvme_transport_poll_group_stat {
/* Transport this stat entry describes; discriminates the union below. */
spdk_nvme_transport_type_t trtype;
union {
struct {
/* Number of entries in device_stats. */
uint32_t num_devices;
/* Array of per-device counters, one per poller in the group. */
struct spdk_nvme_rdma_device_stat *device_stats;
} rdma;
};
};
/*
* Controller support flags
*
@ -3502,6 +3524,12 @@ struct spdk_nvme_transport_ops {
uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
int (*poll_group_destroy)(struct spdk_nvme_transport_poll_group *tgroup);
int (*poll_group_get_stats)(struct spdk_nvme_transport_poll_group *tgroup,
struct spdk_nvme_transport_poll_group_stat **stats);
void (*poll_group_free_stats)(struct spdk_nvme_transport_poll_group *tgroup,
struct spdk_nvme_transport_poll_group_stat *stats);
};
/**

View File

@ -1335,6 +1335,10 @@ int nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
int64_t nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
int nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup);
int nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
struct spdk_nvme_transport_poll_group_stat **stats);
void nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
struct spdk_nvme_transport_poll_group_stat *stats);
/*
* Below ref related functions must be called with the global
* driver lock held for the multi-process condition.

View File

@ -168,7 +168,7 @@ struct nvme_rdma_poller {
struct nvme_rdma_poll_group {
struct spdk_nvme_transport_poll_group group;
STAILQ_HEAD(, nvme_rdma_poller) pollers;
int num_pollers;
uint32_t num_pollers;
STAILQ_HEAD(, nvme_rdma_destroyed_qpair) destroyed_qpairs;
};
@ -2648,6 +2648,65 @@ nvme_rdma_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
return 0;
}
/* Snapshot per-device statistics for every poller in the RDMA poll group.
 *
 * On success, *_stats points to a freshly allocated stat structure that the
 * caller must release with nvme_rdma_poll_group_free_stats().
 *
 * Returns 0 on success, -EINVAL on NULL arguments, -ENOMEM on allocation
 * failure.
 */
static int
nvme_rdma_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
			       struct spdk_nvme_transport_poll_group_stat **_stats)
{
	struct nvme_rdma_poll_group *group;
	struct spdk_nvme_transport_poll_group_stat *stats;
	struct spdk_nvme_rdma_device_stat *device_stat;
	struct nvme_rdma_poller *poller;
	uint32_t i = 0;

	if (tgroup == NULL || _stats == NULL) {
		SPDK_ERRLOG("Invalid stats or group pointer\n");
		return -EINVAL;
	}

	group = nvme_rdma_poll_group(tgroup);

	stats = calloc(1, sizeof(*stats));
	if (!stats) {
		SPDK_ERRLOG("Can't allocate memory for RDMA stats\n");
		return -ENOMEM;
	}
	stats->trtype = SPDK_NVME_TRANSPORT_RDMA;
	stats->rdma.num_devices = group->num_pollers;

	/* calloc(0, size) may legally return NULL; don't treat an empty
	 * poll group as an allocation failure. */
	if (stats->rdma.num_devices != 0) {
		stats->rdma.device_stats = calloc(stats->rdma.num_devices,
						  sizeof(*stats->rdma.device_stats));
		if (!stats->rdma.device_stats) {
			SPDK_ERRLOG("Can't allocate memory for RDMA device stats\n");
			free(stats);
			return -ENOMEM;
		}
	}

	/* One device_stats entry per poller; copy the poller's counters. */
	STAILQ_FOREACH(poller, &group->pollers, link) {
		device_stat = &stats->rdma.device_stats[i];
		device_stat->name = poller->device->device->name;
		device_stat->polls = poller->stats.polls;
		device_stat->idle_polls = poller->stats.idle_polls;
		device_stat->completions = poller->stats.completions;
		device_stat->queued_requests = poller->stats.queued_requests;
		device_stat->total_send_wrs = poller->stats.rdma_stats.send.num_submitted_wrs;
		device_stat->send_doorbell_updates = poller->stats.rdma_stats.send.doorbell_updates;
		device_stat->total_recv_wrs = poller->stats.rdma_stats.recv.num_submitted_wrs;
		device_stat->recv_doorbell_updates = poller->stats.rdma_stats.recv.doorbell_updates;
		i++;
	}

	*_stats = stats;

	return 0;
}
/* Release a stat structure previously returned by
 * nvme_rdma_poll_group_get_stats(). Safe to call with stats == NULL.
 */
static void
nvme_rdma_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
				struct spdk_nvme_transport_poll_group_stat *stats)
{
	if (stats == NULL) {
		return;
	}

	/* Free the per-device array first, then the container. */
	free(stats->rdma.device_stats);
	free(stats);
}
void
spdk_nvme_rdma_init_hooks(struct spdk_nvme_rdma_hooks *hooks)
{
@ -2689,7 +2748,8 @@ const struct spdk_nvme_transport_ops rdma_ops = {
.poll_group_remove = nvme_rdma_poll_group_remove,
.poll_group_process_completions = nvme_rdma_poll_group_process_completions,
.poll_group_destroy = nvme_rdma_poll_group_destroy,
.poll_group_get_stats = nvme_rdma_poll_group_get_stats,
.poll_group_free_stats = nvme_rdma_poll_group_free_stats,
};
SPDK_NVME_TRANSPORT_REGISTER(rdma, &rdma_ops);

View File

@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -599,3 +600,22 @@ nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
return -EINVAL;
}
int
nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
struct spdk_nvme_transport_poll_group_stat **stats)
{
if (tgroup->transport->ops.poll_group_get_stats) {
return tgroup->transport->ops.poll_group_get_stats(tgroup, stats);
}
return -ENOTSUP;
}
void
nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
struct spdk_nvme_transport_poll_group_stat *stats)
{
if (tgroup->transport->ops.poll_group_free_stats) {
tgroup->transport->ops.poll_group_free_stats(tgroup, stats);
}
}