bdev/nvme: Rename nvme_bdev_poll_group to nvme_poll_group

This object aggregates multiple I/O qpairs so that their completions
can be polled together, which might suggest it belongs to a higher
layer. However, the aggregation exists only to poll completions
efficiently. Hence, following the new naming rule, nvme_poll_group is
a better name than nvme_ctrlr_poll_group or nvme_bdev_poll_group, so
rename nvme_bdev_poll_group to nvme_poll_group.
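
As a minimal sketch (not part of this change; it uses only the public
SPDK API, and poll_all_qpairs is a hypothetical helper), polling at
group granularity looks like this:

  #include "spdk/nvme.h"

  static void
  disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
  {
          /* A qpair in the group was disconnected; reconnect or clean up. */
  }

  static int64_t
  poll_all_qpairs(struct spdk_nvme_poll_group *group)
  {
          /* One call reaps completions for every qpair in the group -
           * this is the only purpose of the aggregation. */
          return spdk_nvme_poll_group_process_completions(group, 0,
                                                          disconnected_qpair_cb);
  }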

Additionally, many functions in the NVMe bdev module follow the naming
rule
  bdev_nvme + verb + object
Apply this rule to the few functions related to nvme_poll_group, as
shown in the examples below.
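
Concretely, this commit applies the rule as follows:

  bdev_nvme_poll_group_create_cb()           -> bdev_nvme_create_poll_group_cb()
  bdev_nvme_poll_group_destroy_cb()          -> bdev_nvme_destroy_poll_group_cb()
  bdev_nvme_poll_group_submit_accel_crc32c() -> bdev_nvme_submit_accel_crc32c()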

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I5e5cb6001d4a862c2121b7265cbbffe0c2109785
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8720
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Authored by Shuhei Matsumoto on 2021-07-07 02:20:32 +09:00; committed by Tomasz Zawadzki
parent a3dcdc051f, commit 4af68eade6
3 changed files with 20 additions and 22 deletions


@@ -316,7 +316,7 @@ bdev_nvme_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_
 static int
 bdev_nvme_poll(void *arg)
 {
-	struct nvme_bdev_poll_group *group = arg;
+	struct nvme_poll_group *group = arg;
 	int64_t num_completions;

 	if (group->collect_spin_stat && group->start_ticks == 0) {
@@ -1081,11 +1081,11 @@ bdev_nvme_destroy_ctrlr_channel_cb(void *io_device, void *ctx_buf)
 }

 static void
-bdev_nvme_poll_group_submit_accel_crc32c(void *ctx, uint32_t *dst, struct iovec *iov,
-					 uint32_t iov_cnt, uint32_t seed,
-					 spdk_nvme_accel_completion_cb cb_fn, void *cb_arg)
+bdev_nvme_submit_accel_crc32c(void *ctx, uint32_t *dst, struct iovec *iov,
+			      uint32_t iov_cnt, uint32_t seed,
+			      spdk_nvme_accel_completion_cb cb_fn, void *cb_arg)
 {
-	struct nvme_bdev_poll_group *group = ctx;
+	struct nvme_poll_group *group = ctx;
 	int rc;

 	assert(group->accel_channel != NULL);
@@ -1103,13 +1103,13 @@ bdev_nvme_poll_group_submit_accel_crc32c(void *ctx, uint32_t *dst, struct iovec
 static struct spdk_nvme_accel_fn_table g_bdev_nvme_accel_fn_table = {
 	.table_size		= sizeof(struct spdk_nvme_accel_fn_table),
-	.submit_accel_crc32c	= bdev_nvme_poll_group_submit_accel_crc32c,
+	.submit_accel_crc32c	= bdev_nvme_submit_accel_crc32c,
 };

 static int
-bdev_nvme_poll_group_create_cb(void *io_device, void *ctx_buf)
+bdev_nvme_create_poll_group_cb(void *io_device, void *ctx_buf)
 {
-	struct nvme_bdev_poll_group *group = ctx_buf;
+	struct nvme_poll_group *group = ctx_buf;

 	group->group = spdk_nvme_poll_group_create(group, &g_bdev_nvme_accel_fn_table);
 	if (group->group == NULL) {
@@ -1136,9 +1136,9 @@ bdev_nvme_poll_group_create_cb(void *io_device, void *ctx_buf)
 }

 static void
-bdev_nvme_poll_group_destroy_cb(void *io_device, void *ctx_buf)
+bdev_nvme_destroy_poll_group_cb(void *io_device, void *ctx_buf)
 {
-	struct nvme_bdev_poll_group *group = ctx_buf;
+	struct nvme_poll_group *group = ctx_buf;

 	if (group->accel_channel) {
 		spdk_put_io_channel(group->accel_channel);
@@ -1315,7 +1315,7 @@ static uint64_t
 bdev_nvme_get_spin_time(struct spdk_io_channel *ch)
 {
 	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(ch);
-	struct nvme_bdev_poll_group *group = ctrlr_ch->group;
+	struct nvme_poll_group *group = ctrlr_ch->group;
 	uint64_t spin_time;

 	if (!group || !group->collect_spin_stat) {
@@ -2469,9 +2469,9 @@ bdev_nvme_library_init(void)
 {
 	g_bdev_nvme_init_thread = spdk_get_thread();

-	spdk_io_device_register(&g_nvme_ctrlrs, bdev_nvme_poll_group_create_cb,
-				bdev_nvme_poll_group_destroy_cb,
-				sizeof(struct nvme_bdev_poll_group), "bdev_nvme_poll_groups");
+	spdk_io_device_register(&g_nvme_ctrlrs, bdev_nvme_create_poll_group_cb,
+				bdev_nvme_destroy_poll_group_cb,
+				sizeof(struct nvme_poll_group), "nvme_poll_groups");

 	return 0;
 }
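
For reference (not in this diff), a thread obtains its poll group
through the standard io_channel machinery; get_poll_group is a
hypothetical helper that assumes the module's g_nvme_ctrlrs and
struct nvme_poll_group definitions:

  #include "spdk/thread.h"

  static struct nvme_poll_group *
  get_poll_group(void)
  {
          /* The first call on a thread invokes bdev_nvme_create_poll_group_cb. */
          struct spdk_io_channel *ch = spdk_get_io_channel(&g_nvme_ctrlrs);

          if (ch == NULL) {
                  return NULL;
          }

          /* The channel ctx is the per-thread nvme_poll_group registered above. */
          return spdk_io_channel_get_ctx(ch);
  }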


@@ -1003,8 +1003,7 @@ rpc_bdev_nvme_stats_per_channel(struct spdk_io_channel_iter *i)
 {
 	struct rpc_bdev_nvme_transport_stat_ctx *ctx;
 	struct spdk_io_channel *ch;
-	struct nvme_bdev_poll_group *bdev_group;
-	struct spdk_nvme_poll_group *group;
+	struct nvme_poll_group *group;
 	struct spdk_nvme_poll_group_stat *stat;
 	struct spdk_nvme_transport_poll_group_stat *tr_stat;
 	uint32_t j;
@@ -1012,10 +1011,9 @@ rpc_bdev_nvme_stats_per_channel(struct spdk_io_channel_iter *i)
 	ctx = spdk_io_channel_iter_get_ctx(i);
 	ch = spdk_io_channel_iter_get_channel(i);
-	bdev_group = spdk_io_channel_get_ctx(ch);
-	group = bdev_group->group;
+	group = spdk_io_channel_get_ctx(ch);

-	rc = spdk_nvme_poll_group_get_stats(group, &stat);
+	rc = spdk_nvme_poll_group_get_stats(group->group, &stat);
 	if (rc) {
 		spdk_for_each_channel_continue(i, rc);
 		return;
@@ -1047,7 +1045,7 @@ rpc_bdev_nvme_stats_per_channel(struct spdk_io_channel_iter *i)
 	spdk_json_write_array_end(ctx->w);
 	spdk_json_write_object_end(ctx->w);

-	spdk_nvme_poll_group_free_stats(group, stat);
+	spdk_nvme_poll_group_free_stats(group->group, stat);
 	spdk_for_each_channel_continue(i, 0);
 }
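
For reference (not in this diff), the lookup chain the RPC code uses
after the rename: the channel ctx is the bdev-level nvme_poll_group,
and its group member is the NVMe-library-level spdk_nvme_poll_group.
Assuming a channel ch obtained from the iterator:

  struct nvme_poll_group *group = spdk_io_channel_get_ctx(ch);
  struct spdk_nvme_poll_group_stat *stat;
  int rc;

  /* Stats are queried from, and returned to, the library-level object. */
  rc = spdk_nvme_poll_group_get_stats(group->group, &stat);
  if (rc == 0) {
          /* ... write JSON ... */
          spdk_nvme_poll_group_free_stats(group->group, stat);
  }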


@@ -123,7 +123,7 @@ struct nvme_bdev {
 	bool			opal;
 };

-struct nvme_bdev_poll_group {
+struct nvme_poll_group {
 	struct spdk_nvme_poll_group		*group;
 	struct spdk_io_channel			*accel_channel;
 	struct spdk_poller			*poller;
@@ -157,7 +157,7 @@ struct ocssd_io_channel;
 struct nvme_ctrlr_channel {
 	struct nvme_ctrlr			*ctrlr;
 	struct spdk_nvme_qpair			*qpair;
-	struct nvme_bdev_poll_group		*group;
+	struct nvme_poll_group			*group;
 	TAILQ_HEAD(, spdk_bdev_io)		pending_resets;
 	struct ocssd_io_channel			*ocssd_ch;
 };
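
For reference (not in this diff), a simplified sketch of how these two
structs cooperate at poll time; it omits the spin-stat bookkeeping seen
in bdev_nvme_poll above and assumes the module's definitions:

  static int
  poll_group_poll_sketch(void *arg)
  {
          struct nvme_poll_group *group = arg;
          int64_t num_completions;

          /* Drain completions for every qpair attached to this group. */
          num_completions = spdk_nvme_poll_group_process_completions(group->group, 0,
                          bdev_nvme_disconnected_qpair_cb);

          return num_completions > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
  }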