nvmf: pass poll group to transport during create
For the benefit of forthcoming vfio-user changes, register the poll group poller prior to calling the transport create callback, and pass in a pointer to the poll group itself. Signed-off-by: John Levon <john.levon@nutanix.com> Change-Id: Idbc24126c9d46f8162e4ded07c5a0ecf074fc7dd Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10718 Tested-by: SPDK CI Jenkins <sys_sgci@intel.com> Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com> Community-CI: Mellanox Build Bot Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com> Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
This commit is contained in:
parent
8c32f0d32f
commit
5e37316308
@@ -317,7 +317,8 @@ struct spdk_nvmf_transport_ops {
|
||||
/**
|
||||
* Create a new poll group
|
||||
*/
|
||||
struct spdk_nvmf_transport_poll_group *(*poll_group_create)(struct spdk_nvmf_transport *transport);
|
||||
struct spdk_nvmf_transport_poll_group *(*poll_group_create)(struct spdk_nvmf_transport *transport,
|
||||
struct spdk_nvmf_poll_group *group);
|
||||
|
||||
/**
|
||||
* Get the polling group of the queue pair optimal for the specific transport
|
||||
|
@@ -2107,7 +2107,8 @@ nvmf_fc_discover(struct spdk_nvmf_transport *transport,
|
||||
}
|
||||
|
||||
static struct spdk_nvmf_transport_poll_group *
|
||||
nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport)
|
||||
nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport,
|
||||
struct spdk_nvmf_poll_group *group)
|
||||
{
|
||||
struct spdk_nvmf_fc_poll_group *fgroup;
|
||||
struct spdk_nvmf_fc_transport *ftransport =
|
||||
|
@@ -178,6 +178,10 @@ nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
|
||||
TAILQ_INIT(&group->qpairs);
|
||||
group->thread = thread;
|
||||
|
||||
group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0);
|
||||
|
||||
SPDK_DTRACE_PROBE1(nvmf_create_poll_group, spdk_thread_get_id(thread));
|
||||
|
||||
TAILQ_FOREACH(transport, &tgt->transports, link) {
|
||||
rc = nvmf_poll_group_add_transport(group, transport);
|
||||
if (rc != 0) {
|
||||
@@ -211,10 +215,6 @@ nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
|
||||
TAILQ_INSERT_TAIL(&tgt->poll_groups, group, link);
|
||||
pthread_mutex_unlock(&tgt->mutex);
|
||||
|
||||
group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0);
|
||||
|
||||
SPDK_DTRACE_PROBE1(nvmf_create_poll_group, spdk_thread_get_id(thread));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1184,7 +1184,7 @@ nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
|
||||
}
|
||||
}
|
||||
|
||||
tgroup = nvmf_transport_poll_group_create(transport);
|
||||
tgroup = nvmf_transport_poll_group_create(transport, group);
|
||||
if (!tgroup) {
|
||||
SPDK_ERRLOG("Unable to create poll group for transport\n");
|
||||
return -1;
|
||||
|
@@ -3242,7 +3242,8 @@ static void
|
||||
nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group);
|
||||
|
||||
static struct spdk_nvmf_transport_poll_group *
|
||||
nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
|
||||
nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport,
|
||||
struct spdk_nvmf_poll_group *group)
|
||||
{
|
||||
struct spdk_nvmf_rdma_transport *rtransport;
|
||||
struct spdk_nvmf_rdma_poll_group *rgroup;
|
||||
|
@@ -1225,7 +1225,8 @@ nvmf_tcp_control_msg_list_free(struct spdk_nvmf_tcp_control_msg_list *list)
|
||||
}
|
||||
|
||||
static struct spdk_nvmf_transport_poll_group *
|
||||
nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport)
|
||||
nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport,
|
||||
struct spdk_nvmf_poll_group *group)
|
||||
{
|
||||
struct spdk_nvmf_tcp_transport *ttransport;
|
||||
struct spdk_nvmf_tcp_poll_group *tgroup;
|
||||
|
@@ -464,53 +464,54 @@ nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
|
||||
}
|
||||
|
||||
struct spdk_nvmf_transport_poll_group *
|
||||
nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport)
|
||||
nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport,
|
||||
struct spdk_nvmf_poll_group *group)
|
||||
{
|
||||
struct spdk_nvmf_transport_poll_group *group;
|
||||
struct spdk_nvmf_transport_poll_group *tgroup;
|
||||
struct spdk_nvmf_transport_pg_cache_buf **bufs;
|
||||
uint32_t i;
|
||||
|
||||
group = transport->ops->poll_group_create(transport);
|
||||
if (!group) {
|
||||
tgroup = transport->ops->poll_group_create(transport, group);
|
||||
if (!tgroup) {
|
||||
return NULL;
|
||||
}
|
||||
group->transport = transport;
|
||||
tgroup->transport = transport;
|
||||
|
||||
STAILQ_INIT(&group->pending_buf_queue);
|
||||
STAILQ_INIT(&group->buf_cache);
|
||||
STAILQ_INIT(&tgroup->pending_buf_queue);
|
||||
STAILQ_INIT(&tgroup->buf_cache);
|
||||
|
||||
if (transport->opts.buf_cache_size) {
|
||||
group->buf_cache_size = transport->opts.buf_cache_size;
|
||||
bufs = calloc(group->buf_cache_size, sizeof(struct spdk_nvmf_transport_pg_cache_buf *));
|
||||
tgroup->buf_cache_size = transport->opts.buf_cache_size;
|
||||
bufs = calloc(tgroup->buf_cache_size, sizeof(struct spdk_nvmf_transport_pg_cache_buf *));
|
||||
|
||||
if (!bufs) {
|
||||
SPDK_ERRLOG("Memory allocation failed, can't reserve buffers for the pg buffer cache\n");
|
||||
return group;
|
||||
return tgroup;
|
||||
}
|
||||
|
||||
if (spdk_mempool_get_bulk(transport->data_buf_pool, (void **)bufs, group->buf_cache_size)) {
|
||||
group->buf_cache_size = (uint32_t)spdk_mempool_count(transport->data_buf_pool);
|
||||
if (spdk_mempool_get_bulk(transport->data_buf_pool, (void **)bufs, tgroup->buf_cache_size)) {
|
||||
tgroup->buf_cache_size = (uint32_t)spdk_mempool_count(transport->data_buf_pool);
|
||||
SPDK_NOTICELOG("Unable to reserve the full number of buffers for the pg buffer cache. "
|
||||
"Decrease the number of cached buffers from %u to %u\n",
|
||||
transport->opts.buf_cache_size, group->buf_cache_size);
|
||||
transport->opts.buf_cache_size, tgroup->buf_cache_size);
|
||||
/* Sanity check */
|
||||
assert(group->buf_cache_size <= transport->opts.buf_cache_size);
|
||||
assert(tgroup->buf_cache_size <= transport->opts.buf_cache_size);
|
||||
/* Try again with less number of buffers */
|
||||
if (spdk_mempool_get_bulk(transport->data_buf_pool, (void **)bufs, group->buf_cache_size)) {
|
||||
SPDK_NOTICELOG("Failed to reserve %u buffers\n", group->buf_cache_size);
|
||||
group->buf_cache_size = 0;
|
||||
if (spdk_mempool_get_bulk(transport->data_buf_pool, (void **)bufs, tgroup->buf_cache_size)) {
|
||||
SPDK_NOTICELOG("Failed to reserve %u buffers\n", tgroup->buf_cache_size);
|
||||
tgroup->buf_cache_size = 0;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < group->buf_cache_size; i++) {
|
||||
STAILQ_INSERT_HEAD(&group->buf_cache, bufs[i], link);
|
||||
for (i = 0; i < tgroup->buf_cache_size; i++) {
|
||||
STAILQ_INSERT_HEAD(&tgroup->buf_cache, bufs[i], link);
|
||||
}
|
||||
group->buf_cache_count = group->buf_cache_size;
|
||||
tgroup->buf_cache_count = tgroup->buf_cache_size;
|
||||
|
||||
free(bufs);
|
||||
}
|
||||
|
||||
return group;
|
||||
return tgroup;
|
||||
}
|
||||
|
||||
struct spdk_nvmf_transport_poll_group *
|
||||
|
@@ -45,7 +45,7 @@ void nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
|
||||
struct spdk_nvmf_discovery_log_page_entry *entry);
|
||||
|
||||
struct spdk_nvmf_transport_poll_group *nvmf_transport_poll_group_create(
|
||||
struct spdk_nvmf_transport *transport);
|
||||
struct spdk_nvmf_transport *transport, struct spdk_nvmf_poll_group *group);
|
||||
struct spdk_nvmf_transport_poll_group *nvmf_transport_get_optimal_poll_group(
|
||||
struct spdk_nvmf_transport *transport, struct spdk_nvmf_qpair *qpair);
|
||||
|
||||
|
@@ -3399,7 +3399,8 @@ nvmf_vfio_user_discover(struct spdk_nvmf_transport *transport,
|
||||
{ }
|
||||
|
||||
static struct spdk_nvmf_transport_poll_group *
|
||||
nvmf_vfio_user_poll_group_create(struct spdk_nvmf_transport *transport)
|
||||
nvmf_vfio_user_poll_group_create(struct spdk_nvmf_transport *transport,
|
||||
struct spdk_nvmf_poll_group *group)
|
||||
{
|
||||
struct nvmf_vfio_user_transport *vu_transport;
|
||||
struct nvmf_vfio_user_poll_group *vu_group;
|
||||
|
@@ -45,7 +45,8 @@ DEFINE_STUB_V(nvmf_transport_qpair_fini, (struct spdk_nvmf_qpair *qpair,
|
||||
DEFINE_STUB_V(nvmf_qpair_free_aer, (struct spdk_nvmf_qpair *qpair));
|
||||
DEFINE_STUB_V(nvmf_qpair_abort_pending_zcopy_reqs, (struct spdk_nvmf_qpair *qpair));
|
||||
DEFINE_STUB(nvmf_transport_poll_group_create, struct spdk_nvmf_transport_poll_group *,
|
||||
(struct spdk_nvmf_transport *transport), NULL);
|
||||
(struct spdk_nvmf_transport *transport,
|
||||
struct spdk_nvmf_poll_group *group), NULL);
|
||||
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
|
||||
NULL);
|
||||
DEFINE_STUB(nvmf_ctrlr_async_event_ns_notice, int, (struct spdk_nvmf_ctrlr *ctrlr), 0);
|
||||
|
@@ -817,7 +817,7 @@ test_nvmf_rdma_get_optimal_poll_group(void)
|
||||
TAILQ_INIT(&rtransport.poll_groups);
|
||||
|
||||
for (i = 0; i < TEST_GROUPS_COUNT; i++) {
|
||||
groups[i] = nvmf_rdma_poll_group_create(transport);
|
||||
groups[i] = nvmf_rdma_poll_group_create(transport, NULL);
|
||||
CU_ASSERT(groups[i] != NULL);
|
||||
rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
|
||||
groups[i]->transport = transport;
|
||||
|
@@ -514,7 +514,7 @@ test_nvmf_tcp_poll_group_create(void)
|
||||
CU_ASSERT_PTR_NOT_NULL(transport);
|
||||
transport->opts = opts;
|
||||
MOCK_SET(spdk_sock_group_create, &grp);
|
||||
group = nvmf_tcp_poll_group_create(transport);
|
||||
group = nvmf_tcp_poll_group_create(transport, NULL);
|
||||
MOCK_CLEAR_P(spdk_sock_group_create);
|
||||
SPDK_CU_ASSERT_FATAL(group);
|
||||
if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
|
||||
|
@@ -176,13 +176,14 @@ test_spdk_nvmf_transport_create(void)
|
||||
}
|
||||
|
||||
static struct spdk_nvmf_transport_poll_group *
|
||||
ut_poll_group_create(struct spdk_nvmf_transport *transport)
|
||||
ut_poll_group_create(struct spdk_nvmf_transport *transport,
|
||||
struct spdk_nvmf_poll_group *group)
|
||||
{
|
||||
struct spdk_nvmf_transport_poll_group *group;
|
||||
struct spdk_nvmf_transport_poll_group *tgroup;
|
||||
|
||||
group = calloc(1, sizeof(*group));
|
||||
SPDK_CU_ASSERT_FATAL(group != NULL);
|
||||
return group;
|
||||
tgroup = calloc(1, sizeof(*tgroup));
|
||||
SPDK_CU_ASSERT_FATAL(tgroup != NULL);
|
||||
return tgroup;
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -204,7 +205,7 @@ test_nvmf_transport_poll_group_create(void)
|
||||
transport.opts.buf_cache_size = SPDK_NVMF_DEFAULT_BUFFER_CACHE_SIZE;
|
||||
transport.data_buf_pool = spdk_mempool_create("buf_pool", 32, 4096, 0, 0);
|
||||
|
||||
poll_group = nvmf_transport_poll_group_create(&transport);
|
||||
poll_group = nvmf_transport_poll_group_create(&transport, NULL);
|
||||
SPDK_CU_ASSERT_FATAL(poll_group != NULL);
|
||||
CU_ASSERT(poll_group->transport == &transport);
|
||||
CU_ASSERT(poll_group->buf_cache_size == SPDK_NVMF_DEFAULT_BUFFER_CACHE_SIZE);
|
||||
@@ -216,7 +217,7 @@ test_nvmf_transport_poll_group_create(void)
|
||||
/* Mempool members insufficient */
|
||||
transport.data_buf_pool = spdk_mempool_create("buf_pool", 31, 4096, 0, 0);
|
||||
|
||||
poll_group = nvmf_transport_poll_group_create(&transport);
|
||||
poll_group = nvmf_transport_poll_group_create(&transport, NULL);
|
||||
SPDK_CU_ASSERT_FATAL(poll_group != NULL);
|
||||
CU_ASSERT(poll_group->transport == &transport);
|
||||
CU_ASSERT(poll_group->buf_cache_size == 31);
|
||||
|
Loading…
Reference in New Issue
Block a user