example/nvme: Replace next pointer by TAILQ except for fio_plugin and perf

This will make the object relationship cleaner and the asynchronous
detach operation easier to implement.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I5030dc9eb8f607247f08b4524d37ec2b74826a93
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/4430
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Authored by Shuhei Matsumoto on 2020-09-27 11:51:01 +09:00; committed by Tomasz Zawadzki
parent 6d87bc7a8a
commit 4c3fd22850
4 changed files with 158 additions and 256 deletions
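
Every hunk below applies the same conversion: a hand-rolled singly linked list (a next pointer in each element plus a global head pointer) becomes a TAILQ from sys/queue.h. The following sketch is purely illustrative, not code from the SPDK tree; the struct and list names mirror the diff, but the payload is simplified.

/* Illustrative sketch only; the struct and list names mirror the diff,
 * the payload does not. */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct ctrlr_entry {
        char name[32];                  /* payload, untouched by the conversion */
        TAILQ_ENTRY(ctrlr_entry) link;  /* was: struct ctrlr_entry *next; */
};

/* was: static struct ctrlr_entry *g_controllers = NULL; */
static TAILQ_HEAD(, ctrlr_entry) g_controllers = TAILQ_HEAD_INITIALIZER(g_controllers);

int main(void)
{
        struct ctrlr_entry *entry;
        int i;

        for (i = 0; i < 3; i++) {
                entry = calloc(1, sizeof(*entry));
                if (entry == NULL) {
                        return 1;
                }
                snprintf(entry->name, sizeof(entry->name), "ctrlr%d", i);
                /* was: entry->next = g_controllers; g_controllers = entry; */
                TAILQ_INSERT_TAIL(&g_controllers, entry, link);
        }

        /* was: while (entry) { ...; entry = entry->next; } */
        TAILQ_FOREACH(entry, &g_controllers, link) {
                printf("%s\n", entry->name);
        }
        return 0;
}

One behavioral nuance is visible in the hunks: the old code inserted new elements at the head, while TAILQ_INSERT_TAIL preserves registration order. None of these examples appear to depend on that ordering.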

View File

@@ -45,7 +45,7 @@ struct ctrlr_entry {
struct spdk_nvme_ctrlr *ctrlr;
enum spdk_nvme_transport_type trtype;
struct ctrlr_entry *next;
TAILQ_ENTRY(ctrlr_entry) link;
char name[1024];
};
@@ -53,7 +53,7 @@ struct ns_entry {
struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_ns *ns;
struct ns_entry *next;
TAILQ_ENTRY(ns_entry) link;
uint32_t io_size_blocks;
uint32_t num_io_requests;
uint64_t size_in_ios;
@@ -71,7 +71,7 @@ struct ctrlr_worker_ctx {
uint64_t abort_failed;
uint64_t current_queue_depth;
struct spdk_nvme_ctrlr *ctrlr;
struct ctrlr_worker_ctx *next;
TAILQ_ENTRY(ctrlr_worker_ctx) link;
};
struct ns_worker_ctx {
@@ -85,7 +85,7 @@ struct ns_worker_ctx {
bool is_draining;
struct spdk_nvme_qpair *qpair;
struct ctrlr_worker_ctx *ctrlr_ctx;
struct ns_worker_ctx *next;
TAILQ_ENTRY(ns_worker_ctx) link;
};
struct perf_task {
@@ -94,18 +94,18 @@ struct perf_task {
};
struct worker_thread {
struct ns_worker_ctx *ns_ctx;
struct ctrlr_worker_ctx *ctrlr_ctx;
struct worker_thread *next;
TAILQ_HEAD(, ns_worker_ctx) ns_ctx;
TAILQ_HEAD(, ctrlr_worker_ctx) ctrlr_ctx;
TAILQ_ENTRY(worker_thread) link;
unsigned lcore;
};
static const char *g_workload_type = "read";
static struct ctrlr_entry *g_controllers;
static struct ns_entry *g_namespaces;
static TAILQ_HEAD(, ctrlr_entry) g_controllers = TAILQ_HEAD_INITIALIZER(g_controllers);
static TAILQ_HEAD(, ns_entry) g_namespaces = TAILQ_HEAD_INITIALIZER(g_namespaces);
static int g_num_namespaces;
static struct worker_thread *g_workers;
static int g_num_workers;
static TAILQ_HEAD(, worker_thread) g_workers = TAILQ_HEAD_INITIALIZER(g_workers);
static int g_num_workers = 0;
static uint32_t g_master_core;
static int g_abort_interval = 1;
@@ -248,19 +248,17 @@ register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
build_nvme_ns_name(entry->name, sizeof(entry->name), ctrlr, spdk_nvme_ns_get_id(ns));
g_num_namespaces++;
entry->next = g_namespaces;
g_namespaces = entry;
TAILQ_INSERT_TAIL(&g_namespaces, entry, link);
}
static void
unregister_namespaces(void)
{
struct ns_entry *entry = g_namespaces;
struct ns_entry *entry, *tmp;
while (entry) {
struct ns_entry *next = entry->next;
TAILQ_FOREACH_SAFE(entry, &g_namespaces, link, tmp) {
TAILQ_REMOVE(&g_namespaces, entry, link);
free(entry);
entry = next;
}
}
@@ -280,8 +278,7 @@ register_ctrlr(struct spdk_nvme_ctrlr *ctrlr, struct trid_entry *trid_entry)
entry->ctrlr = ctrlr;
entry->trtype = trid_entry->trid.trtype;
entry->next = g_controllers;
g_controllers = entry;
TAILQ_INSERT_TAIL(&g_controllers, entry, link);
if (trid_entry->nsid == 0) {
for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
@@ -455,8 +452,7 @@ work_fn(void *arg)
uint32_t unfinished_ctx;
/* Allocate queue pair for each namespace. */
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
ns_entry = ns_ctx->entry;
spdk_nvme_ctrlr_get_default_io_qpair_opts(ns_entry->ctrlr, &opts, sizeof(opts));
@@ -469,34 +465,26 @@ work_fn(void *arg)
fprintf(stderr, "spdk_nvme_ctrlr_alloc_io_qpair failed\n");
return 1;
}
ns_ctx = ns_ctx->next;
}
tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;
/* Submit initial I/O for each namespace. */
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
submit_io(ns_ctx, g_queue_depth);
ns_ctx = ns_ctx->next;
}
while (1) {
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
spdk_nvme_qpair_process_completions(ns_ctx->qpair, 0);
ns_ctx = ns_ctx->next;
}
if (worker->lcore == g_master_core) {
ctrlr_ctx = worker->ctrlr_ctx;
while (ctrlr_ctx) {
TAILQ_FOREACH(ctrlr_ctx, &worker->ctrlr_ctx, link) {
/* Hold mutex to guard ctrlr_ctx->current_queue_depth. */
pthread_mutex_lock(&ctrlr_ctx->mutex);
spdk_nvme_ctrlr_process_admin_completions(ctrlr_ctx->ctrlr);
pthread_mutex_unlock(&ctrlr_ctx->mutex);
ctrlr_ctx = ctrlr_ctx->next;
}
}
@@ -508,8 +496,7 @@ work_fn(void *arg)
do {
unfinished_ctx = 0;
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
if (!ns_ctx->is_draining) {
ns_ctx->is_draining = true;
}
@@ -521,7 +508,6 @@ work_fn(void *arg)
unfinished_ctx++;
}
}
ns_ctx = ns_ctx->next;
}
} while (unfinished_ctx > 0);
@@ -529,8 +515,7 @@ work_fn(void *arg)
do {
unfinished_ctx = 0;
ctrlr_ctx = worker->ctrlr_ctx;
while (ctrlr_ctx != NULL) {
TAILQ_FOREACH(ctrlr_ctx, &worker->ctrlr_ctx, link) {
pthread_mutex_lock(&ctrlr_ctx->mutex);
if (ctrlr_ctx->current_queue_depth > 0) {
spdk_nvme_ctrlr_process_admin_completions(ctrlr_ctx->ctrlr);
@@ -539,7 +524,6 @@ work_fn(void *arg)
}
}
pthread_mutex_unlock(&ctrlr_ctx->mutex);
ctrlr_ctx = ctrlr_ctx->next;
}
} while (unfinished_ctx > 0);
}
@@ -812,9 +796,6 @@ register_workers(void)
uint32_t i;
struct worker_thread *worker;
g_workers = NULL;
g_num_workers = 0;
SPDK_ENV_FOREACH_CORE(i) {
worker = calloc(1, sizeof(*worker));
if (worker == NULL) {
@@ -822,9 +803,10 @@ register_workers(void)
return -1;
}
TAILQ_INIT(&worker->ns_ctx);
TAILQ_INIT(&worker->ctrlr_ctx);
worker->lcore = i;
worker->next = g_workers;
g_workers = worker;
TAILQ_INSERT_TAIL(&g_workers, worker, link);
g_num_workers++;
}
@@ -834,27 +816,23 @@ register_workers(void)
static void
unregister_workers(void)
{
struct worker_thread *worker = g_workers;
struct worker_thread *worker, *tmp_worker;
struct ns_worker_ctx *ns_ctx, *tmp_ns_ctx;
struct ctrlr_worker_ctx *ctrlr_ctx, *tmp_ctrlr_ctx;
/* Free namespace context and worker thread */
while (worker) {
struct worker_thread *next_worker = worker->next;
struct ns_worker_ctx *ns_ctx = worker->ns_ctx;
while (ns_ctx) {
struct ns_worker_ctx *next_ns_ctx = ns_ctx->next;
TAILQ_FOREACH_SAFE(worker, &g_workers, link, tmp_worker) {
TAILQ_REMOVE(&g_workers, worker, link);
TAILQ_FOREACH_SAFE(ns_ctx, &worker->ns_ctx, link, tmp_ns_ctx) {
TAILQ_REMOVE(&worker->ns_ctx, ns_ctx, link);
printf("NS: %s I/O completed: %lu, failed: %lu\n",
ns_ctx->entry->name, ns_ctx->io_completed, ns_ctx->io_failed);
free(ns_ctx);
ns_ctx = next_ns_ctx;
}
struct ctrlr_worker_ctx *ctrlr_ctx = worker->ctrlr_ctx;
while (ctrlr_ctx) {
struct ctrlr_worker_ctx *next_ctrlr_ctx = ctrlr_ctx->next;
TAILQ_FOREACH_SAFE(ctrlr_ctx, &worker->ctrlr_ctx, link, tmp_ctrlr_ctx) {
TAILQ_REMOVE(&worker->ctrlr_ctx, ctrlr_ctx, link);
printf("CTRLR: %s abort submitted %lu, failed to submit %lu\n",
ctrlr_ctx->entry->name, ctrlr_ctx->abort_submitted,
ctrlr_ctx->abort_submit_failed);
@@ -862,11 +840,9 @@ unregister_workers(void)
ctrlr_ctx->successful_abort, ctrlr_ctx->unsuccessful_abort,
ctrlr_ctx->abort_failed);
free(ctrlr_ctx);
ctrlr_ctx = next_ctrlr_ctx;
}
free(worker);
worker = next_worker;
}
}
@@ -931,35 +907,33 @@ register_controllers(void)
static void
unregister_controllers(void)
{
struct ctrlr_entry *entry = g_controllers;
struct ctrlr_entry *entry, *tmp;
while (entry) {
struct ctrlr_entry *next = entry->next;
TAILQ_FOREACH_SAFE(entry, &g_controllers, link, tmp) {
TAILQ_REMOVE(&g_controllers, entry, link);
spdk_nvme_detach(entry->ctrlr);
free(entry);
entry = next;
}
}
static int
associate_master_worker_with_ctrlr(void)
{
struct ctrlr_entry *entry = g_controllers;
struct worker_thread *worker = g_workers;
struct ctrlr_entry *entry;
struct worker_thread *worker;
struct ctrlr_worker_ctx *ctrlr_ctx;
while (worker) {
TAILQ_FOREACH(worker, &g_workers, link) {
if (worker->lcore == g_master_core) {
break;
}
worker = worker->next;
}
if (!worker) {
return -1;
}
while (entry) {
TAILQ_FOREACH(entry, &g_controllers, link) {
ctrlr_ctx = calloc(1, sizeof(struct ctrlr_worker_ctx));
if (!ctrlr_ctx) {
return -1;
@@ -968,10 +942,8 @@ associate_master_worker_with_ctrlr(void)
pthread_mutex_init(&ctrlr_ctx->mutex, NULL);
ctrlr_ctx->entry = entry;
ctrlr_ctx->ctrlr = entry->ctrlr;
ctrlr_ctx->next = worker->ctrlr_ctx;
worker->ctrlr_ctx = ctrlr_ctx;
entry = entry->next;
TAILQ_INSERT_TAIL(&worker->ctrlr_ctx, ctrlr_ctx, link);
}
return 0;
@@ -980,27 +952,23 @@ associate_master_worker_with_ctrlr(void)
static struct ctrlr_worker_ctx *
get_ctrlr_worker_ctx(struct spdk_nvme_ctrlr *ctrlr)
{
struct worker_thread *worker = g_workers;
struct worker_thread *worker;
struct ctrlr_worker_ctx *ctrlr_ctx;
while (worker != NULL) {
TAILQ_FOREACH(worker, &g_workers, link) {
if (worker->lcore == g_master_core) {
break;
}
worker = worker->next;
}
if (!worker) {
return NULL;
}
ctrlr_ctx = worker->ctrlr_ctx;
while (ctrlr_ctx != NULL) {
TAILQ_FOREACH(ctrlr_ctx, &worker->ctrlr_ctx, link) {
if (ctrlr_ctx->ctrlr == ctrlr) {
return ctrlr_ctx;
}
ctrlr_ctx = ctrlr_ctx->next;
}
return NULL;
@@ -1009,8 +977,8 @@ get_ctrlr_worker_ctx(struct spdk_nvme_ctrlr *ctrlr)
static int
associate_workers_with_ns(void)
{
struct ns_entry *entry = g_namespaces;
struct worker_thread *worker = g_workers;
struct ns_entry *entry = TAILQ_FIRST(&g_namespaces);
struct worker_thread *worker = TAILQ_FIRST(&g_workers);
struct ns_worker_ctx *ns_ctx;
int i, count;
@@ -1034,17 +1002,16 @@ associate_workers_with_ns(void)
return -1;
}
ns_ctx->next = worker->ns_ctx;
worker->ns_ctx = ns_ctx;
TAILQ_INSERT_TAIL(&worker->ns_ctx, ns_ctx, link);
worker = worker->next;
worker = TAILQ_NEXT(worker, link);
if (worker == NULL) {
worker = g_workers;
worker = TAILQ_FIRST(&g_workers);
}
entry = entry->next;
entry = TAILQ_NEXT(entry, link);
if (entry == NULL) {
entry = g_namespaces;
entry = TAILQ_FIRST(&g_namespaces);
}
}
@@ -1117,15 +1084,13 @@ int main(int argc, char **argv)
/* Launch all of the slave workers */
g_master_core = spdk_env_get_current_core();
master_worker = NULL;
worker = g_workers;
while (worker != NULL) {
TAILQ_FOREACH(worker, &g_workers, link) {
if (worker->lcore != g_master_core) {
spdk_env_thread_launch_pinned(worker->lcore, work_fn, worker);
} else {
assert(master_worker == NULL);
master_worker = worker;
}
worker = worker->next;
}
assert(master_worker != NULL);
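
The unregister_*() and cleanup() hunks in this file and the files below all rely on the same teardown idiom: TAILQ_FOREACH_SAFE plus TAILQ_REMOVE, so the current element can be unlinked and freed without losing the iteration cursor. A minimal sketch of that idiom follows, reusing the illustrative ctrlr_entry list from the sketch above; unregister_all() is a hypothetical helper. TAILQ_FOREACH_SAFE is a BSD extension that glibc's sys/queue.h does not define (SPDK provides it through its own queue header), so a fallback definition is included to keep the sketch self-contained.

/* TAILQ_FOREACH_SAFE is a BSD extension; this fallback matches the usual
 * definition and is only here so the sketch stands alone. */
#ifndef TAILQ_FOREACH_SAFE
#define TAILQ_FOREACH_SAFE(var, head, field, tvar)                     \
        for ((var) = TAILQ_FIRST((head));                              \
             (var) && ((tvar) = TAILQ_NEXT((var), field), 1);          \
             (var) = (tvar))
#endif

static void
unregister_all(void)
{
        struct ctrlr_entry *entry, *tmp;

        /* tmp always holds the next element, so the current one can be
         * unlinked and freed without breaking the walk. */
        TAILQ_FOREACH_SAFE(entry, &g_controllers, link, tmp) {
                TAILQ_REMOVE(&g_controllers, entry, link);
                free(entry);
        }
}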

View File

@@ -41,7 +41,7 @@
struct ctrlr_entry {
struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_intel_rw_latency_page latency_page;
struct ctrlr_entry *next;
TAILQ_ENTRY(ctrlr_entry) link;
char name[1024];
};
@@ -51,7 +51,7 @@ struct ns_entry {
struct spdk_nvme_ns *ns;
} nvme;
struct ns_entry *next;
TAILQ_ENTRY(ns_entry) link;
uint32_t io_size_blocks;
uint64_t size_in_ios;
char name[1024];
@@ -64,7 +64,7 @@ struct ns_worker_ctx {
uint64_t offset_in_ios;
bool is_draining;
struct spdk_nvme_qpair *qpair;
struct ns_worker_ctx *next;
TAILQ_ENTRY(ns_worker_ctx) link;
};
struct arb_task {
@@ -73,8 +73,8 @@ struct arb_task {
};
struct worker_thread {
struct ns_worker_ctx *ns_ctx;
struct worker_thread *next;
TAILQ_HEAD(, ns_worker_ctx) ns_ctx;
TAILQ_ENTRY(worker_thread) link;
unsigned lcore;
enum spdk_nvme_qprio qprio;
};
@@ -106,9 +106,9 @@ struct feature {
static struct spdk_mempool *task_pool = NULL;
static struct ctrlr_entry *g_controllers = NULL;
static struct ns_entry *g_namespaces = NULL;
static struct worker_thread *g_workers = NULL;
static TAILQ_HEAD(, ctrlr_entry) g_controllers = TAILQ_HEAD_INITIALIZER(g_controllers);
static TAILQ_HEAD(, ns_entry) g_namespaces = TAILQ_HEAD_INITIALIZER(g_namespaces);
static TAILQ_HEAD(, worker_thread) g_workers = TAILQ_HEAD_INITIALIZER(g_workers);
static struct feature features[SPDK_NVME_FEAT_ARBITRATION + 1] = {};
@@ -184,8 +184,7 @@ register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
snprintf(entry->name, 44, "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);
g_arbitration.num_namespaces++;
entry->next = g_namespaces;
g_namespaces = entry;
TAILQ_INSERT_TAIL(&g_namespaces, entry, link);
}
static void
@@ -239,8 +238,7 @@ register_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
snprintf(entry->name, sizeof(entry->name), "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);
entry->ctrlr = ctrlr;
entry->next = g_controllers;
g_controllers = entry;
TAILQ_INSERT_TAIL(&g_controllers, entry, link);
if ((g_arbitration.latency_tracking_enable != 0) &&
spdk_nvme_ctrlr_is_feature_supported(ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING)) {
@@ -399,30 +397,25 @@ cleanup_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
static void
cleanup(uint32_t task_count)
{
struct ns_entry *entry = g_namespaces;
struct ns_entry *next_entry = NULL;
struct worker_thread *worker = g_workers;
struct worker_thread *next_worker = NULL;
struct ns_entry *entry, *tmp_entry;
struct worker_thread *worker, *tmp_worker;
struct ns_worker_ctx *ns_ctx, *tmp_ns_ctx;
while (entry) {
next_entry = entry->next;
TAILQ_FOREACH_SAFE(entry, &g_namespaces, link, tmp_entry) {
TAILQ_REMOVE(&g_namespaces, entry, link);
free(entry);
entry = next_entry;
};
while (worker) {
struct ns_worker_ctx *ns_ctx = worker->ns_ctx;
TAILQ_FOREACH_SAFE(worker, &g_workers, link, tmp_worker) {
TAILQ_REMOVE(&g_workers, worker, link);
/* ns_worker_ctx is a list in the worker */
while (ns_ctx) {
struct ns_worker_ctx *next_ns_ctx = ns_ctx->next;
TAILQ_FOREACH_SAFE(ns_ctx, &worker->ns_ctx, link, tmp_ns_ctx) {
TAILQ_REMOVE(&worker->ns_ctx, ns_ctx, link);
free(ns_ctx);
ns_ctx = next_ns_ctx;
}
next_worker = worker->next;
free(worker);
worker = next_worker;
};
if (spdk_mempool_count(task_pool) != (size_t)task_count) {
@@ -437,28 +430,23 @@
{
uint64_t tsc_end;
struct worker_thread *worker = (struct worker_thread *)arg;
struct ns_worker_ctx *ns_ctx = NULL;
struct ns_worker_ctx *ns_ctx;
printf("Starting thread on core %u with %s\n", worker->lcore, print_qprio(worker->qprio));
/* Allocate a queue pair for each namespace. */
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
if (init_ns_worker_ctx(ns_ctx, worker->qprio) != 0) {
printf("ERROR: init_ns_worker_ctx() failed\n");
return 1;
}
ns_ctx = ns_ctx->next;
}
tsc_end = spdk_get_ticks() + g_arbitration.time_in_sec * g_arbitration.tsc_rate;
/* Submit initial I/O for each namespace. */
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
submit_io(ns_ctx, g_arbitration.queue_depth);
ns_ctx = ns_ctx->next;
}
while (1) {
@@ -467,10 +455,8 @@ work_fn(void *arg)
* I/O will be submitted in the io_complete callback
* to replace each I/O that is completed.
*/
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
check_io(ns_ctx);
ns_ctx = ns_ctx->next;
}
if (spdk_get_ticks() > tsc_end) {
@@ -478,11 +464,9 @@
}
}
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
drain_io(ns_ctx);
cleanup_ns_worker_ctx(ns_ctx);
ns_ctx = ns_ctx->next;
}
return 0;
@@ -562,18 +546,14 @@ print_performance(void)
struct worker_thread *worker;
struct ns_worker_ctx *ns_ctx;
worker = g_workers;
while (worker) {
ns_ctx = worker->ns_ctx;
while (ns_ctx) {
TAILQ_FOREACH(worker, &g_workers, link) {
TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
io_per_second = (float)ns_ctx->io_completed / g_arbitration.time_in_sec;
sent_all_io_in_secs = g_arbitration.io_count / io_per_second;
printf("%-43.43s core %u: %8.2f IO/s %8.2f secs/%d ios\n",
ns_ctx->entry->name, worker->lcore,
io_per_second, sent_all_io_in_secs, g_arbitration.io_count);
ns_ctx = ns_ctx->next;
}
worker = worker->next;
}
printf("========================================================\n");
@@ -613,8 +593,7 @@ print_latency_statistics(const char *op_name, enum spdk_nvme_intel_log_page log_
printf("%s Latency Statistics:\n", op_name);
printf("========================================================\n");
ctrlr = g_controllers;
while (ctrlr) {
TAILQ_FOREACH(ctrlr, &g_controllers, link) {
if (spdk_nvme_ctrlr_is_log_page_supported(ctrlr->ctrlr, log_page)) {
if (spdk_nvme_ctrlr_cmd_get_log_page(
ctrlr->ctrlr, log_page,
@@ -633,23 +612,18 @@ print_latency_statistics(const char *op_name, enum spdk_nvme_intel_log_page log_
printf("Controller %s: %s latency statistics not supported\n",
ctrlr->name, op_name);
}
ctrlr = ctrlr->next;
}
while (g_arbitration.outstanding_commands) {
ctrlr = g_controllers;
while (ctrlr) {
TAILQ_FOREACH(ctrlr, &g_controllers, link) {
spdk_nvme_ctrlr_process_admin_completions(ctrlr->ctrlr);
ctrlr = ctrlr->next;
}
}
ctrlr = g_controllers;
while (ctrlr) {
TAILQ_FOREACH(ctrlr, &g_controllers, link) {
if (spdk_nvme_ctrlr_is_log_page_supported(ctrlr->ctrlr, log_page)) {
print_latency_page(ctrlr);
}
ctrlr = ctrlr->next;
}
printf("\n");
}
@@ -825,9 +799,6 @@ register_workers(void)
struct worker_thread *worker;
enum spdk_nvme_qprio qprio = SPDK_NVME_QPRIO_URGENT;
g_workers = NULL;
g_arbitration.num_workers = 0;
SPDK_ENV_FOREACH_CORE(i) {
worker = calloc(1, sizeof(*worker));
if (worker == NULL) {
@@ -835,9 +806,9 @@
return -1;
}
TAILQ_INIT(&worker->ns_ctx);
worker->lcore = i;
worker->next = g_workers;
g_workers = worker;
TAILQ_INSERT_TAIL(&g_workers, worker, link);
g_arbitration.num_workers++;
if (g_arbitration.arbitration_mechanism == SPDK_NVME_CAP_AMS_WRR) {
@@ -895,25 +866,24 @@ register_controllers(void)
static void
unregister_controllers(void)
{
struct ctrlr_entry *entry = g_controllers;
struct ctrlr_entry *entry, *tmp;
while (entry) {
struct ctrlr_entry *next = entry->next;
TAILQ_FOREACH_SAFE(entry, &g_controllers, link, tmp) {
TAILQ_REMOVE(&g_controllers, entry, link);
if (g_arbitration.latency_tracking_enable &&
spdk_nvme_ctrlr_is_feature_supported(entry->ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING)) {
set_latency_tracking_feature(entry->ctrlr, false);
}
spdk_nvme_detach(entry->ctrlr);
free(entry);
entry = next;
}
}
static int
associate_workers_with_ns(void)
{
struct ns_entry *entry = g_namespaces;
struct worker_thread *worker = g_workers;
struct ns_entry *entry = TAILQ_FIRST(&g_namespaces);
struct worker_thread *worker = TAILQ_FIRST(&g_workers);
struct ns_worker_ctx *ns_ctx;
int i, count;
@@ -933,17 +903,16 @@ associate_workers_with_ns(void)
printf("Associating %s with lcore %d\n", entry->name, worker->lcore);
ns_ctx->entry = entry;
ns_ctx->next = worker->ns_ctx;
worker->ns_ctx = ns_ctx;
TAILQ_INSERT_TAIL(&worker->ns_ctx, ns_ctx, link);
worker = worker->next;
worker = TAILQ_NEXT(worker, link);
if (worker == NULL) {
worker = g_workers;
worker = TAILQ_FIRST(&g_workers);
}
entry = entry->next;
entry = TAILQ_NEXT(entry, link);
if (entry == NULL) {
entry = g_namespaces;
entry = TAILQ_FIRST(&g_namespaces);
}
}
@@ -1128,15 +1097,13 @@ main(int argc, char **argv)
/* Launch all of the slave workers */
master_core = spdk_env_get_current_core();
master_worker = NULL;
worker = g_workers;
while (worker != NULL) {
TAILQ_FOREACH(worker, &g_workers, link) {
if (worker->lcore != master_core) {
spdk_env_thread_launch_pinned(worker->lcore, work_fn, worker);
} else {
assert(master_worker == NULL);
master_worker = worker;
}
worker = worker->next;
}
assert(master_worker != NULL);

View File

@@ -38,20 +38,20 @@
#include "spdk/env.h"
struct ctrlr_entry {
struct spdk_nvme_ctrlr *ctrlr;
struct ctrlr_entry *next;
char name[1024];
struct spdk_nvme_ctrlr *ctrlr;
TAILQ_ENTRY(ctrlr_entry) link;
char name[1024];
};
struct ns_entry {
struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_ns *ns;
struct ns_entry *next;
TAILQ_ENTRY(ns_entry) link;
struct spdk_nvme_qpair *qpair;
};
static struct ctrlr_entry *g_controllers = NULL;
static struct ns_entry *g_namespaces = NULL;
static TAILQ_HEAD(, ctrlr_entry) g_controllers = TAILQ_HEAD_INITIALIZER(g_controllers);
static TAILQ_HEAD(, ns_entry) g_namespaces = TAILQ_HEAD_INITIALIZER(g_namespaces);
static bool g_vmd = false;
@@ -72,8 +72,7 @@ register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
entry->ctrlr = ctrlr;
entry->ns = ns;
entry->next = g_namespaces;
g_namespaces = entry;
TAILQ_INSERT_TAIL(&g_namespaces, entry, link);
printf(" Namespace ID: %d size: %juGB\n", spdk_nvme_ns_get_id(ns),
spdk_nvme_ns_get_size(ns) / 1000000000);
@@ -162,8 +161,7 @@ hello_world(void)
int rc;
size_t sz;
ns_entry = g_namespaces;
while (ns_entry != NULL) {
TAILQ_FOREACH(ns_entry, &g_namespaces, link) {
/*
* Allocate an I/O qpair that we can use to submit read/write requests
* to namespaces on the controller. NVMe controllers typically support
@@ -259,7 +257,6 @@ hello_world(void)
* pending I/O are completed before trying to free the qpair.
*/
spdk_nvme_ctrlr_free_io_qpair(ns_entry->qpair);
ns_entry = ns_entry->next;
}
}
@@ -302,8 +299,7 @@ attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
snprintf(entry->name, sizeof(entry->name), "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);
entry->ctrlr = ctrlr;
entry->next = g_controllers;
g_controllers = entry;
TAILQ_INSERT_TAIL(&g_controllers, entry, link);
/*
* Each controller has one or more namespaces. An NVMe namespace is basically
@@ -327,21 +323,18 @@ attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
static void
cleanup(void)
{
struct ns_entry *ns_entry = g_namespaces;
struct ctrlr_entry *ctrlr_entry = g_controllers;
struct ns_entry *ns_entry, *tmp_ns_entry;
struct ctrlr_entry *ctrlr_entry, *tmp_ctrlr_entry;
while (ns_entry) {
struct ns_entry *next = ns_entry->next;
TAILQ_FOREACH_SAFE(ns_entry, &g_namespaces, link, tmp_ns_entry) {
TAILQ_REMOVE(&g_namespaces, ns_entry, link);
free(ns_entry);
ns_entry = next;
}
while (ctrlr_entry) {
struct ctrlr_entry *next = ctrlr_entry->next;
TAILQ_FOREACH_SAFE(ctrlr_entry, &g_controllers, link, tmp_ctrlr_entry) {
TAILQ_REMOVE(&g_controllers, ctrlr_entry, link);
spdk_nvme_detach(ctrlr_entry->ctrlr);
free(ctrlr_entry);
ctrlr_entry = next;
}
}
@@ -418,7 +411,7 @@ int main(int argc, char **argv)
return 1;
}
if (g_controllers == NULL) {
if (TAILQ_EMPTY(&g_controllers)) {
fprintf(stderr, "no NVMe controllers found\n");
cleanup();
return 1;

View File

@@ -47,7 +47,7 @@ struct ctrlr_entry {
struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_transport_id failover_trid;
enum spdk_nvme_transport_type trtype;
struct ctrlr_entry *next;
TAILQ_ENTRY(ctrlr_entry) link;
char name[1024];
int num_resets;
};
@@ -56,7 +56,7 @@ struct ns_entry {
struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_ns *ns;
struct ns_entry *next;
TAILQ_ENTRY(ns_entry) link;
uint32_t io_size_blocks;
uint32_t num_io_requests;
uint64_t size_in_ios;
@@ -66,17 +66,17 @@ struct ns_entry {
};
struct ns_worker_ctx {
struct ns_entry *entry;
uint64_t io_completed;
uint64_t current_queue_depth;
uint64_t offset_in_ios;
bool is_draining;
struct ns_entry *entry;
uint64_t io_completed;
uint64_t current_queue_depth;
uint64_t offset_in_ios;
bool is_draining;
int num_qpairs;
struct spdk_nvme_qpair **qpair;
int last_qpair;
int num_qpairs;
struct spdk_nvme_qpair **qpair;
int last_qpair;
struct ns_worker_ctx *next;
TAILQ_ENTRY(ns_worker_ctx) link;
};
struct perf_task {
@@ -86,18 +86,18 @@ struct perf_task {
};
struct worker_thread {
struct ns_worker_ctx *ns_ctx;
struct worker_thread *next;
unsigned lcore;
TAILQ_HEAD(, ns_worker_ctx) ns_ctx;
TAILQ_ENTRY(worker_thread) link;
unsigned lcore;
};
/* For basic reset handling. */
static int g_max_ctrlr_resets = 15;
static struct ctrlr_entry *g_controllers = NULL;
static struct ns_entry *g_namespaces = NULL;
static TAILQ_HEAD(, ctrlr_entry) g_controllers = TAILQ_HEAD_INITIALIZER(g_controllers);
static TAILQ_HEAD(, ns_entry) g_namespaces = TAILQ_HEAD_INITIALIZER(g_namespaces);
static int g_num_namespaces = 0;
static struct worker_thread *g_workers = NULL;
static TAILQ_HEAD(, worker_thread) g_workers = TAILQ_HEAD_INITIALIZER(g_workers);
static int g_num_workers = 0;
static uint64_t g_tsc_rate;
@@ -347,19 +347,17 @@ register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
build_nvme_name(entry->name, sizeof(entry->name), ctrlr);
g_num_namespaces++;
entry->next = g_namespaces;
g_namespaces = entry;
TAILQ_INSERT_TAIL(&g_namespaces, entry, link);
}
static void
unregister_namespaces(void)
{
struct ns_entry *entry = g_namespaces;
struct ns_entry *entry, *tmp;
while (entry) {
struct ns_entry *next = entry->next;
TAILQ_FOREACH_SAFE(entry, &g_namespaces, link, tmp) {
TAILQ_REMOVE(&g_namespaces, entry, link);
free(entry);
entry = next;
}
}
@@ -396,8 +394,7 @@ register_ctrlr(struct spdk_nvme_ctrlr *ctrlr, struct trid_entry *trid_entry)
entry->ctrlr = ctrlr;
entry->trtype = trid_entry->trid.trtype;
entry->next = g_controllers;
g_controllers = entry;
TAILQ_INSERT_TAIL(&g_controllers, entry, link);
for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
@@ -527,22 +524,18 @@ work_fn(void *arg)
printf("Starting thread on core %u\n", worker->lcore);
/* Allocate queue pairs for each namespace. */
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
if (nvme_init_ns_worker_ctx(ns_ctx) != 0) {
printf("ERROR: init_ns_worker_ctx() failed\n");
return 1;
}
ns_ctx = ns_ctx->next;
}
tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;
/* Submit initial I/O for each namespace. */
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
submit_io(ns_ctx, g_queue_depth);
ns_ctx = ns_ctx->next;
}
while (1) {
@@ -551,10 +544,8 @@ work_fn(void *arg)
* I/O will be submitted in the io_complete callback
* to replace each I/O that is completed.
*/
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
check_io(ns_ctx);
ns_ctx = ns_ctx->next;
}
if (spdk_get_ticks() > tsc_end) {
@@ -565,8 +556,7 @@ work_fn(void *arg)
/* drain the io of each ns_ctx in round robin to make the fairness */
do {
unfinished_ns_ctx = 0;
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
TAILQ_FOREACH(ns_ctx, &worker->ns_ctx, link) {
/* first time will enter into this if case */
if (!ns_ctx->is_draining) {
ns_ctx->is_draining = true;
@@ -580,7 +570,6 @@ work_fn(void *arg)
unfinished_ns_ctx++;
}
}
ns_ctx = ns_ctx->next;
}
} while (unfinished_ns_ctx > 0);
@@ -875,9 +864,6 @@ register_workers(void)
uint32_t i;
struct worker_thread *worker;
g_workers = NULL;
g_num_workers = 0;
SPDK_ENV_FOREACH_CORE(i) {
worker = calloc(1, sizeof(*worker));
if (worker == NULL) {
@@ -885,9 +871,9 @@
return -1;
}
TAILQ_INIT(&worker->ns_ctx);
worker->lcore = i;
worker->next = g_workers;
g_workers = worker;
TAILQ_INSERT_TAIL(&g_workers, worker, link);
g_num_workers++;
}
@@ -897,21 +883,18 @@ register_workers(void)
static void
unregister_workers(void)
{
struct worker_thread *worker = g_workers;
struct worker_thread *worker, *tmp_worker;
struct ns_worker_ctx *ns_ctx, *tmp_ns_ctx;
/* Free namespace context and worker thread */
while (worker) {
struct worker_thread *next_worker = worker->next;
struct ns_worker_ctx *ns_ctx = worker->ns_ctx;
while (ns_ctx) {
struct ns_worker_ctx *next_ns_ctx = ns_ctx->next;
TAILQ_FOREACH_SAFE(worker, &g_workers, link, tmp_worker) {
TAILQ_REMOVE(&g_workers, worker, link);
TAILQ_FOREACH_SAFE(ns_ctx, &worker->ns_ctx, link, tmp_ns_ctx) {
TAILQ_REMOVE(&worker->ns_ctx, ns_ctx, link);
free(ns_ctx);
ns_ctx = next_ns_ctx;
}
free(worker);
worker = next_worker;
}
}
@@ -975,22 +958,20 @@ register_controllers(void)
static void
unregister_controllers(void)
{
struct ctrlr_entry *entry = g_controllers;
while (entry) {
struct ctrlr_entry *next = entry->next;
struct ctrlr_entry *entry, *tmp;
TAILQ_FOREACH_SAFE(entry, &g_controllers, link, tmp) {
TAILQ_REMOVE(&g_controllers, entry, link);
spdk_nvme_detach(entry->ctrlr);
free(entry);
entry = next;
}
}
static int
associate_workers_with_ns(void)
{
struct ns_entry *entry = g_namespaces;
struct worker_thread *worker = g_workers;
struct ns_entry *entry = TAILQ_FIRST(&g_namespaces);
struct worker_thread *worker = TAILQ_FIRST(&g_workers);
struct ns_worker_ctx *ns_ctx;
int i, count;
@@ -1008,17 +989,17 @@ associate_workers_with_ns(void)
printf("Associating %s with lcore %d\n", entry->name, worker->lcore);
ns_ctx->entry = entry;
ns_ctx->next = worker->ns_ctx;
worker->ns_ctx = ns_ctx;
worker = worker->next;
TAILQ_INSERT_TAIL(&worker->ns_ctx, ns_ctx, link);
worker = TAILQ_NEXT(worker, link);
if (worker == NULL) {
worker = g_workers;
worker = TAILQ_FIRST(&g_workers);
}
entry = entry->next;
entry = TAILQ_NEXT(entry, link);
if (entry == NULL) {
entry = g_namespaces;
entry = TAILQ_FIRST(&g_namespaces);
}
}
@@ -1040,8 +1021,7 @@ nvme_poll_ctrlrs(void *arg)
while (true) {
pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
entry = g_controllers;
while (entry) {
TAILQ_FOREACH(entry, &g_controllers, link) {
rc = spdk_nvme_ctrlr_process_admin_completions(entry->ctrlr);
/* This controller has encountered a failure at the transport level. reset it. */
if (rc == -ENXIO) {
@@ -1071,7 +1051,6 @@ nvme_poll_ctrlrs(void *arg)
fprintf(stderr, "Controller properly reset.\n");
}
}
entry = entry->next;
}
pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
@@ -1148,15 +1127,13 @@ int main(int argc, char **argv)
/* Launch all of the slave workers */
master_core = spdk_env_get_current_core();
master_worker = NULL;
worker = g_workers;
while (worker != NULL) {
TAILQ_FOREACH(worker, &g_workers, link) {
if (worker->lcore != master_core) {
spdk_env_thread_launch_pinned(worker->lcore, work_fn, worker);
} else {
assert(master_worker == NULL);
master_worker = worker;
}
worker = worker->next;
}
assert(master_worker != NULL);
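
A final idiom worth noting from the abort example above: associate_master_worker_with_ctrlr() and get_ctrlr_worker_ctx() rely on TAILQ_FOREACH leaving its iteration variable NULL when the loop runs to completion without hitting a break, which is exactly what the following if (!worker) checks test. A short sketch of that find-or-NULL pattern, again using the illustrative ctrlr_entry list from the first sketch; find_entry() is a hypothetical helper, not SPDK code.

#include <string.h>

/* TAILQ_FOREACH's loop condition is "var != NULL", so falling off the end
 * of the list leaves the iterator NULL. */
static struct ctrlr_entry *
find_entry(const char *name)
{
        struct ctrlr_entry *entry;

        TAILQ_FOREACH(entry, &g_controllers, link) {
                if (strcmp(entry->name, name) == 0) {
                        break;
                }
        }
        return entry; /* NULL when no entry matched */
}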