bdevperf: Register bdevperf as I/O device and make its start/stop asynchronous
Register the global bdevperf object as an I/O device with spdk_io_device_register(). Create and destroy I/O target groups asynchronously using spdk_for_each_thread() and spdk_for_each_channel(). Because spdk_for_each_thread() and spdk_for_each_channel() serialize execution across threads, no locking is necessary. Additionally, replace the per-core loop + event call in bdevperf_submit_on_core() and bdevperf_stop_io_on_core() with spdk_for_each_channel(), and rename them to bdevperf_submit_on_group() and bdevperf_stop_io_on_group(), respectively. This advances the work to move bdevperf from core-based to thread-based operation.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I63b512d0b8e734e862afe1f7a4053414b99204d7
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/478780
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent 3d2ee8d1ce
commit 728481f491
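For readers unfamiliar with the SPDK primitives used in this patch, the pattern the commit adopts is roughly the sketch below. It is not part of the patch: the io_device, context struct, and function names are illustrative only, and the snippet assumes it runs on threads of an already started SPDK application. A global object is registered as an I/O device; each thread that calls spdk_get_io_channel() on it gets a per-thread context built by the create callback (bdevperf does this from spdk_for_each_thread()); spdk_for_each_channel() then visits every such channel on its owning thread, so per-group state needs no locking, and the completion callback fires once every thread has called spdk_for_each_channel_continue().

#include "spdk/stdinc.h"
#include "spdk/thread.h"

/* Illustrative only -- these names are not from bdevperf. */
static int g_example_io_device;

struct example_group {
	uint64_t submitted;	/* per-thread state, touched only on its owning thread */
};

/* Create callback: runs on each thread the first time it gets a channel. */
static int
example_group_create(void *io_device, void *ctx_buf)
{
	struct example_group *group = ctx_buf;

	group->submitted = 0;
	return 0;
}

/* Destroy callback: runs when a thread's last channel reference is put. */
static void
example_group_destroy(void *io_device, void *ctx_buf)
{
}

/* Per-thread step of an iteration started by spdk_for_each_channel(). */
static void
example_start_on_group(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct example_group *group = spdk_io_channel_get_ctx(ch);

	group->submitted++;	/* no lock: only this thread touches this group */
	spdk_for_each_channel_continue(i, 0);
}

/* Completion: runs on the initiating thread after every channel was visited. */
static void
example_start_done(struct spdk_io_channel_iter *i, int status)
{
	printf("started on all groups, status=%d\n", status);
}

/* Would typically be called from the spdk_app_start() entry callback. */
static void
example_run(void)
{
	spdk_io_device_register(&g_example_io_device, example_group_create,
				example_group_destroy, sizeof(struct example_group),
				"example");

	/* Each participating thread must call
	 * spdk_get_io_channel(&g_example_io_device) before the iteration below
	 * has anything to visit; bdevperf does that from _bdevperf_init_thread(). */

	spdk_for_each_channel(&g_example_io_device, example_start_on_group, NULL,
			      example_start_done);
}

The patch below applies this shape to bdevperf: io_target_group becomes the per-channel context, _bdevperf_init_thread() takes a channel on every thread, and the submit, stop, and fini paths become spdk_for_each_channel() iterations.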
@@ -231,43 +231,6 @@ verify_data(void *wr_buf, int wr_buf_len, void *rd_buf, int rd_buf_len, int bloc
 	return true;
 }
 
-static int
-blockdev_heads_init(void)
-{
-	uint32_t i;
-	struct io_target_group *group, *tmp;
-
-	SPDK_ENV_FOREACH_CORE(i) {
-		group = calloc(1, sizeof(*group));
-		if (!group) {
-			fprintf(stderr, "Cannot allocate io_target_group\n");
-			goto error;
-		}
-		group->lcore = i;
-		TAILQ_INIT(&group->targets);
-		TAILQ_INSERT_TAIL(&g_bdevperf.groups, group, link);
-	}
-	return 0;
-
-error:
-	TAILQ_FOREACH_SAFE(group, &g_bdevperf.groups, link, tmp) {
-		TAILQ_REMOVE(&g_bdevperf.groups, group, link);
-		free(group);
-	}
-	return -1;
-}
-
-static void
-blockdev_heads_destroy(void)
-{
-	struct io_target_group *group, *tmp;
-
-	TAILQ_FOREACH_SAFE(group, &g_bdevperf.groups, link, tmp) {
-		TAILQ_REMOVE(&g_bdevperf.groups, group, link);
-		free(group);
-	}
-}
-
 static void
 bdevperf_free_target(struct io_target *target)
 {
@@ -460,11 +423,35 @@ bdevperf_construct_targets(void)
 	}
 }
 
+static void
+_bdevperf_fini_thread_done(struct spdk_io_channel_iter *i, int status)
+{
+	spdk_io_device_unregister(&g_bdevperf, NULL);
+
+	spdk_app_stop(g_run_rc);
+}
+
+static void
+_bdevperf_fini_thread(struct spdk_io_channel_iter *i)
+{
+	struct spdk_io_channel *ch;
+	struct io_target_group *group;
+
+	ch = spdk_io_channel_iter_get_channel(i);
+	group = spdk_io_channel_get_ctx(ch);
+
+	TAILQ_REMOVE(&g_bdevperf.groups, group, link);
+
+	spdk_put_io_channel(ch);
+
+	spdk_for_each_channel_continue(i, 0);
+}
+
 static void
 bdevperf_fini(void)
 {
-	blockdev_heads_destroy();
-	spdk_app_stop(g_run_rc);
+	spdk_for_each_channel(&g_bdevperf, _bdevperf_fini_thread, NULL,
+			      _bdevperf_fini_thread_done);
 }
 
 static void
@@ -946,11 +933,15 @@ reset_target(void *arg)
 }
 
 static void
-bdevperf_submit_on_core(void *arg1, void *arg2)
+bdevperf_submit_on_group(struct spdk_io_channel_iter *i)
 {
-	struct io_target_group *group = arg1;
+	struct spdk_io_channel *ch;
+	struct io_target_group *group;
 	struct io_target *target;
 
+	ch = spdk_io_channel_iter_get_channel(i);
+	group = spdk_io_channel_get_ctx(ch);
+
 	/* Submit initial I/O for each block device. Each time one
 	 * completes, another will be submitted. */
 	TAILQ_FOREACH(target, &group->targets, link) {
@@ -973,6 +964,8 @@ bdevperf_submit_on_core(void *arg1, void *arg2)
 		}
 		bdevperf_submit_io(target, g_queue_depth);
 	}
+
+	spdk_for_each_channel_continue(i, 0);
 }
 
 static void
@@ -1286,8 +1279,6 @@ verify_test_params(struct spdk_app_opts *opts)
 static int
 bdevperf_test(void)
 {
-	struct io_target_group *group;
-	struct spdk_event *event;
 	int rc;
 
 	if (g_target_count == 0) {
@@ -1310,28 +1301,33 @@ bdevperf_test(void)
 						    g_show_performance_period_in_usec);
 	}
 
-	/* Send events to start all I/O */
-	TAILQ_FOREACH(group, &g_bdevperf.groups, link) {
-		if (!TAILQ_EMPTY(&group->targets)) {
-			event = spdk_event_allocate(group->lcore, bdevperf_submit_on_core,
-						    group, NULL);
-			spdk_event_call(event);
-		}
-	}
+	/* Iterate target groups to start all I/O */
+	spdk_for_each_channel(&g_bdevperf, bdevperf_submit_on_group, NULL, NULL);
+
 	return 0;
 }
 
+static int
+io_target_group_create(void *io_device, void *ctx_buf)
+{
+	struct io_target_group *group = ctx_buf;
+
+	TAILQ_INIT(&group->targets);
+	group->lcore = spdk_env_get_current_core();
+
+	return 0;
+}
+
 static void
-bdevperf_run(void *arg1)
+io_target_group_destroy(void *io_device, void *ctx_buf)
 {
+}
+
+static void
+_bdevperf_init_thread_done(void *ctx)
+{
 	int rc;
 
-	rc = blockdev_heads_init();
-	if (rc) {
-		spdk_app_stop(rc);
-		return;
-	}
-
 	g_master_core = spdk_env_get_current_core();
 
 	if (g_wait_for_tests) {
@@ -1350,23 +1346,48 @@ bdevperf_run(void *arg1)
 }
 
 static void
-bdevperf_stop_io_on_core(void *arg1, void *arg2)
+_bdevperf_init_thread(void *ctx)
 {
-	struct io_target_group *group = arg1;
+	struct spdk_io_channel *ch;
+	struct io_target_group *group;
+
+	ch = spdk_get_io_channel(&g_bdevperf);
+	group = spdk_io_channel_get_ctx(ch);
+
+	TAILQ_INSERT_TAIL(&g_bdevperf.groups, group, link);
+}
+
+static void
+bdevperf_run(void *arg1)
+{
+	spdk_io_device_register(&g_bdevperf, io_target_group_create, io_target_group_destroy,
+				sizeof(struct io_target_group), "bdevperf");
+
+	/* Send a message to each thread and create a target group */
+	spdk_for_each_thread(_bdevperf_init_thread, NULL, _bdevperf_init_thread_done);
+}
+
+static void
+bdevperf_stop_io_on_group(struct spdk_io_channel_iter *i)
+{
+	struct spdk_io_channel *ch;
+	struct io_target_group *group;
 	struct io_target *target;
 
+	ch = spdk_io_channel_iter_get_channel(i);
+	group = spdk_io_channel_get_ctx(ch);
+
 	/* Stop I/O for each block device. */
 	TAILQ_FOREACH(target, &group->targets, link) {
 		end_target(target);
 	}
+
+	spdk_for_each_channel_continue(i, 0);
 }
 
 static void
 spdk_bdevperf_shutdown_cb(void)
 {
-	struct io_target_group *group;
-	struct spdk_event *event;
-
 	g_shutdown = true;
 
 	if (TAILQ_EMPTY(&g_bdevperf.groups)) {
@@ -1382,13 +1403,7 @@ spdk_bdevperf_shutdown_cb(void)
 	g_shutdown_tsc = spdk_get_ticks() - g_shutdown_tsc;
 
 	/* Send events to stop all I/O on each target group */
-	TAILQ_FOREACH(group, &g_bdevperf.groups, link) {
-		if (!TAILQ_EMPTY(&group->targets)) {
-			event = spdk_event_allocate(group->lcore, bdevperf_stop_io_on_core,
-						    group, NULL);
-			spdk_event_call(event);
-		}
-	}
+	spdk_for_each_channel(&g_bdevperf, bdevperf_stop_io_on_group, NULL, NULL);
 }
 
 static int