nvmf_example: add the acceptor poller

Add an acceptor poller to handle all incoming connections.

Change-Id: I3228547d63a5812b90fe4394641cf124d9e64d12
Signed-off-by: JinYu <jin.yu@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/468660
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Authored by JinYu on 2019-09-18 02:42:52 +08:00; committed by Tomasz Zawadzki
parent 095fef2f45
commit ada795302a
2 changed files with 164 additions and 7 deletions
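
At a high level, the new flow is: once all subsystems have started, register a timed poller; on every tick the poller asks the target to accept pending connections; for each new qpair it picks a poll group (the transport's optimal group, falling back to round robin) and adds the qpair from that group's own thread. The sketch below condenses that flow using the SPDK calls and types that appear in the diff (spdk_poller_register, spdk_nvmf_tgt_accept, spdk_nvmf_poll_group_add, spdk_thread_send_msg, nvmf_get_optimal_pg, struct nvmf_target_pg_ctx, g_nvmf_tgt, g_acceptor_poll_rate); the helper names pg_add_qpair, on_new_qpair and acceptor_poll are illustrative, and error handling is trimmed relative to the full change.

/* Sketch only: reuses globals and types defined elsewhere in this example. */
static void
pg_add_qpair(void *arg)
{
	struct nvmf_target_pg_ctx *ctx = arg;

	/* Runs on the poll group's own thread; attach the new qpair there. */
	if (spdk_nvmf_poll_group_add(ctx->pg->group, ctx->qpair) != 0) {
		spdk_nvmf_qpair_disconnect(ctx->qpair, NULL, NULL);
	}
	free(ctx);
}

static void
on_new_qpair(struct spdk_nvmf_qpair *qpair, void *cb_arg)
{
	/* Pick a poll group, then hop to that group's thread to add the qpair. */
	struct nvmf_target_poll_group *pg = nvmf_get_optimal_pg(&g_nvmf_tgt, qpair);
	struct nvmf_target_pg_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx) {
		spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
		return;
	}
	ctx->qpair = qpair;
	ctx->pg = pg;

	spdk_thread_send_msg(pg->thread, pg_add_qpair, ctx);
}

static int
acceptor_poll(void *arg)
{
	struct nvmf_target *tgt = arg;

	/* Runs every g_acceptor_poll_rate microseconds (10 ms by default; -p changes it). */
	spdk_nvmf_tgt_accept(tgt->tgt, on_new_qpair, NULL);
	return -1;
}

/* Registered from the NVMF_INIT_START_ACCEPTOR state:
 * g_acceptor_poller = spdk_poller_register(acceptor_poll, &g_nvmf_tgt, g_acceptor_poll_rate);
 */

The hop through spdk_thread_send_msg() is the key design point: a poll group must only be manipulated from the thread that owns it, so the acceptor merely selects a group and defers spdk_nvmf_poll_group_add() to that group's thread.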


@@ -44,16 +44,21 @@
 #include "spdk_internal/event.h"
 
 #define NVMF_DEFAULT_SUBSYSTEMS 32
+#define ACCEPT_TIMEOUT_US 10000 /* 10ms */
 
 static const char *g_rpc_addr = SPDK_DEFAULT_RPC_ADDR;
+static uint32_t g_acceptor_poll_rate = ACCEPT_TIMEOUT_US;
 
 enum nvmf_target_state {
 	NVMF_INIT_SUBSYSTEM = 0,
 	NVMF_INIT_TARGET,
 	NVMF_INIT_POLL_GROUPS,
 	NVMF_INIT_START_SUBSYSTEMS,
+	NVMF_INIT_START_ACCEPTOR,
+	NVMF_RUNNING,
 	NVMF_FINI_STOP_SUBSYSTEMS,
 	NVMF_FINI_POLL_GROUPS,
+	NVMF_FINI_STOP_ACCEPTOR,
 	NVMF_FINI_TARGET,
 	NVMF_FINI_SUBSYSTEM,
 };
@@ -73,6 +78,7 @@ struct nvmf_reactor {
 
 struct nvmf_target_poll_group {
 	struct spdk_nvmf_poll_group *group;
 	struct spdk_thread *thread;
 	TAILQ_ENTRY(nvmf_target_poll_group) link;
 };
@@ -91,6 +97,8 @@ static struct spdk_thread *g_init_thread = NULL;
 static struct nvmf_target g_nvmf_tgt = {
 	.max_subsystems = NVMF_DEFAULT_SUBSYSTEMS,
 };
+static struct spdk_poller *g_acceptor_poller = NULL;
+static struct nvmf_target_poll_group *g_next_pg = NULL;
 static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;
 static bool g_reactors_exit = false;
 static enum nvmf_target_state g_target_state;
@@ -107,6 +115,7 @@ usage(char *program_name)
 	printf("\t[-i shared memory ID (optional)]\n");
 	printf("\t[-m core mask for DPDK]\n");
 	printf("\t[-n max subsystems for target(default: 32)]\n");
+	printf("\t[-p acceptor poller rate in us for target(default: 10000us)]\n");
 	printf("\t[-r RPC listen address (default /var/tmp/spdk.sock)]\n");
 	printf("\t[-s memory size in MB for DPDK (default: 0MB)]\n");
 	printf("\t[-u disable PCI access]\n");
@@ -118,7 +127,7 @@ parse_args(int argc, char **argv, struct spdk_env_opts *opts)
 	int op;
 	long int value;
 
-	while ((op = getopt(argc, argv, "i:m:n:r:s:u:h")) != -1) {
+	while ((op = getopt(argc, argv, "i:m:n:p:r:s:u:h")) != -1) {
 		switch (op) {
 		case 'i':
 			value = spdk_strtol(optarg, 10);
@@ -138,6 +147,14 @@ parse_args(int argc, char **argv, struct spdk_env_opts *opts)
 				return -EINVAL;
 			}
 			break;
+		case 'p':
+			value = spdk_strtol(optarg, 10);
+			if (value < 0) {
+				fprintf(stderr, "converting a string to integer failed\n");
+				return -EINVAL;
+			}
+			g_acceptor_poll_rate = value;
+			break;
 		case 'r':
 			g_rpc_addr = optarg;
 			break;
@@ -432,6 +449,102 @@ nvmf_tgt_stop_subsystems(struct nvmf_target *nvmf_tgt)
 	}
 }
 
+struct nvmf_target_pg_ctx {
+	struct spdk_nvmf_qpair *qpair;
+	struct nvmf_target_poll_group *pg;
+};
+
+static void
+nvmf_tgt_pg_add_qpair(void *_ctx)
+{
+	struct nvmf_target_pg_ctx *ctx = _ctx;
+	struct spdk_nvmf_qpair *qpair = ctx->qpair;
+	struct nvmf_target_poll_group *pg = ctx->pg;
+
+	free(_ctx);
+
+	if (spdk_nvmf_poll_group_add(pg->group, qpair) != 0) {
+		fprintf(stderr, "unable to add the qpair to a poll group.\n");
+		spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
+	}
+}
+
+static struct nvmf_target_poll_group *
+nvmf_tgt_get_next_pg(struct nvmf_target *nvmf_tgt)
+{
+	struct nvmf_target_poll_group *pg;
+
+	pg = g_next_pg;
+	g_next_pg = TAILQ_NEXT(pg, link);
+	if (g_next_pg == NULL) {
+		g_next_pg = TAILQ_FIRST(&g_poll_groups);
+	}
+
+	return pg;
+}
+
+static struct nvmf_target_poll_group *
+nvmf_get_optimal_pg(struct nvmf_target *nvmf_tgt, struct spdk_nvmf_qpair *qpair)
+{
+	struct nvmf_target_poll_group *pg, *_pg = NULL;
+	struct spdk_nvmf_poll_group *group = spdk_nvmf_get_optimal_poll_group(qpair);
+
+	if (group == NULL) {
+		_pg = nvmf_tgt_get_next_pg(nvmf_tgt);
+		goto end;
+	}
+
+	TAILQ_FOREACH(pg, &g_poll_groups, link) {
+		if (pg->group == group) {
+			_pg = pg;
+			break;
+		}
+	}
+
+end:
+	assert(_pg != NULL);
+	return _pg;
+}
+
+static void
+new_qpair(struct spdk_nvmf_qpair *qpair, void *cb_arg)
+{
+	struct nvmf_target_poll_group *pg;
+	struct nvmf_target_pg_ctx *ctx;
+	struct nvmf_target *nvmf_tgt = &g_nvmf_tgt;
+
+	/* In SPDK we support three methods to get poll group: RoundRobin, Host and Transport.
+	 * In this example we only support the "Transport" which gets the optimal poll group.
+	 */
+	pg = nvmf_get_optimal_pg(nvmf_tgt, qpair);
+	if (!pg) {
+		spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
+		return;
+	}
+
+	ctx = calloc(1, sizeof(*ctx));
+	if (!ctx) {
+		fprintf(stderr, "failed to allocate poll group context.\n");
+		spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
+		return;
+	}
+
+	ctx->qpair = qpair;
+	ctx->pg = pg;
+
+	spdk_thread_send_msg(pg->thread, nvmf_tgt_pg_add_qpair, ctx);
+}
+
+static int
+nvmf_tgt_acceptor_poll(void *arg)
+{
+	struct nvmf_target *nvmf_tgt = arg;
+
+	spdk_nvmf_tgt_accept(nvmf_tgt->tgt, new_qpair, NULL);
+
+	return -1;
+}
+
 static void
 nvmf_tgt_subsystem_start_next(struct spdk_nvmf_subsystem *subsystem,
 			      void *cb_arg, int status)
@@ -444,6 +557,9 @@ nvmf_tgt_subsystem_start_next(struct spdk_nvmf_subsystem *subsystem,
 	}
 
 	fprintf(stdout, "all subsystems of target started\n");
+
+	g_target_state = NVMF_INIT_START_ACCEPTOR;
+	nvmf_target_advance_state();
 }
 
 static void
@@ -466,7 +582,7 @@ nvmf_tgt_start_subsystems(struct nvmf_target *nvmf_tgt)
 					   nvmf_tgt_subsystem_start_next,
 					   NULL);
 	} else {
-		g_target_state = NVMF_FINI_STOP_SUBSYSTEMS;
+		g_target_state = NVMF_INIT_START_ACCEPTOR;
 	}
 }
 
@@ -498,6 +614,10 @@ nvmf_tgt_create_poll_group(void *ctx)
 		return;
 	}
 
+	if (!g_next_pg) {
+		g_next_pg = pg;
+	}
+
 	/* spdk_for_each_channel is asynchronous, but runs on each thread in serial.
 	 * Since this is the only operation occurring on the g_poll_groups list,
 	 * we don't need to take a lock.
@@ -525,7 +645,7 @@ nvmf_tgt_destroy_poll_groups_done(struct spdk_io_channel_iter *i, int status)
 {
 	fprintf(stdout, "destroy targets's poll groups done\n");
 
-	g_target_state = NVMF_FINI_TARGET;
+	g_target_state = NVMF_FINI_STOP_ACCEPTOR;
 	nvmf_target_advance_state();
 }
@@ -606,12 +726,25 @@ nvmf_target_advance_state(void)
 	case NVMF_INIT_START_SUBSYSTEMS:
 		nvmf_tgt_start_subsystems(&g_nvmf_tgt);
 		break;
+	case NVMF_INIT_START_ACCEPTOR:
+		g_acceptor_poller = spdk_poller_register(nvmf_tgt_acceptor_poll, &g_nvmf_tgt,
+							 g_acceptor_poll_rate);
+		fprintf(stdout, "Acceptor running\n");
+		g_target_state = NVMF_RUNNING;
+		break;
+	case NVMF_RUNNING:
+		fprintf(stdout, "nvmf target is running\n");
+		break;
 	case NVMF_FINI_STOP_SUBSYSTEMS:
 		nvmf_tgt_stop_subsystems(&g_nvmf_tgt);
 		break;
 	case NVMF_FINI_POLL_GROUPS:
 		nvmf_poll_groups_destroy();
 		break;
+	case NVMF_FINI_STOP_ACCEPTOR:
+		spdk_poller_unregister(&g_acceptor_poller);
+		g_target_state = NVMF_FINI_TARGET;
+		break;
 	case NVMF_FINI_TARGET:
 		nvmf_destroy_nvmf_tgt();
 		break;
@@ -633,10 +766,10 @@ static void
 _nvmf_shutdown_cb(void *ctx)
 {
 	/* Still in initialization state, defer shutdown operation */
-	if (g_target_state < NVMF_INIT_START_SUBSYSTEMS) {
+	if (g_target_state < NVMF_RUNNING) {
 		spdk_thread_send_msg(spdk_get_thread(), _nvmf_shutdown_cb, NULL);
 		return;
-	} else if (g_target_state >= NVMF_FINI_STOP_SUBSYSTEMS) {
+	} else if (g_target_state > NVMF_RUNNING) {
 		/* Already in Shutdown status, ignore the signal */
 		return;
 	}


@@ -7,6 +7,9 @@ source $rootdir/test/nvmf/common.sh
 
 rpc_py="$rootdir/scripts/rpc.py"
 
+MALLOC_BDEV_SIZE=64
+MALLOC_BLOCK_SIZE=512
+
 function build_nvmf_example_args()
 {
 	if [ $SPDK_RUN_NON_ROOT -eq 1 ]; then
@@ -23,8 +26,8 @@ function nvmfexamplestart()
 	timing_enter start_nvmf_example
 	$NVMF_EXAMPLE $1 &
 	nvmfpid=$!
 	trap 'process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
-	waitforfile /var/tmp/spdk.sock
+	waitforlisten $nvmfpid
 	timing_exit start_nvmf_example
 }
@@ -32,6 +35,27 @@ timing_enter nvmf_example_test
 nvmftestinit
 nvmfexamplestart "-m 0xF"
 
+#create transport
+$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+
+#create malloc bdev
+malloc_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
+
+#create subsystem
+$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
+
+#add ns to subsystem
+for malloc_bdev in $malloc_bdevs; do
+	$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 "$malloc_bdev"
+done
+
+#add listener to subsystem
+$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
+
+perf="$rootdir/examples/nvme/perf/perf"
+$perf -q 64 -o 4096 -w randrw -M 30 -t 10 \
+	-r "trtype:${TEST_TRANSPORT} adrfam:IPv4 traddr:${NVMF_FIRST_TARGET_IP} trsvcid:${NVMF_PORT} \
+	subnqn:nqn.2016-06.io.spdk:cnode1"
+
 trap - SIGINT SIGTERM EXIT
 
 nvmftestfini
 timing_exit nvmf_example_test