eal: rename lcore master and slave
Replace master lcore with main lcore and replace slave lcore with
worker lcore.

Keep the old functions and macros but mark them as deprecated for
this release.

The "--master-lcore" command line option is also deprecated and any
usage will print a warning and use "--main-lcore" as replacement.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Anatoly Burakov <anatoly.burakov@intel.com>
parent 0303192581
commit cb056611a8
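For reference, a minimal self-contained sketch (not part of this commit) of how launch code reads after the rename. The worker_fn body and its printf output are illustrative only; the EAL calls shown (rte_eal_init, rte_get_main_lcore, RTE_LCORE_FOREACH_WORKER, rte_eal_remote_launch, rte_eal_mp_wait_lcore) are the renamed public API, with the old master/slave names kept as deprecated aliases for this release.

#include <stdio.h>

#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>

/* hypothetical worker body, used only to illustrate the renamed API */
static int
worker_fn(void *arg)
{
	(void)arg;
	printf("worker lcore %u running\n", rte_lcore_id());
	return 0;
}

int
main(int argc, char **argv)
{
	unsigned int lcore_id;

	/* "--main-lcore" replaces the deprecated "--master-lcore" option */
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* was rte_get_master_lcore() */
	printf("main lcore: %u\n", rte_get_main_lcore());

	/* was RTE_LCORE_FOREACH_SLAVE(); iterates all lcores but the main one */
	RTE_LCORE_FOREACH_WORKER(lcore_id)
		rte_eal_remote_launch(worker_fn, NULL, lcore_id);

	/* alternatively: rte_eal_mp_remote_launch(worker_fn, NULL, SKIP_MAIN);
	 * SKIP_MAIN/CALL_MAIN replace SKIP_MASTER/CALL_MASTER.
	 */
	rte_eal_mp_wait_lcore();
	return 0;
}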
@@ -947,7 +947,7 @@ dump_packets(void)
 rte_exit(EXIT_FAILURE, "failed to wait\n");
 }

-/* master core */
+/* main core */
 while (!quit_signal)
 ;
 }
@@ -1098,7 +1098,7 @@ main(int argc, char **argv)
 if (config.trace_file != NULL)
 tracef_init();

-RTE_LCORE_FOREACH_SLAVE(lcore)
+RTE_LCORE_FOREACH_WORKER(lcore)
 rte_eal_remote_launch(search_ip5tuples, NULL, lcore);

 search_ip5tuples(NULL);
@@ -3722,14 +3722,14 @@ bler_test(struct active_device *ad,

 rte_atomic16_set(&op_params->sync, SYNC_WAIT);

-/* Master core is set at first entry */
+/* Main core is set at first entry */
 t_params[0].dev_id = ad->dev_id;
 t_params[0].lcore_id = rte_lcore_id();
 t_params[0].op_params = op_params;
 t_params[0].queue_id = ad->queue_ids[used_cores++];
 t_params[0].iter_count = 0;

-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (used_cores >= num_lcores)
 break;

@@ -3746,7 +3746,7 @@ bler_test(struct active_device *ad,
 rte_atomic16_set(&op_params->sync, SYNC_START);
 ret = bler_function(&t_params[0]);

-/* Master core is always used */
+/* Main core is always used */
 for (used_cores = 1; used_cores < num_lcores; used_cores++)
 ret |= rte_eal_wait_lcore(t_params[used_cores].lcore_id);

@@ -3840,14 +3840,14 @@ throughput_test(struct active_device *ad,

 rte_atomic16_set(&op_params->sync, SYNC_WAIT);

-/* Master core is set at first entry */
+/* Main core is set at first entry */
 t_params[0].dev_id = ad->dev_id;
 t_params[0].lcore_id = rte_lcore_id();
 t_params[0].op_params = op_params;
 t_params[0].queue_id = ad->queue_ids[used_cores++];
 t_params[0].iter_count = 0;

-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (used_cores >= num_lcores)
 break;

@@ -3864,7 +3864,7 @@ throughput_test(struct active_device *ad,
 rte_atomic16_set(&op_params->sync, SYNC_START);
 ret = throughput_function(&t_params[0]);

-/* Master core is always used */
+/* Main core is always used */
 for (used_cores = 1; used_cores < num_lcores; used_cores++)
 ret |= rte_eal_wait_lcore(t_params[used_cores].lcore_id);

@@ -3888,7 +3888,7 @@ throughput_test(struct active_device *ad,
 /* In interrupt TC we need to wait for the interrupt callback to deqeue
 * all pending operations. Skip waiting for queues which reported an
 * error using processing_status variable.
- * Wait for master lcore operations.
+ * Wait for main lcore operations.
 */
 tp = &t_params[0];
 while ((rte_atomic16_read(&tp->nb_dequeued) <
@@ -3901,7 +3901,7 @@ throughput_test(struct active_device *ad,
 tp->mbps /= TEST_REPETITIONS;
 ret |= (int)rte_atomic16_read(&tp->processing_status);

-/* Wait for slave lcores operations */
+/* Wait for worker lcores operations */
 for (used_cores = 1; used_cores < num_lcores; used_cores++) {
 tp = &t_params[used_cores];

@@ -389,7 +389,7 @@ main(int argc, char **argv)
 i = 0;
 uint8_t qp_id = 0, cdev_index = 0;

-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {

 if (i == total_nb_qps)
 break;
@@ -413,7 +413,7 @@ main(int argc, char **argv)
 while (test_data->level <= test_data->level_lst.max) {

 i = 0;
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {

 if (i == total_nb_qps)
 break;
@@ -424,7 +424,7 @@ main(int argc, char **argv)
 i++;
 }
 i = 0;
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {

 if (i == total_nb_qps)
 break;
@@ -449,7 +449,7 @@ end:

 case ST_DURING_TEST:
 i = 0;
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (i == total_nb_qps)
 break;

@@ -600,7 +600,7 @@ main(int argc, char **argv)

 i = 0;
 uint8_t qp_id = 0, cdev_index = 0;
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {

 if (i == total_nb_qps)
 break;
@@ -664,7 +664,7 @@ main(int argc, char **argv)
 distribution_total[buffer_size_count - 1];

 i = 0;
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {

 if (i == total_nb_qps)
 break;
@@ -674,7 +674,7 @@ main(int argc, char **argv)
 i++;
 }
 i = 0;
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {

 if (i == total_nb_qps)
 break;
@@ -694,7 +694,7 @@ main(int argc, char **argv)

 while (opts.test_buffer_size <= opts.max_buffer_size) {
 i = 0;
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {

 if (i == total_nb_qps)
 break;
@@ -704,7 +704,7 @@ main(int argc, char **argv)
 i++;
 }
 i = 0;
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {

 if (i == total_nb_qps)
 break;
@@ -728,7 +728,7 @@ main(int argc, char **argv)
 }

 i = 0;
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {

 if (i == total_nb_qps)
 break;
@@ -748,7 +748,7 @@ main(int argc, char **argv)

 err:
 i = 0;
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (i == total_nb_qps)
 break;

@@ -438,7 +438,7 @@ evt_options_dump(struct evt_options *opt)
 evt_dump("verbose_level", "%d", opt->verbose_level);
 evt_dump("socket_id", "%d", opt->socket_id);
 evt_dump("pool_sz", "%d", opt->pool_sz);
-evt_dump("master lcore", "%d", rte_get_master_lcore());
+evt_dump("main lcore", "%d", rte_get_main_lcore());
 evt_dump("nb_pkts", "%"PRIu64, opt->nb_pkts);
 evt_dump("nb_timers", "%"PRIu64, opt->nb_timers);
 evt_dump_begin("available lcores");
@@ -75,15 +75,15 @@ order_opt_check(struct evt_options *opt)
 return -1;
 }

-/* 1 producer + N workers + 1 master */
+/* 1 producer + N workers + main */
 if (rte_lcore_count() < 3) {
 evt_err("test need minimum 3 lcores");
 return -1;
 }

 /* Validate worker lcores */
-if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
-evt_err("worker lcores overlaps with master lcore");
+if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
+evt_err("worker lcores overlaps with main lcore");
 return -1;
 }

@@ -118,8 +118,8 @@ order_opt_check(struct evt_options *opt)
 }

 /* Validate producer lcore */
-if (plcore == (int)rte_get_master_lcore()) {
-evt_err("producer lcore and master lcore should be different");
+if (plcore == (int)rte_get_main_lcore()) {
+evt_err("producer lcore and main lcore should be different");
 return -1;
 }
 if (!rte_lcore_is_enabled(plcore)) {
@@ -246,7 +246,7 @@ order_launch_lcores(struct evt_test *test, struct evt_options *opt,

 int wkr_idx = 0;
 /* launch workers */
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (!(opt->wlcores[lcore_id]))
 continue;

@@ -254,7 +254,7 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,

 int port_idx = 0;
 /* launch workers */
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (!(opt->wlcores[lcore_id]))
 continue;

@@ -268,7 +268,7 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
 }

 /* launch producers */
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (!(opt->plcores[lcore_id]))
 continue;

@@ -541,8 +541,8 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
 {
 unsigned int lcores;

-/* N producer + N worker + 1 master when producer cores are used
- * Else N worker + 1 master when Rx adapter is used
+/* N producer + N worker + main when producer cores are used
+ * Else N worker + main when Rx adapter is used
 */
 lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;

@@ -552,8 +552,8 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
 }

 /* Validate worker lcores */
-if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
-evt_err("worker lcores overlaps with master lcore");
+if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
+evt_err("worker lcores overlaps with main lcore");
 return -1;
 }
 if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
@@ -573,8 +573,8 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
 opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
 /* Validate producer lcores */
 if (evt_lcores_has_overlap(opt->plcores,
-rte_get_master_lcore())) {
-evt_err("producer lcores overlaps with master lcore");
+rte_get_main_lcore())) {
+evt_err("producer lcores overlaps with main lcore");
 return -1;
 }
 if (evt_has_disabled_lcore(opt->plcores)) {
@@ -60,7 +60,7 @@ pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,

 int port_idx = 0;
 /* launch workers */
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (!(opt->wlcores[lcore_id]))
 continue;

@@ -106,9 +106,8 @@ int
 pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
 {
 unsigned int lcores;
-/*
- * N worker + 1 master
- */
+
+/* N worker + main */
 lcores = 2;

 if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR) {
@@ -129,8 +128,8 @@ pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
 }

 /* Validate worker lcores */
-if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
-evt_err("worker lcores overlaps with master lcore");
+if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
+evt_err("worker lcores overlaps with main lcore");
 return -1;
 }
 if (evt_has_disabled_lcore(opt->wlcores)) {
@@ -1445,7 +1445,7 @@ main(int argc, char **argv)

 if (enable_fwd) {
 init_lcore_info();
-rte_eal_mp_remote_launch(start_forwarding, NULL, CALL_MASTER);
+rte_eal_mp_remote_launch(start_forwarding, NULL, CALL_MAIN);
 }

 RTE_ETH_FOREACH_DEV(port) {
@@ -66,8 +66,8 @@ main(int argc, char **argv)
 app_init();

 /* Launch per-lcore init on every lcore */
-rte_eal_mp_remote_launch(app_lcore_main_loop, NULL, CALL_MASTER);
-RTE_LCORE_FOREACH_SLAVE(lcore) {
+rte_eal_mp_remote_launch(app_lcore_main_loop, NULL, CALL_MAIN);
+RTE_LCORE_FOREACH_WORKER(lcore) {
 if (rte_eal_wait_lcore(lcore) < 0)
 return -1;
 }
@@ -20238,7 +20238,7 @@ cmdline_read_from_file(const char *filename)
 printf("Read CLI commands from %s\n", filename);
 }

-/* prompt function, called from main on MASTER lcore */
+/* prompt function, called from main on MAIN lcore */
 void
 prompt(void)
 {
@@ -3456,9 +3456,9 @@ set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
 printf("lcore %u not enabled\n", lcore_cpuid);
 return -1;
 }
-if (lcore_cpuid == rte_get_master_lcore()) {
+if (lcore_cpuid == rte_get_main_lcore()) {
 printf("lcore %u cannot be masked on for running "
-"packet forwarding, which is the master lcore "
+"packet forwarding, which is the main lcore "
 "and reserved for command line parsing only\n",
 lcore_cpuid);
 return -1;
@@ -89,7 +89,7 @@ usage(char* progname)
 printf(" --nb-ports=N: set the number of forwarding ports "
 "(1 <= N <= %d).\n", nb_ports);
 printf(" --coremask=COREMASK: hexadecimal bitmask of cores running "
-"the packet forwarding test. The master lcore is reserved for "
+"the packet forwarding test. The main lcore is reserved for "
 "command line parsing only, and cannot be masked on for "
 "packet forwarding.\n");
 printf(" --portmask=PORTMASK: hexadecimal bitmask of ports used "
@@ -83,7 +83,7 @@
 uint16_t verbose_level = 0; /**< Silent by default. */
 int testpmd_logtype; /**< Log type for testpmd logs */

-/* use master core for command line ? */
+/* use main core for command line ? */
 uint8_t interactive = 0;
 uint8_t auto_start = 0;
 uint8_t tx_first;
@@ -581,7 +581,7 @@ set_default_fwd_lcores_config(void)
 }
 socket_ids[num_sockets++] = sock_num;
 }
-if (i == rte_get_master_lcore())
+if (i == rte_get_main_lcore())
 continue;
 fwd_lcores_cpuids[nb_lc++] = i;
 }
@@ -657,11 +657,11 @@ main(int argc, char **argv)

 add_rules(sad, 10);
 if (config.parallel_lookup)
-rte_eal_mp_remote_launch(lookup, sad, SKIP_MASTER);
+rte_eal_mp_remote_launch(lookup, sad, SKIP_MAIN);

 lookup(sad);
 if (config.parallel_lookup)
-RTE_LCORE_FOREACH_SLAVE(lcore_id)
+RTE_LCORE_FOREACH_WORKER(lcore_id)
 if (rte_eal_wait_lcore(lcore_id) < 0)
 return -1;

@@ -103,7 +103,7 @@ def rwlock_autotest(child, test_name):
 index = child.expect(["Test OK",
 "Test Failed",
 "Hello from core ([0-9]*) !",
-"Global write lock taken on master "
+"Global write lock taken on main "
 "core ([0-9]*)",
 pexpect.TIMEOUT], timeout=10)
 # ok
@@ -189,7 +189,7 @@ fast_tests = [
 ['cycles_autotest', true],
 ['debug_autotest', true],
 ['eal_flags_c_opt_autotest', false],
-['eal_flags_master_opt_autotest', false],
+['eal_flags_main_opt_autotest', false],
 ['eal_flags_n_opt_autotest', false],
 ['eal_flags_hpet_autotest', false],
 ['eal_flags_no_huge_autotest', false],
@@ -58,7 +58,7 @@ do_recursive_call(void)
 #endif
 #endif
 { "test_missing_c_flag", no_action },
-{ "test_master_lcore_flag", no_action },
+{ "test_main_lcore_flag", no_action },
 { "test_invalid_n_flag", no_action },
 { "test_no_hpet_flag", no_action },
 { "test_whitelist_flag", no_action },
@@ -456,7 +456,7 @@ test_atomic(void)

 printf("usual inc/dec/add/sub functions\n");

-rte_eal_mp_remote_launch(test_atomic_usual, NULL, SKIP_MASTER);
+rte_eal_mp_remote_launch(test_atomic_usual, NULL, SKIP_MAIN);
 rte_atomic32_set(&synchro, 1);
 rte_eal_mp_wait_lcore();
 rte_atomic32_set(&synchro, 0);
@@ -482,7 +482,7 @@ test_atomic(void)
 rte_atomic32_set(&a32, 0);
 rte_atomic16_set(&a16, 0);
 rte_atomic64_set(&count, 0);
-rte_eal_mp_remote_launch(test_atomic_tas, NULL, SKIP_MASTER);
+rte_eal_mp_remote_launch(test_atomic_tas, NULL, SKIP_MAIN);
 rte_atomic32_set(&synchro, 1);
 rte_eal_mp_wait_lcore();
 rte_atomic32_set(&synchro, 0);
@@ -499,7 +499,7 @@ test_atomic(void)
 rte_atomic16_set(&a16, 0);
 rte_atomic64_set(&count, 0);
 rte_eal_mp_remote_launch(test_atomic_addsub_and_return, NULL,
-SKIP_MASTER);
+SKIP_MAIN);
 rte_atomic32_set(&synchro, 1);
 rte_eal_mp_wait_lcore();
 rte_atomic32_set(&synchro, 0);
@@ -510,8 +510,8 @@ test_atomic(void)
 }

 /*
- * Set a64, a32 and a16 with the same value of minus "number of slave
- * lcores", launch all slave lcores to atomically increase by one and
+ * Set a64, a32 and a16 with the same value of minus "number of worker
+ * lcores", launch all worker lcores to atomically increase by one and
 * test them respectively.
 * Each lcore should have only one chance to increase a64 by one and
 * then check if it is equal to 0, but there should be only one lcore
@@ -519,7 +519,7 @@ test_atomic(void)
 * Then a variable of "count", initialized to zero, is increased by
 * one if a64, a32 or a16 is 0 after being increased and tested
 * atomically.
- * We can check if "count" is finally equal to 3 to see if all slave
+ * We can check if "count" is finally equal to 3 to see if all worker
 * lcores performed "atomic inc and test" right.
 */
 printf("inc and test\n");
@@ -533,7 +533,7 @@ test_atomic(void)
 rte_atomic64_set(&a64, (int64_t)(1 - (int64_t)rte_lcore_count()));
 rte_atomic32_set(&a32, (int32_t)(1 - (int32_t)rte_lcore_count()));
 rte_atomic16_set(&a16, (int16_t)(1 - (int16_t)rte_lcore_count()));
-rte_eal_mp_remote_launch(test_atomic_inc_and_test, NULL, SKIP_MASTER);
+rte_eal_mp_remote_launch(test_atomic_inc_and_test, NULL, SKIP_MAIN);
 rte_atomic32_set(&synchro, 1);
 rte_eal_mp_wait_lcore();
 rte_atomic32_clear(&synchro);
@@ -544,7 +544,7 @@ test_atomic(void)
 }

 /*
- * Same as above, but this time we set the values to "number of slave
+ * Same as above, but this time we set the values to "number of worker
 * lcores", and decrement instead of increment.
 */
 printf("dec and test\n");
@@ -555,7 +555,7 @@ test_atomic(void)
 rte_atomic64_set(&a64, (int64_t)(rte_lcore_count() - 1));
 rte_atomic32_set(&a32, (int32_t)(rte_lcore_count() - 1));
 rte_atomic16_set(&a16, (int16_t)(rte_lcore_count() - 1));
-rte_eal_mp_remote_launch(test_atomic_dec_and_test, NULL, SKIP_MASTER);
+rte_eal_mp_remote_launch(test_atomic_dec_and_test, NULL, SKIP_MAIN);
 rte_atomic32_set(&synchro, 1);
 rte_eal_mp_wait_lcore();
 rte_atomic32_clear(&synchro);
@@ -569,10 +569,10 @@ test_atomic(void)
 /*
 * This case tests the functionality of rte_atomic128_cmp_exchange
 * API. It calls rte_atomic128_cmp_exchange with four kinds of memory
- * models successively on each slave core. Once each 128-bit atomic
+ * models successively on each worker core. Once each 128-bit atomic
 * compare and swap operation is successful, it updates the global
 * 128-bit counter by 2 for the first 64-bit and 1 for the second
- * 64-bit. Each slave core iterates this test N times.
+ * 64-bit. Each worker core iterates this test N times.
 * At the end of test, verify whether the first 64-bits of the 128-bit
 * counter and the second 64bits is differ by the total iterations. If
 * it is, the test passes.
@@ -585,7 +585,7 @@ test_atomic(void)
 count128.val[1] = 0;

 rte_eal_mp_remote_launch(test_atomic128_cmp_exchange, NULL,
-SKIP_MASTER);
+SKIP_MAIN);
 rte_atomic32_set(&synchro, 1);
 rte_eal_mp_wait_lcore();
 rte_atomic32_clear(&synchro);
@@ -619,7 +619,7 @@ test_atomic(void)
 token64 = ((uint64_t)get_crc8(&t.u8[0], sizeof(token64) - 1) << 56)
 | (t.u64 & 0x00ffffffffffffff);

-rte_eal_mp_remote_launch(test_atomic_exchange, NULL, SKIP_MASTER);
+rte_eal_mp_remote_launch(test_atomic_exchange, NULL, SKIP_MAIN);
 rte_atomic32_set(&synchro, 1);
 rte_eal_mp_wait_lcore();
 rte_atomic32_clear(&synchro);
@@ -236,7 +236,7 @@ plock_test(uint64_t iter, enum plock_use_type utype)

 /* test phase - start and wait for completion on each active lcore */

-rte_eal_mp_remote_launch(plock_test1_lcore, lpt, CALL_MASTER);
+rte_eal_mp_remote_launch(plock_test1_lcore, lpt, CALL_MAIN);
 rte_eal_mp_wait_lcore();

 /* validation phase - make sure that shared and local data match */
@@ -190,7 +190,7 @@ static const char test_buf_shakespeare[] =
 " servitude: I will no longer endure it, though yet I\n"
 " know no wise remedy how to avoid it.\n"
 "\n"
-"ADAM Yonder comes my master, your brother.\n"
+"ADAM Yonder comes my main, your brother.\n"
 "\n"
 "ORLANDO Go apart, Adam, and thou shalt hear how he will\n";

@@ -717,7 +717,7 @@ testsuite_setup(void)
 /* Identify the Worker Cores
 * Use 2 worker cores for the device args
 */
-RTE_LCORE_FOREACH_SLAVE(i) {
+RTE_LCORE_FOREACH_WORKER(i) {
 if (worker_core_count > 1)
 break;
 snprintf(vdev_args, sizeof(vdev_args),
@@ -862,13 +862,13 @@ test_distributor(void)
 sizeof(worker_params.name));

 rte_eal_mp_remote_launch(handle_work,
-&worker_params, SKIP_MASTER);
+&worker_params, SKIP_MAIN);
 if (sanity_test(&worker_params, p) < 0)
 goto err;
 quit_workers(&worker_params, p);

 rte_eal_mp_remote_launch(handle_work_with_free_mbufs,
-&worker_params, SKIP_MASTER);
+&worker_params, SKIP_MAIN);
 if (sanity_test_with_mbuf_alloc(&worker_params, p) < 0)
 goto err;
 quit_workers(&worker_params, p);
@@ -876,7 +876,7 @@ test_distributor(void)
 if (rte_lcore_count() > 2) {
 rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
 &worker_params,
-SKIP_MASTER);
+SKIP_MAIN);
 if (sanity_test_with_worker_shutdown(&worker_params,
 p) < 0)
 goto err;
@@ -884,14 +884,14 @@ test_distributor(void)

 rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
 &worker_params,
-SKIP_MASTER);
+SKIP_MAIN);
 if (test_flush_with_worker_shutdown(&worker_params,
 p) < 0)
 goto err;
 quit_workers(&worker_params, p);

 rte_eal_mp_remote_launch(handle_and_mark_work,
-&worker_params, SKIP_MASTER);
+&worker_params, SKIP_MAIN);
 if (sanity_mark_test(&worker_params, p) < 0)
 goto err;
 quit_workers(&worker_params, p);
@@ -54,10 +54,10 @@ time_cache_line_switch(void)
 /* allocate a full cache line for data, we use only first byte of it */
 uint64_t data[RTE_CACHE_LINE_SIZE*3 / sizeof(uint64_t)];

-unsigned i, slaveid = rte_get_next_lcore(rte_lcore_id(), 0, 0);
+unsigned int i, workerid = rte_get_next_lcore(rte_lcore_id(), 0, 0);
 volatile uint64_t *pdata = &data[0];
 *pdata = 1;
-rte_eal_remote_launch((lcore_function_t *)flip_bit, &data[0], slaveid);
+rte_eal_remote_launch((lcore_function_t *)flip_bit, &data[0], workerid);
 while (*pdata)
 rte_pause();

@@ -72,7 +72,7 @@ time_cache_line_switch(void)
 while (*pdata)
 rte_pause();
 *pdata = 2;
-rte_eal_wait_lcore(slaveid);
+rte_eal_wait_lcore(workerid);
 printf("==== Cache line switch test ===\n");
 printf("Time for %u iterations = %"PRIu64" ticks\n", (1<<ITER_POWER_CL),
 end_time-start_time);
@@ -251,13 +251,13 @@ test_distributor_perf(void)
 }

 printf("=== Performance test of distributor (single mode) ===\n");
-rte_eal_mp_remote_launch(handle_work, ds, SKIP_MASTER);
+rte_eal_mp_remote_launch(handle_work, ds, SKIP_MAIN);
 if (perf_test(ds, p) < 0)
 return -1;
 quit_workers(ds, p);

 printf("=== Performance test of distributor (burst mode) ===\n");
-rte_eal_mp_remote_launch(handle_work, db, SKIP_MASTER);
+rte_eal_mp_remote_launch(handle_work, db, SKIP_MAIN);
 if (perf_test(db, p) < 0)
 return -1;
 quit_workers(db, p);
@@ -599,10 +599,10 @@ test_missing_c_flag(void)
 }

 /*
- * Test --master-lcore option with matching coremask
+ * Test --main-lcore option with matching coremask
 */
 static int
-test_master_lcore_flag(void)
+test_main_lcore_flag(void)
 {
 #ifdef RTE_EXEC_ENV_FREEBSD
 /* BSD target doesn't support prefixes at this point */
@@ -619,34 +619,34 @@ test_master_lcore_flag(void)
 if (!rte_lcore_is_enabled(0) || !rte_lcore_is_enabled(1))
 return TEST_SKIPPED;

-/* --master-lcore flag but no value */
+/* --main-lcore flag but no value */
 const char *argv1[] = { prgname, prefix, mp_flag,
-"-c", "3", "--master-lcore"};
-/* --master-lcore flag with invalid value */
+"-c", "3", "--main-lcore"};
+/* --main-lcore flag with invalid value */
 const char *argv2[] = { prgname, prefix, mp_flag,
-"-c", "3", "--master-lcore", "-1"};
+"-c", "3", "--main-lcore", "-1"};
 const char *argv3[] = { prgname, prefix, mp_flag,
-"-c", "3", "--master-lcore", "X"};
-/* master lcore not in coremask */
+"-c", "3", "--main-lcore", "X"};
+/* main lcore not in coremask */
 const char *argv4[] = { prgname, prefix, mp_flag,
-"-c", "3", "--master-lcore", "2"};
+"-c", "3", "--main-lcore", "2"};
 /* valid value */
 const char *argv5[] = { prgname, prefix, mp_flag,
-"-c", "3", "--master-lcore", "1"};
+"-c", "3", "--main-lcore", "1"};
 /* valid value set before coremask */
 const char *argv6[] = { prgname, prefix, mp_flag,
-"--master-lcore", "1", "-c", "3"};
+"--main-lcore", "1", "-c", "3"};

 if (launch_proc(argv1) == 0
 || launch_proc(argv2) == 0
 || launch_proc(argv3) == 0
 || launch_proc(argv4) == 0) {
-printf("Error - process ran without error with wrong --master-lcore\n");
+printf("Error - process ran without error with wrong --main-lcore\n");
 return -1;
 }
 if (launch_proc(argv5) != 0
 || launch_proc(argv6) != 0) {
-printf("Error - process did not run ok with valid --master-lcore\n");
+printf("Error - process did not run ok with valid --main-lcore\n");
 return -1;
 }
 return 0;
@@ -1468,9 +1468,9 @@ test_eal_flags(void)
 return ret;
 }

-ret = test_master_lcore_flag();
+ret = test_main_lcore_flag();
 if (ret < 0) {
-printf("Error in test_master_lcore_flag()\n");
+printf("Error in test_main_lcore_flag()\n");
 return ret;
 }

@@ -1542,7 +1542,7 @@ REGISTER_TEST_COMMAND(eal_flags_autotest, test_eal_flags);

 /* subtests used in meson for CI */
 REGISTER_TEST_COMMAND(eal_flags_c_opt_autotest, test_missing_c_flag);
-REGISTER_TEST_COMMAND(eal_flags_master_opt_autotest, test_master_lcore_flag);
+REGISTER_TEST_COMMAND(eal_flags_main_opt_autotest, test_main_lcore_flag);
 REGISTER_TEST_COMMAND(eal_flags_n_opt_autotest, test_invalid_n_flag);
 REGISTER_TEST_COMMAND(eal_flags_hpet_autotest, test_no_hpet_flag);
 REGISTER_TEST_COMMAND(eal_flags_no_huge_autotest, test_no_huge_flag);
@@ -95,7 +95,7 @@ static inline uint8_t efd_get_all_sockets_bitmask(void)
 {
 uint8_t all_cpu_sockets_bitmask = 0;
 unsigned int i;
-unsigned int next_lcore = rte_get_master_lcore();
+unsigned int next_lcore = rte_get_main_lcore();
 const int val_true = 1, val_false = 0;
 for (i = 0; i < rte_lcore_count(); i++) {
 all_cpu_sockets_bitmask |= 1 << rte_lcore_to_socket_id(next_lcore);
@@ -33,7 +33,7 @@ static inline uint8_t efd_get_all_sockets_bitmask(void)
 {
 uint8_t all_cpu_sockets_bitmask = 0;
 unsigned int i;
-unsigned int next_lcore = rte_get_master_lcore();
+unsigned int next_lcore = rte_get_main_lcore();
 const int val_true = 1, val_false = 0;
 for (i = 0; i < rte_lcore_count(); i++) {
 all_cpu_sockets_bitmask |= 1 << rte_lcore_to_socket_id(next_lcore);
@@ -57,8 +57,8 @@ typedef void (*case_clean_t)(unsigned lcore_id);
 static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
 static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);

-#define WAIT_SYNCHRO_FOR_SLAVES() do{ \
-if (lcore_self != rte_get_master_lcore()) \
+#define WAIT_SYNCHRO_FOR_WORKERS() do { \
+if (lcore_self != rte_get_main_lcore()) \
 while (rte_atomic32_read(&synchro) == 0); \
 } while(0)

@@ -70,7 +70,7 @@ test_eal_init_once(__rte_unused void *arg)
 {
 unsigned lcore_self = rte_lcore_id();

-WAIT_SYNCHRO_FOR_SLAVES();
+WAIT_SYNCHRO_FOR_WORKERS();

 rte_atomic32_set(&obj_count, 1); /* silent the check in the caller */
 if (rte_eal_init(0, NULL) != -1)
@@ -106,7 +106,7 @@ ring_create_lookup(__rte_unused void *arg)
 char ring_name[MAX_STRING_SIZE];
 int i;

-WAIT_SYNCHRO_FOR_SLAVES();
+WAIT_SYNCHRO_FOR_WORKERS();

 /* create the same ring simultaneously on all threads */
 for (i = 0; i < MAX_ITER_ONCE; i++) {
@@ -166,7 +166,7 @@ mempool_create_lookup(__rte_unused void *arg)
 char mempool_name[MAX_STRING_SIZE];
 int i;

-WAIT_SYNCHRO_FOR_SLAVES();
+WAIT_SYNCHRO_FOR_WORKERS();

 /* create the same mempool simultaneously on all threads */
 for (i = 0; i < MAX_ITER_ONCE; i++) {
@@ -232,7 +232,7 @@ hash_create_free(__rte_unused void *arg)
 .socket_id = 0,
 };

-WAIT_SYNCHRO_FOR_SLAVES();
+WAIT_SYNCHRO_FOR_WORKERS();

 /* create the same hash simultaneously on all threads */
 hash_params.name = "fr_test_once";
@@ -296,7 +296,7 @@ fbk_create_free(__rte_unused void *arg)
 .init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
 };

-WAIT_SYNCHRO_FOR_SLAVES();
+WAIT_SYNCHRO_FOR_WORKERS();

 /* create the same fbk hash table simultaneously on all threads */
 fbk_params.name = "fr_test_once";
@@ -359,7 +359,7 @@ lpm_create_free(__rte_unused void *arg)
 char lpm_name[MAX_STRING_SIZE];
 int i;

-WAIT_SYNCHRO_FOR_SLAVES();
+WAIT_SYNCHRO_FOR_WORKERS();

 /* create the same lpm simultaneously on all threads */
 for (i = 0; i < MAX_ITER_ONCE; i++) {
@@ -430,7 +430,7 @@ launch_test(struct test_case *pt_case)
 rte_atomic32_set(&obj_count, 0);
 rte_atomic32_set(&synchro, 0);

-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (cores == 1)
 break;
 cores--;
@@ -443,7 +443,7 @@ launch_test(struct test_case *pt_case)
 ret = -1;

 cores = cores_save;
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (cores == 1)
 break;
 cores--;

@@ -64,7 +64,7 @@ test_hash_multiwriter_worker(void *arg)

 /*
 * Calculate offset for entries based on the position of the
- * logical core, from the master core (not counting not enabled cores)
+ * logical core, from the main core (not counting not enabled cores)
 */
 offset = pos_core * tbl_multiwriter_test_params.nb_tsx_insertion;

@@ -194,7 +194,7 @@ test_hash_multiwriter(void)

 /* Fire all threads. */
 rte_eal_mp_remote_launch(test_hash_multiwriter_worker,
-enabled_core_ids, CALL_MASTER);
+enabled_core_ids, CALL_MAIN);
 rte_eal_mp_wait_lcore();

 count = rte_hash_count(handle);
@@ -25,7 +25,7 @@
 #define NUM_TEST 3
 unsigned int core_cnt[NUM_TEST] = {2, 4, 8};

-unsigned int slave_core_ids[RTE_MAX_LCORE];
+unsigned int worker_core_ids[RTE_MAX_LCORE];
 struct perf {
 uint32_t single_read;
 uint32_t single_write;
@@ -65,7 +65,7 @@ test_hash_readwrite_worker(__rte_unused void *arg)
 ret = rte_malloc(NULL, sizeof(int) *
 tbl_rw_test_param.num_insert, 0);
 for (i = 0; i < rte_lcore_count(); i++) {
-if (slave_core_ids[i] == lcore_id)
+if (worker_core_ids[i] == lcore_id)
 break;
 }
 offset = tbl_rw_test_param.num_insert * i;
@@ -206,7 +206,7 @@ test_hash_readwrite_functional(int use_htm, int use_rw_lf, int use_ext)
 uint32_t duplicated_keys = 0;
 uint32_t lost_keys = 0;
 int use_jhash = 1;
-int slave_cnt = rte_lcore_count() - 1;
+int worker_cnt = rte_lcore_count() - 1;
 uint32_t tot_insert = 0;

 rte_atomic64_init(&gcycles);
@@ -224,11 +224,10 @@ test_hash_readwrite_functional(int use_htm, int use_rw_lf, int use_ext)
 tot_insert = TOTAL_INSERT;

 tbl_rw_test_param.num_insert =
-tot_insert / slave_cnt;
+tot_insert / worker_cnt;

 tbl_rw_test_param.rounded_tot_insert =
-tbl_rw_test_param.num_insert
-* slave_cnt;
+tbl_rw_test_param.num_insert * worker_cnt;

 printf("\nHTM = %d, RW-LF = %d, EXT-Table = %d\n",
 use_htm, use_rw_lf, use_ext);
@@ -236,7 +235,7 @@ test_hash_readwrite_functional(int use_htm, int use_rw_lf, int use_ext)

 /* Fire all threads. */
 rte_eal_mp_remote_launch(test_hash_readwrite_worker,
-NULL, SKIP_MASTER);
+NULL, SKIP_MAIN);
 rte_eal_mp_wait_lcore();

 while (rte_hash_iterate(tbl_rw_test_param.h, &next_key,
@@ -330,7 +329,7 @@ test_rw_writer(void *arg)
 uint64_t offset;

 for (i = 0; i < rte_lcore_count(); i++) {
-if (slave_core_ids[i] == lcore_id)
+if (worker_core_ids[i] == lcore_id)
 break;
 }

@@ -433,8 +432,8 @@ test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 perf_results->single_read = end / i;

 for (n = 0; n < NUM_TEST; n++) {
-unsigned int tot_slave_lcore = rte_lcore_count() - 1;
-if (tot_slave_lcore < core_cnt[n] * 2)
+unsigned int tot_worker_lcore = rte_lcore_count() - 1;
+if (tot_worker_lcore < core_cnt[n] * 2)
 goto finish;

 rte_atomic64_clear(&greads);
@@ -467,7 +466,7 @@ test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 for (i = 0; i < core_cnt[n]; i++)
 rte_eal_remote_launch(test_rw_reader,
 (void *)(uintptr_t)read_cnt,
-slave_core_ids[i]);
+worker_core_ids[i]);

 rte_eal_mp_wait_lcore();

@@ -476,7 +475,7 @@ test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 for (; i < core_cnt[n] * 2; i++)
 rte_eal_remote_launch(test_rw_writer,
 (void *)((uintptr_t)start_coreid),
-slave_core_ids[i]);
+worker_core_ids[i]);

 rte_eal_mp_wait_lcore();

@@ -521,20 +520,20 @@ test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 for (i = core_cnt[n]; i < core_cnt[n] * 2; i++)
 rte_eal_remote_launch(test_rw_writer,
 (void *)((uintptr_t)start_coreid),
-slave_core_ids[i]);
+worker_core_ids[i]);
 for (i = 0; i < core_cnt[n]; i++)
 rte_eal_remote_launch(test_rw_reader,
 (void *)(uintptr_t)read_cnt,
-slave_core_ids[i]);
+worker_core_ids[i]);
 } else {
 for (i = 0; i < core_cnt[n]; i++)
 rte_eal_remote_launch(test_rw_reader,
 (void *)(uintptr_t)read_cnt,
-slave_core_ids[i]);
+worker_core_ids[i]);
 for (; i < core_cnt[n] * 2; i++)
 rte_eal_remote_launch(test_rw_writer,
 (void *)((uintptr_t)start_coreid),
-slave_core_ids[i]);
+worker_core_ids[i]);
 }

 rte_eal_mp_wait_lcore();
@@ -626,8 +625,8 @@ test_hash_rw_perf_main(void)
 return TEST_SKIPPED;
 }

-RTE_LCORE_FOREACH_SLAVE(core_id) {
-slave_core_ids[i] = core_id;
+RTE_LCORE_FOREACH_WORKER(core_id) {
+worker_core_ids[i] = core_id;
 i++;
 }

@@ -710,8 +709,8 @@ test_hash_rw_func_main(void)
 return TEST_SKIPPED;
 }

-RTE_LCORE_FOREACH_SLAVE(core_id) {
-slave_core_ids[i] = core_id;
+RTE_LCORE_FOREACH_WORKER(core_id) {
+worker_core_ids[i] = core_id;
 i++;
 }

@@ -85,7 +85,7 @@ static struct rte_kni_ops kni_ops = {
 .config_promiscusity = NULL,
 };

-static unsigned lcore_master, lcore_ingress, lcore_egress;
+static unsigned int lcore_main, lcore_ingress, lcore_egress;
 static struct rte_kni *test_kni_ctx;
 static struct test_kni_stats stats;

@@ -202,7 +202,7 @@ error:
 * supported by KNI kernel module. The ingress lcore will allocate mbufs and
 * transmit them to kernel space; while the egress lcore will receive the mbufs
 * from kernel space and free them.
- * On the master lcore, several commands will be run to check handling the
+ * On the main lcore, several commands will be run to check handling the
 * kernel requests. And it will finally set the flag to exit the KNI
 * transmitting/receiving to/from the kernel space.
 *
@@ -217,7 +217,7 @@ test_kni_loop(__rte_unused void *arg)
 const unsigned lcore_id = rte_lcore_id();
 struct rte_mbuf *pkts_burst[PKT_BURST_SZ];

-if (lcore_id == lcore_master) {
+if (lcore_id == lcore_main) {
 rte_delay_ms(KNI_TIMEOUT_MS);
 /* tests of handling kernel request */
 if (system(IFCONFIG TEST_KNI_PORT" up") == -1)
@@ -276,12 +276,12 @@ test_kni_allocate_lcores(void)
 {
 unsigned i, count = 0;

-lcore_master = rte_get_master_lcore();
-printf("master lcore: %u\n", lcore_master);
+lcore_main = rte_get_main_lcore();
+printf("main lcore: %u\n", lcore_main);
 for (i = 0; i < RTE_MAX_LCORE; i++) {
 if (count >=2 )
 break;
-if (rte_lcore_is_enabled(i) && i != lcore_master) {
+if (rte_lcore_is_enabled(i) && i != lcore_main) {
 count ++;
 if (count == 1)
 lcore_ingress = i;
@@ -487,8 +487,8 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
 if (ret != 0)
 goto fail_kni;

-rte_eal_mp_remote_launch(test_kni_loop, NULL, CALL_MASTER);
-RTE_LCORE_FOREACH_SLAVE(i) {
+rte_eal_mp_remote_launch(test_kni_loop, NULL, CALL_MAIN);
+RTE_LCORE_FOREACH_WORKER(i) {
 if (rte_eal_wait_lcore(i) < 0) {
 ret = -1;
 goto fail_kni;
@@ -498,7 +498,7 @@ test_lpm_rcu_perf_multi_writer(void)
 }

 num_cores = 0;
-RTE_LCORE_FOREACH_SLAVE(core_id) {
+RTE_LCORE_FOREACH_WORKER(core_id) {
 enabled_core_ids[num_cores] = core_id;
 num_cores++;
 }
@@ -651,7 +651,7 @@ test_lpm_rcu_perf(void)
 }

 num_cores = 0;
-RTE_LCORE_FOREACH_SLAVE(core_id) {
+RTE_LCORE_FOREACH_WORKER(core_id) {
 enabled_core_ids[num_cores] = core_id;
 num_cores++;
 }

@@ -1010,11 +1010,11 @@ test_malloc(void)
 else printf("test_realloc() passed\n");

 /*----------------------------*/
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 rte_eal_remote_launch(test_align_overlap_per_lcore, NULL, lcore_id);
 }

-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (rte_eal_wait_lcore(lcore_id) < 0)
 ret = -1;
 }
@@ -1025,11 +1025,11 @@ test_malloc(void)
 else printf("test_align_overlap_per_lcore() passed\n");

 /*----------------------------*/
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 rte_eal_remote_launch(test_reordered_free_per_lcore, NULL, lcore_id);
 }

-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (rte_eal_wait_lcore(lcore_id) < 0)
 ret = -1;
 }
@@ -1040,11 +1040,11 @@ test_malloc(void)
 else printf("test_reordered_free_per_lcore() passed\n");

 /*----------------------------*/
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 rte_eal_remote_launch(test_random_alloc_free, NULL, lcore_id);
 }

-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (rte_eal_wait_lcore(lcore_id) < 0)
 ret = -1;
 }
@@ -72,7 +72,7 @@

 #ifdef RTE_MBUF_REFCNT_ATOMIC

-static volatile uint32_t refcnt_stop_slaves;
+static volatile uint32_t refcnt_stop_workers;
 static unsigned refcnt_lcore[RTE_MAX_LCORE];

 #endif
@@ -1000,7 +1000,7 @@ test_pktmbuf_free_segment(struct rte_mempool *pktmbuf_pool)
 #ifdef RTE_MBUF_REFCNT_ATOMIC

 static int
-test_refcnt_slave(void *arg)
+test_refcnt_worker(void *arg)
 {
 unsigned lcore, free;
 void *mp = 0;
@@ -1010,7 +1010,7 @@ test_refcnt_slave(void *arg)
 printf("%s started at lcore %u\n", __func__, lcore);

 free = 0;
-while (refcnt_stop_slaves == 0) {
+while (refcnt_stop_workers == 0) {
 if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) {
 free++;
 rte_pktmbuf_free(mp);
@@ -1038,7 +1038,7 @@ test_refcnt_iter(unsigned int lcore, unsigned int iter,
 /* For each mbuf in the pool:
 * - allocate mbuf,
 * - increment it's reference up to N+1,
- * - enqueue it N times into the ring for slave cores to free.
+ * - enqueue it N times into the ring for worker cores to free.
 */
 for (i = 0, n = rte_mempool_avail_count(refcnt_pool);
 i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
@@ -1062,7 +1062,7 @@ test_refcnt_iter(unsigned int lcore, unsigned int iter,
 rte_panic("(lcore=%u, iter=%u): was able to allocate only "
 "%u from %u mbufs\n", lcore, iter, i, n);

-/* wait till slave lcores will consume all mbufs */
+/* wait till worker lcores will consume all mbufs */
 while (!rte_ring_empty(refcnt_mbuf_ring))
 ;

@@ -1083,7 +1083,7 @@ test_refcnt_iter(unsigned int lcore, unsigned int iter,
 }

 static int
-test_refcnt_master(struct rte_mempool *refcnt_pool,
+test_refcnt_main(struct rte_mempool *refcnt_pool,
 struct rte_ring *refcnt_mbuf_ring)
 {
 unsigned i, lcore;
@@ -1094,7 +1094,7 @@ test_refcnt_master(struct rte_mempool *refcnt_pool,
 for (i = 0; i != REFCNT_MAX_ITER; i++)
 test_refcnt_iter(lcore, i, refcnt_pool, refcnt_mbuf_ring);

-refcnt_stop_slaves = 1;
+refcnt_stop_workers = 1;
 rte_wmb();

 printf("%s finished at lcore %u\n", __func__, lcore);
@@ -1107,7 +1107,7 @@ static int
 test_refcnt_mbuf(void)
 {
 #ifdef RTE_MBUF_REFCNT_ATOMIC
-unsigned int master, slave, tref;
+unsigned int main_lcore, worker, tref;
 int ret = -1;
 struct rte_mempool *refcnt_pool = NULL;
 struct rte_ring *refcnt_mbuf_ring = NULL;
@@ -1126,39 +1126,38 @@ test_refcnt_mbuf(void)
 SOCKET_ID_ANY);
 if (refcnt_pool == NULL) {
 printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
-__func__);
+__func__);
 return -1;
 }

 refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring",
-rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
-RING_F_SP_ENQ);
+rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
+RING_F_SP_ENQ);
 if (refcnt_mbuf_ring == NULL) {
 printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
-"\n", __func__);
+"\n", __func__);
 goto err;
 }

-refcnt_stop_slaves = 0;
+refcnt_stop_workers = 0;
 memset(refcnt_lcore, 0, sizeof (refcnt_lcore));

-rte_eal_mp_remote_launch(test_refcnt_slave, refcnt_mbuf_ring,
-SKIP_MASTER);
+rte_eal_mp_remote_launch(test_refcnt_worker, refcnt_mbuf_ring, SKIP_MAIN);

-test_refcnt_master(refcnt_pool, refcnt_mbuf_ring);
+test_refcnt_main(refcnt_pool, refcnt_mbuf_ring);

 rte_eal_mp_wait_lcore();

 /* check that we porcessed all references */
 tref = 0;
-master = rte_get_master_lcore();
+main_lcore = rte_get_main_lcore();

-RTE_LCORE_FOREACH_SLAVE(slave)
-tref += refcnt_lcore[slave];
+RTE_LCORE_FOREACH_WORKER(worker)
+tref += refcnt_lcore[worker];

-if (tref != refcnt_lcore[master])
+if (tref != refcnt_lcore[main_lcore])
 rte_panic("referenced mbufs: %u, freed mbufs: %u\n",
-tref, refcnt_lcore[master]);
+tref, refcnt_lcore[main_lcore]);

 rte_mempool_dump(stdout, refcnt_pool);
 rte_ring_dump(stdout, refcnt_mbuf_ring);
@@ -28,7 +28,7 @@
 * These tests are derived from spin lock test cases.
 *
 * - The functional test takes all of these locks and launches the
- * ''test_mcslock_per_core()'' function on each core (except the master).
+ * ''test_mcslock_per_core()'' function on each core (except the main).
 *
 * - The function takes the global lock, display something, then releases
 * the global lock on each core.
@@ -123,9 +123,9 @@ test_mcslock_perf(void)
 printf("\nTest with lock on %u cores...\n", (rte_lcore_count()));

 rte_atomic32_set(&synchro, 0);
-rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);
+rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);

-/* start synchro and launch test on master */
+/* start synchro and launch test on main */
 rte_atomic32_set(&synchro, 1);
 load_loop_fn(&lock);

@@ -154,8 +154,8 @@ test_mcslock_try(__rte_unused void *arg)
 rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me);
 rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);

-/* Locked ml_try in the master lcore, so it should fail
- * when trying to lock it in the slave lcore.
+/* Locked ml_try in the main lcore, so it should fail
+ * when trying to lock it in the worker lcore.
 */
 if (rte_mcslock_trylock(&p_ml_try, &ml_try_me) == 0) {
 rte_mcslock_lock(&p_ml, &ml_me);
@@ -185,20 +185,20 @@ test_mcslock(void)
 * Test mcs lock & unlock on each core
 */

-/* slave cores should be waiting: print it */
-RTE_LCORE_FOREACH_SLAVE(i) {
+/* worker cores should be waiting: print it */
+RTE_LCORE_FOREACH_WORKER(i) {
 printf("lcore %d state: %d\n", i,
 (int) rte_eal_get_lcore_state(i));
 }

 rte_mcslock_lock(&p_ml, &ml_me);

-RTE_LCORE_FOREACH_SLAVE(i) {
+RTE_LCORE_FOREACH_WORKER(i) {
 rte_eal_remote_launch(test_mcslock_per_core, NULL, i);
 }

-/* slave cores should be busy: print it */
-RTE_LCORE_FOREACH_SLAVE(i) {
+/* worker cores should be busy: print it */
+RTE_LCORE_FOREACH_WORKER(i) {
 printf("lcore %d state: %d\n", i,
 (int) rte_eal_get_lcore_state(i));
 }
@@ -210,19 +210,19 @@ test_mcslock(void)
 /*
 * Test if it could return immediately from try-locking a locked object.
 * Here it will lock the mcs lock object first, then launch all the
- * slave lcores to trylock the same mcs lock object.
- * All the slave lcores should give up try-locking a locked object and
+ * worker lcores to trylock the same mcs lock object.
+ * All the worker lcores should give up try-locking a locked object and
 * return immediately, and then increase the "count" initialized with
 * zero by one per times.
 * We can check if the "count" is finally equal to the number of all
- * slave lcores to see if the behavior of try-locking a locked
+ * worker lcores to see if the behavior of try-locking a locked
 * mcslock object is correct.
 */
 if (rte_mcslock_trylock(&p_ml_try, &ml_try_me) == 0)
 return -1;

 count = 0;
-RTE_LCORE_FOREACH_SLAVE(i) {
+RTE_LCORE_FOREACH_WORKER(i) {
 rte_eal_remote_launch(test_mcslock_try, NULL, i);
 }
 rte_eal_mp_wait_lcore();

@@ -143,8 +143,8 @@ per_lcore_mempool_test(void *arg)

 stats[lcore_id].enq_count = 0;

-/* wait synchro for slaves */
-if (lcore_id != rte_get_master_lcore())
+/* wait synchro for workers */
+if (lcore_id != rte_get_main_lcore())
 while (rte_atomic32_read(&synchro) == 0);

 start_cycles = rte_get_timer_cycles();
@@ -214,7 +214,7 @@ launch_cores(struct rte_mempool *mp, unsigned int cores)
 return -1;
 }

-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (cores == 1)
 break;
 cores--;
@@ -222,13 +222,13 @@ launch_cores(struct rte_mempool *mp, unsigned int cores)
 mp, lcore_id);
 }

-/* start synchro and launch test on master */
+/* start synchro and launch test on main */
 rte_atomic32_set(&synchro, 1);

 ret = per_lcore_mempool_test(mp);

 cores = cores_save;
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (cores == 1)
 break;
 cores--;

@@ -94,7 +94,7 @@ run_secondary_instances(void)
 #endif

 snprintf(coremask, sizeof(coremask), "%x", \
-(1 << rte_get_master_lcore()));
+(1 << rte_get_main_lcore()));

 ret |= launch_proc(argv1);
 printf("### Testing rte_mp_disable() reject:\n");

@@ -184,7 +184,7 @@ run_pdump_server_tests(void)
 };

 snprintf(coremask, sizeof(coremask), "%x",
-(1 << rte_get_master_lcore()));
+(1 << rte_get_main_lcore()));

 ret = test_pdump_init();
 ret |= launch_p(argv1);

@@ -73,31 +73,31 @@ test_per_lcore(void)
 unsigned lcore_id;
 int ret;

-rte_eal_mp_remote_launch(assign_vars, NULL, SKIP_MASTER);
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+rte_eal_mp_remote_launch(assign_vars, NULL, SKIP_MAIN);
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (rte_eal_wait_lcore(lcore_id) < 0)
 return -1;
 }

-rte_eal_mp_remote_launch(display_vars, NULL, SKIP_MASTER);
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+rte_eal_mp_remote_launch(display_vars, NULL, SKIP_MAIN);
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (rte_eal_wait_lcore(lcore_id) < 0)
 return -1;
 }

 /* test if it could do remote launch twice at the same time or not */
-ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MASTER);
+ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MAIN);
 if (ret < 0) {
 printf("It fails to do remote launch but it should able to do\n");
 return -1;
 }
 /* it should not be able to launch a lcore which is running */
-ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MASTER);
+ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MAIN);
 if (ret == 0) {
 printf("It does remote launch successfully but it should not at this time\n");
 return -1;
 }
-RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+RTE_LCORE_FOREACH_WORKER(lcore_id) {
 if (rte_eal_wait_lcore(lcore_id) < 0)
 return -1;
 }

@@ -275,7 +275,7 @@ alloc_lcore(uint16_t socketid)
 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
 if (LCORE_AVAIL != lcore_conf[lcore_id].status ||
 lcore_conf[lcore_id].socketid != socketid ||
-lcore_id == rte_get_master_lcore())
+lcore_id == rte_get_main_lcore())
 continue;
 lcore_conf[lcore_id].status = LCORE_USED;
 lcore_conf[lcore_id].nb_ports = 0;
@@ -661,7 +661,7 @@ exec_burst(uint32_t flags, int lcore)
 static int
 test_pmd_perf(void)
 {
-uint16_t nb_ports, num, nb_lcores, slave_id = (uint16_t)-1;
+uint16_t nb_ports, num, nb_lcores, worker_id = (uint16_t)-1;
 uint16_t nb_rxd = MAX_TRAFFIC_BURST;
 uint16_t nb_txd = MAX_TRAFFIC_BURST;
 uint16_t portid;
@@ -699,13 +699,13 @@ test_pmd_perf(void)
 RTE_ETH_FOREACH_DEV(portid) {
 if (socketid == -1) {
 socketid = rte_eth_dev_socket_id(portid);
-slave_id = alloc_lcore(socketid);
-if (slave_id == (uint16_t)-1) {
+worker_id = alloc_lcore(socketid);
+if (worker_id == (uint16_t)-1) {
 printf("No avail lcore to run test\n");
 return -1;
 }
 printf("Performance test runs on lcore %u socket %u\n",
-slave_id, socketid);
+worker_id, socketid);
 }

 if (socketid != rte_eth_dev_socket_id(portid)) {
@@ -762,8 +762,8 @@ test_pmd_perf(void)
 "rte_eth_promiscuous_enable: err=%s, port=%d\n",
 rte_strerror(-ret), portid);

-lcore_conf[slave_id].portlist[num++] = portid;
-lcore_conf[slave_id].nb_ports++;
+lcore_conf[worker_id].portlist[num++] = portid;
+lcore_conf[worker_id].nb_ports++;
 }
 check_all_ports_link_status(nb_ports, RTE_PORT_ALL);

@@ -788,13 +788,13 @@ test_pmd_perf(void)
 if (NULL == do_measure)
 do_measure = measure_rxtx;

-rte_eal_remote_launch(main_loop, NULL, slave_id);
+rte_eal_remote_launch(main_loop, NULL, worker_id);

-if (rte_eal_wait_lcore(slave_id) < 0)
+if (rte_eal_wait_lcore(worker_id) < 0)
 return -1;
 } else if (sc_flag == SC_BURST_POLL_FIRST ||
 sc_flag == SC_BURST_XMIT_FIRST)
-if (exec_burst(sc_flag, slave_id) < 0)
+if (exec_burst(sc_flag, worker_id) < 0)
 return -1;

 /* port tear down */

@@ -1327,7 +1327,7 @@ test_rcu_qsbr_main(void)
 }

 num_cores = 0;
-RTE_LCORE_FOREACH_SLAVE(core_id) {
+RTE_LCORE_FOREACH_WORKER(core_id) {
 enabled_core_ids[num_cores] = core_id;
 num_cores++;
 }

@@ -625,7 +625,7 @@ test_rcu_qsbr_main(void)
 rte_atomic64_init(&check_cycles);

 num_cores = 0;
-RTE_LCORE_FOREACH_SLAVE(core_id) {
+RTE_LCORE_FOREACH_WORKER(core_id) {
 enabled_core_ids[num_cores] = core_id;
 num_cores++;
 }

@@ -297,7 +297,7 @@ run_on_core_pair(struct lcore_pair *cores, struct rte_ring *r, const int esize)
 lcore_count = 0;
 param1.size = param2.size = bulk_sizes[i];
 param1.r = param2.r = r;
-if (cores->c1 == rte_get_master_lcore()) {
+if (cores->c1 == rte_get_main_lcore()) {
 rte_eal_remote_launch(f2, &param2, cores->c2);
 f1(&param1);
 rte_eal_wait_lcore(cores->c2);
@@ -340,8 +340,8 @@ load_loop_fn_helper(struct thread_params *p, const int esize)
 if (burst == NULL)
 return -1;

-/* wait synchro for slaves */
-if (lcore != rte_get_master_lcore())
+/* wait synchro for workers */
+if (lcore != rte_get_main_lcore())
 while (rte_atomic32_read(&synchro) == 0)
 rte_pause();

@@ -397,12 +397,12 @@ run_on_all_cores(struct rte_ring *r, const int esize)
 param.size = bulk_sizes[i];
 param.r = r;

-/* clear synchro and start slaves */
+/* clear synchro and start workers */
 rte_atomic32_set(&synchro, 0);
-if (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MASTER) < 0)
+if (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MAIN) < 0)
 return -1;

-/* start synchro and launch test on master */
+/* start synchro and launch test on main */
 rte_atomic32_set(&synchro, 1);
 lcore_f(&param);

@@ -553,7 +553,7 @@ test_ring_perf_esize(const int esize)
 goto test_fail;
 }

-printf("\n### Testing using all slave nodes ###\n");
+printf("\n### Testing using all worker nodes ###\n");
 if (run_on_all_cores(r, esize) < 0)
 goto test_fail;

@ -6,7 +6,7 @@

/**
* Stress test for ring enqueue/dequeue operations.
* Performs the following pattern on each slave worker:
* Performs the following pattern on each worker:
* dequeue/read-write data from the dequeued objects/enqueue.
* Serves as both functional and performance test of ring
* enqueue/dequeue operations under high contention

@ -348,8 +348,8 @@ test_mt1(int (*test)(void *))

memset(arg, 0, sizeof(arg));

/* launch on all slaves */
RTE_LCORE_FOREACH_SLAVE(lc) {
/* launch on all workers */
RTE_LCORE_FOREACH_WORKER(lc) {
arg[lc].rng = r;
arg[lc].stats = init_stat;
rte_eal_remote_launch(test, &arg[lc], lc);

@ -365,12 +365,12 @@ test_mt1(int (*test)(void *))
wrk_cmd = WRK_CMD_STOP;
rte_smp_wmb();

/* wait for slaves and collect stats. */
/* wait for workers and collect stats. */
mc = rte_lcore_id();
arg[mc].stats = init_stat;

rc = 0;
RTE_LCORE_FOREACH_SLAVE(lc) {
RTE_LCORE_FOREACH_WORKER(lc) {
rc |= rte_eal_wait_lcore(lc);
lcore_stat_aggr(&arg[mc].stats, &arg[lc].stats);
if (verbose != 0)
@ -99,8 +99,8 @@ load_loop_fn(__rte_unused void *arg)
uint64_t lcount = 0;
const unsigned int lcore = rte_lcore_id();

/* wait synchro for slaves */
if (lcore != rte_get_master_lcore())
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
while (rte_atomic32_read(&synchro) == 0)
;

@ -134,12 +134,12 @@ test_rwlock_perf(void)

printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());

/* clear synchro and start slaves */
/* clear synchro and start workers */
rte_atomic32_set(&synchro, 0);
if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MASTER) < 0)
if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MAIN) < 0)
return -1;

/* start synchro and launch test on master */
/* start synchro and launch test on main */
rte_atomic32_set(&synchro, 1);
load_loop_fn(NULL);

@ -161,7 +161,7 @@ test_rwlock_perf(void)
* - There is a global rwlock and a table of rwlocks (one per lcore).
*
* - The test function takes all of these locks and launches the
* ``test_rwlock_per_core()`` function on each core (except the master).
* ``test_rwlock_per_core()`` function on each core (except the main).
*
* - The function takes the global write lock, display something,
* then releases the global lock.

@ -187,21 +187,21 @@ rwlock_test1(void)

rte_rwlock_write_lock(&sl);

RTE_LCORE_FOREACH_SLAVE(i) {
RTE_LCORE_FOREACH_WORKER(i) {
rte_rwlock_write_lock(&sl_tab[i]);
rte_eal_remote_launch(test_rwlock_per_core, NULL, i);
}

rte_rwlock_write_unlock(&sl);

RTE_LCORE_FOREACH_SLAVE(i) {
RTE_LCORE_FOREACH_WORKER(i) {
rte_rwlock_write_unlock(&sl_tab[i]);
rte_delay_ms(100);
}

rte_rwlock_write_lock(&sl);
/* this message should be the last message of test */
printf("Global write lock taken on master core %u\n", rte_lcore_id());
printf("Global write lock taken on main core %u\n", rte_lcore_id());
rte_rwlock_write_unlock(&sl);

rte_eal_mp_wait_lcore();

@ -462,26 +462,26 @@ try_rwlock_test_rda(void)
try_test_reset();

/* start read test on all avaialble lcores */
rte_eal_mp_remote_launch(try_read_lcore, NULL, CALL_MASTER);
rte_eal_mp_remote_launch(try_read_lcore, NULL, CALL_MAIN);
rte_eal_mp_wait_lcore();

return process_try_lcore_stats();
}

/* all slave lcores grab RDLOCK, master one grabs WRLOCK */
/* all worker lcores grab RDLOCK, main one grabs WRLOCK */
static int
try_rwlock_test_rds_wrm(void)
{
try_test_reset();

rte_eal_mp_remote_launch(try_read_lcore, NULL, SKIP_MASTER);
rte_eal_mp_remote_launch(try_read_lcore, NULL, SKIP_MAIN);
try_write_lcore(NULL);
rte_eal_mp_wait_lcore();

return process_try_lcore_stats();
}

/* master and even slave lcores grab RDLOCK, odd lcores grab WRLOCK */
/* main and even worker lcores grab RDLOCK, odd lcores grab WRLOCK */
static int
try_rwlock_test_rde_wro(void)
{

@ -489,7 +489,7 @@ try_rwlock_test_rde_wro(void)

try_test_reset();

mlc = rte_get_master_lcore();
mlc = rte_get_main_lcore();

RTE_LCORE_FOREACH(lc) {
if (lc != mlc) {
@ -30,7 +30,7 @@ static int
testsuite_setup(void)
{
slcore_id = rte_get_next_lcore(/* start core */ -1,
/* skip master */ 1,
/* skip main */ 1,
/* wrap */ 0);

return TEST_SUCCESS;

@ -561,12 +561,12 @@ service_lcore_add_del(void)
TEST_ASSERT_EQUAL(1, rte_service_lcore_count(),
"Service core count not equal to one");
uint32_t slcore_1 = rte_get_next_lcore(/* start core */ -1,
/* skip master */ 1,
/* skip main */ 1,
/* wrap */ 0);
TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_1),
"Service core add did not return zero");
uint32_t slcore_2 = rte_get_next_lcore(/* start core */ slcore_1,
/* skip master */ 1,
/* skip main */ 1,
/* wrap */ 0);
TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_2),
"Service core add did not return zero");

@ -612,12 +612,12 @@ service_threaded_test(int mt_safe)

/* add next 2 cores */
uint32_t slcore_1 = rte_get_next_lcore(/* start core */ -1,
/* skip master */ 1,
/* skip main */ 1,
/* wrap */ 0);
TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_1),
"mt safe lcore add fail");
uint32_t slcore_2 = rte_get_next_lcore(/* start core */ slcore_1,
/* skip master */ 1,
/* skip main */ 1,
/* wrap */ 0);
TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_2),
"mt safe lcore add fail");

@ -951,10 +951,10 @@ service_active_two_cores(void)
int i;

uint32_t lcore = rte_get_next_lcore(/* start core */ -1,
/* skip master */ 1,
/* skip main */ 1,
/* wrap */ 0);
uint32_t slcore = rte_get_next_lcore(/* start core */ lcore,
/* skip master */ 1,
/* skip main */ 1,
/* wrap */ 0);

/* start the service on the second available lcore */
@ -28,7 +28,7 @@
* - There is a global spinlock and a table of spinlocks (one per lcore).
*
* - The test function takes all of these locks and launches the
* ``test_spinlock_per_core()`` function on each core (except the master).
* ``test_spinlock_per_core()`` function on each core (except the main).
*
* - The function takes the global lock, display something, then releases
* the global lock.

@ -109,8 +109,8 @@ load_loop_fn(void *func_param)
const int use_lock = *(int*)func_param;
const unsigned lcore = rte_lcore_id();

/* wait synchro for slaves */
if (lcore != rte_get_master_lcore())
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
while (rte_atomic32_read(&synchro) == 0);

begin = rte_get_timer_cycles();

@ -149,11 +149,11 @@ test_spinlock_perf(void)

printf("\nTest with lock on %u cores...\n", rte_lcore_count());

/* Clear synchro and start slaves */
/* Clear synchro and start workers */
rte_atomic32_set(&synchro, 0);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);

/* start synchro and launch test on master */
/* start synchro and launch test on main */
rte_atomic32_set(&synchro, 1);
load_loop_fn(&lock);

@ -200,8 +200,8 @@ test_spinlock(void)
int ret = 0;
int i;

/* slave cores should be waiting: print it */
RTE_LCORE_FOREACH_SLAVE(i) {
/* worker cores should be waiting: print it */
RTE_LCORE_FOREACH_WORKER(i) {
printf("lcore %d state: %d\n", i,
(int) rte_eal_get_lcore_state(i));
}

@ -214,19 +214,19 @@ test_spinlock(void)

rte_spinlock_lock(&sl);

RTE_LCORE_FOREACH_SLAVE(i) {
RTE_LCORE_FOREACH_WORKER(i) {
rte_spinlock_lock(&sl_tab[i]);
rte_eal_remote_launch(test_spinlock_per_core, NULL, i);
}

/* slave cores should be busy: print it */
RTE_LCORE_FOREACH_SLAVE(i) {
/* worker cores should be busy: print it */
RTE_LCORE_FOREACH_WORKER(i) {
printf("lcore %d state: %d\n", i,
(int) rte_eal_get_lcore_state(i));
}
rte_spinlock_unlock(&sl);

RTE_LCORE_FOREACH_SLAVE(i) {
RTE_LCORE_FOREACH_WORKER(i) {
rte_spinlock_unlock(&sl_tab[i]);
rte_delay_ms(10);
}

@ -245,7 +245,7 @@ test_spinlock(void)
} else
rte_spinlock_recursive_unlock(&slr);

RTE_LCORE_FOREACH_SLAVE(i) {
RTE_LCORE_FOREACH_WORKER(i) {
rte_eal_remote_launch(test_spinlock_recursive_per_core, NULL, i);
}
rte_spinlock_recursive_unlock(&slr);

@ -253,12 +253,12 @@ test_spinlock(void)

/*
* Test if it could return immediately from try-locking a locked object.
* Here it will lock the spinlock object first, then launch all the slave
* Here it will lock the spinlock object first, then launch all the worker
* lcores to trylock the same spinlock object.
* All the slave lcores should give up try-locking a locked object and
* All the worker lcores should give up try-locking a locked object and
* return immediately, and then increase the "count" initialized with zero
* by one per times.
* We can check if the "count" is finally equal to the number of all slave
* We can check if the "count" is finally equal to the number of all worker
* lcores to see if the behavior of try-locking a locked spinlock object
* is correct.
*/

@ -266,7 +266,7 @@ test_spinlock(void)
return -1;
}
count = 0;
RTE_LCORE_FOREACH_SLAVE(i) {
RTE_LCORE_FOREACH_WORKER(i) {
rte_eal_remote_launch(test_spinlock_try, NULL, i);
}
rte_eal_mp_wait_lcore();
@ -328,7 +328,7 @@ test_stack_multithreaded(uint32_t flags)

thread_test_args.s = s;

if (rte_eal_mp_remote_launch(stack_thread_push_pop, NULL, CALL_MASTER))
if (rte_eal_mp_remote_launch(stack_thread_push_pop, NULL, CALL_MAIN))
rte_panic("Failed to launch tests\n");

RTE_LCORE_FOREACH(lcore_id) {
@ -180,7 +180,7 @@ run_on_core_pair(struct lcore_pair *cores, struct rte_stack *s,
args[0].sz = args[1].sz = bulk_sizes[i];
args[0].s = args[1].s = s;

if (cores->c1 == rte_get_master_lcore()) {
if (cores->c1 == rte_get_main_lcore()) {
rte_eal_remote_launch(fn, &args[1], cores->c2);
fn(&args[0]);
rte_eal_wait_lcore(cores->c2);

@ -210,7 +210,7 @@ run_on_n_cores(struct rte_stack *s, lcore_function_t fn, int n)

rte_atomic32_set(&lcore_barrier, n);

RTE_LCORE_FOREACH_SLAVE(lcore_id) {
RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (++cnt >= n)
break;

@ -235,7 +235,7 @@ run_on_n_cores(struct rte_stack *s, lcore_function_t fn, int n)
avg = args[rte_lcore_id()].avg;

cnt = 0;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (++cnt >= n)
break;
avg += args[lcore_id].avg;
@ -28,7 +28,7 @@
* - There is a global ticketlock and a table of ticketlocks (one per lcore).
*
* - The test function takes all of these locks and launches the
* ``test_ticketlock_per_core()`` function on each core (except the master).
* ``test_ticketlock_per_core()`` function on each core (except the main).
*
* - The function takes the global lock, display something, then releases
* the global lock.

@ -110,8 +110,8 @@ load_loop_fn(void *func_param)
const int use_lock = *(int *)func_param;
const unsigned int lcore = rte_lcore_id();

/* wait synchro for slaves */
if (lcore != rte_get_master_lcore())
/* wait synchro for workers */
if (lcore != rte_get_main_lcore())
while (rte_atomic32_read(&synchro) == 0)
;

@ -154,11 +154,11 @@ test_ticketlock_perf(void)
lcount = 0;
printf("\nTest with lock on %u cores...\n", rte_lcore_count());

/* Clear synchro and start slaves */
/* Clear synchro and start workers */
rte_atomic32_set(&synchro, 0);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);
rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);

/* start synchro and launch test on master */
/* start synchro and launch test on main */
rte_atomic32_set(&synchro, 1);
load_loop_fn(&lock);

@ -208,8 +208,8 @@ test_ticketlock(void)
int ret = 0;
int i;

/* slave cores should be waiting: print it */
RTE_LCORE_FOREACH_SLAVE(i) {
/* worker cores should be waiting: print it */
RTE_LCORE_FOREACH_WORKER(i) {
printf("lcore %d state: %d\n", i,
(int) rte_eal_get_lcore_state(i));
}

@ -217,25 +217,25 @@ test_ticketlock(void)
rte_ticketlock_init(&tl);
rte_ticketlock_init(&tl_try);
rte_ticketlock_recursive_init(&tlr);
RTE_LCORE_FOREACH_SLAVE(i) {
RTE_LCORE_FOREACH_WORKER(i) {
rte_ticketlock_init(&tl_tab[i]);
}

rte_ticketlock_lock(&tl);

RTE_LCORE_FOREACH_SLAVE(i) {
RTE_LCORE_FOREACH_WORKER(i) {
rte_ticketlock_lock(&tl_tab[i]);
rte_eal_remote_launch(test_ticketlock_per_core, NULL, i);
}

/* slave cores should be busy: print it */
RTE_LCORE_FOREACH_SLAVE(i) {
/* worker cores should be busy: print it */
RTE_LCORE_FOREACH_WORKER(i) {
printf("lcore %d state: %d\n", i,
(int) rte_eal_get_lcore_state(i));
}
rte_ticketlock_unlock(&tl);

RTE_LCORE_FOREACH_SLAVE(i) {
RTE_LCORE_FOREACH_WORKER(i) {
rte_ticketlock_unlock(&tl_tab[i]);
rte_delay_ms(10);
}

@ -254,7 +254,7 @@ test_ticketlock(void)
} else
rte_ticketlock_recursive_unlock(&tlr);

RTE_LCORE_FOREACH_SLAVE(i) {
RTE_LCORE_FOREACH_WORKER(i) {
rte_eal_remote_launch(test_ticketlock_recursive_per_core,
NULL, i);
}

@ -264,19 +264,19 @@ test_ticketlock(void)
/*
* Test if it could return immediately from try-locking a locked object.
* Here it will lock the ticketlock object first, then launch all the
* slave lcores to trylock the same ticketlock object.
* All the slave lcores should give up try-locking a locked object and
* worker lcores to trylock the same ticketlock object.
* All the worker lcores should give up try-locking a locked object and
* return immediately, and then increase the "count" initialized with
* zero by one per times.
* We can check if the "count" is finally equal to the number of all
* slave lcores to see if the behavior of try-locking a locked
* worker lcores to see if the behavior of try-locking a locked
* ticketlock object is correct.
*/
if (rte_ticketlock_trylock(&tl_try) == 0)
return -1;

count = 0;
RTE_LCORE_FOREACH_SLAVE(i) {
RTE_LCORE_FOREACH_WORKER(i) {
rte_eal_remote_launch(test_ticketlock_try, NULL, i);
}
rte_eal_mp_wait_lcore();
@ -37,7 +37,7 @@
* - All cores then simultaneously are set to schedule all the timers at
* the same time, so conflicts should occur.
* - Then there is a delay while we wait for the timers to expire
* - Then the master lcore calls timer_manage() and we check that all
* - Then the main lcore calls timer_manage() and we check that all
* timers have had their callbacks called exactly once - no more no less.
* - Then we repeat the process, except after setting up the timers, we have
* all cores randomly reschedule them.

@ -58,7 +58,7 @@
*
* - timer0
*
* - At initialization, timer0 is loaded by the master core, on master core
* - At initialization, timer0 is loaded by the main core, on main core
* in "single" mode (time = 1 second).
* - In the first 19 callbacks, timer0 is reloaded on the same core,
* then, it is explicitly stopped at the 20th call.

@ -66,21 +66,21 @@
*
* - timer1
*
* - At initialization, timer1 is loaded by the master core, on the
* master core in "single" mode (time = 2 seconds).
* - At initialization, timer1 is loaded by the main core, on the
* main core in "single" mode (time = 2 seconds).
* - In the first 9 callbacks, timer1 is reloaded on another
* core. After the 10th callback, timer1 is not reloaded anymore.
*
* - timer2
*
* - At initialization, timer2 is loaded by the master core, on the
* master core in "periodical" mode (time = 1 second).
* - At initialization, timer2 is loaded by the main core, on the
* main core in "periodical" mode (time = 1 second).
* - In the callback, when t=25s, it stops timer3 and reloads timer0
* on the current core.
*
* - timer3
*
* - At initialization, timer3 is loaded by the master core, on
* - At initialization, timer3 is loaded by the main core, on
* another core in "periodical" mode (time = 1 second).
* - It is stopped at t=25s by timer2.
*/
@ -201,68 +201,69 @@ timer_stress_main_loop(__rte_unused void *arg)
return 0;
}

/* Need to synchronize slave lcores through multiple steps. */
enum { SLAVE_WAITING = 1, SLAVE_RUN_SIGNAL, SLAVE_RUNNING, SLAVE_FINISHED };
static rte_atomic16_t slave_state[RTE_MAX_LCORE];
/* Need to synchronize worker lcores through multiple steps. */
enum { WORKER_WAITING = 1, WORKER_RUN_SIGNAL, WORKER_RUNNING, WORKER_FINISHED };
static rte_atomic16_t lcore_state[RTE_MAX_LCORE];

static void
master_init_slaves(void)
main_init_workers(void)
{
unsigned i;

RTE_LCORE_FOREACH_SLAVE(i) {
rte_atomic16_set(&slave_state[i], SLAVE_WAITING);
RTE_LCORE_FOREACH_WORKER(i) {
rte_atomic16_set(&lcore_state[i], WORKER_WAITING);
}
}

static void
master_start_slaves(void)
main_start_workers(void)
{
unsigned i;

RTE_LCORE_FOREACH_SLAVE(i) {
rte_atomic16_set(&slave_state[i], SLAVE_RUN_SIGNAL);
RTE_LCORE_FOREACH_WORKER(i) {
rte_atomic16_set(&lcore_state[i], WORKER_RUN_SIGNAL);
}
RTE_LCORE_FOREACH_SLAVE(i) {
while (rte_atomic16_read(&slave_state[i]) != SLAVE_RUNNING)
RTE_LCORE_FOREACH_WORKER(i) {
while (rte_atomic16_read(&lcore_state[i]) != WORKER_RUNNING)
rte_pause();
}
}

static void
master_wait_for_slaves(void)
main_wait_for_workers(void)
{
unsigned i;

RTE_LCORE_FOREACH_SLAVE(i) {
while (rte_atomic16_read(&slave_state[i]) != SLAVE_FINISHED)
RTE_LCORE_FOREACH_WORKER(i) {
while (rte_atomic16_read(&lcore_state[i]) != WORKER_FINISHED)
rte_pause();
}
}

static void
slave_wait_to_start(void)
worker_wait_to_start(void)
{
unsigned lcore_id = rte_lcore_id();

while (rte_atomic16_read(&slave_state[lcore_id]) != SLAVE_RUN_SIGNAL)
while (rte_atomic16_read(&lcore_state[lcore_id]) != WORKER_RUN_SIGNAL)
rte_pause();
rte_atomic16_set(&slave_state[lcore_id], SLAVE_RUNNING);
rte_atomic16_set(&lcore_state[lcore_id], WORKER_RUNNING);
}

static void
slave_finish(void)
worker_finish(void)
{
unsigned lcore_id = rte_lcore_id();

rte_atomic16_set(&slave_state[lcore_id], SLAVE_FINISHED);
rte_atomic16_set(&lcore_state[lcore_id], WORKER_FINISHED);
}


static volatile int cb_count = 0;

/* callback for second stress test. will only be called
* on master lcore */
* on main lcore
*/
static void
timer_stress2_cb(struct rte_timer *tim __rte_unused, void *arg __rte_unused)
{
@ -277,36 +278,36 @@ timer_stress2_main_loop(__rte_unused void *arg)
static struct rte_timer *timers;
int i, ret;
uint64_t delay = rte_get_timer_hz() / 20;
unsigned lcore_id = rte_lcore_id();
unsigned master = rte_get_master_lcore();
unsigned int lcore_id = rte_lcore_id();
unsigned int main_lcore = rte_get_main_lcore();
int32_t my_collisions = 0;
static rte_atomic32_t collisions;

if (lcore_id == master) {
if (lcore_id == main_lcore) {
cb_count = 0;
test_failed = 0;
rte_atomic32_set(&collisions, 0);
master_init_slaves();
main_init_workers();
timers = rte_malloc(NULL, sizeof(*timers) * NB_STRESS2_TIMERS, 0);
if (timers == NULL) {
printf("Test Failed\n");
printf("- Cannot allocate memory for timers\n" );
test_failed = 1;
master_start_slaves();
main_start_workers();
goto cleanup;
}
for (i = 0; i < NB_STRESS2_TIMERS; i++)
rte_timer_init(&timers[i]);
master_start_slaves();
main_start_workers();
} else {
slave_wait_to_start();
worker_wait_to_start();
if (test_failed)
goto cleanup;
}

/* have all cores schedule all timers on master lcore */
/* have all cores schedule all timers on main lcore */
for (i = 0; i < NB_STRESS2_TIMERS; i++) {
ret = rte_timer_reset(&timers[i], delay, SINGLE, master,
ret = rte_timer_reset(&timers[i], delay, SINGLE, main_lcore,
timer_stress2_cb, NULL);
/* there will be collisions when multiple cores simultaneously
* configure the same timers */

@ -320,14 +321,14 @@ timer_stress2_main_loop(__rte_unused void *arg)
rte_delay_ms(100);

/* all cores rendezvous */
if (lcore_id == master) {
master_wait_for_slaves();
if (lcore_id == main_lcore) {
main_wait_for_workers();
} else {
slave_finish();
worker_finish();
}

/* now check that we get the right number of callbacks */
if (lcore_id == master) {
if (lcore_id == main_lcore) {
my_collisions = rte_atomic32_read(&collisions);
if (my_collisions != 0)
printf("- %d timer reset collisions (OK)\n", my_collisions);

@ -338,23 +339,23 @@ timer_stress2_main_loop(__rte_unused void *arg)
printf("- Expected %d callbacks, got %d\n", NB_STRESS2_TIMERS,
cb_count);
test_failed = 1;
master_start_slaves();
main_start_workers();
goto cleanup;
}
cb_count = 0;

/* proceed */
master_start_slaves();
main_start_workers();
} else {
/* proceed */
slave_wait_to_start();
worker_wait_to_start();
if (test_failed)
goto cleanup;
}

/* now test again, just stop and restart timers at random after init*/
for (i = 0; i < NB_STRESS2_TIMERS; i++)
rte_timer_reset(&timers[i], delay, SINGLE, master,
rte_timer_reset(&timers[i], delay, SINGLE, main_lcore,
timer_stress2_cb, NULL);

/* pick random timer to reset, stopping them first half the time */

@ -362,7 +363,7 @@ timer_stress2_main_loop(__rte_unused void *arg)
int r = rand() % NB_STRESS2_TIMERS;
if (i % 2)
rte_timer_stop(&timers[r]);
rte_timer_reset(&timers[r], delay, SINGLE, master,
rte_timer_reset(&timers[r], delay, SINGLE, main_lcore,
timer_stress2_cb, NULL);
}

@ -370,8 +371,8 @@ timer_stress2_main_loop(__rte_unused void *arg)
rte_delay_ms(100);

/* now check that we get the right number of callbacks */
if (lcore_id == master) {
master_wait_for_slaves();
if (lcore_id == main_lcore) {
main_wait_for_workers();

rte_timer_manage();
if (cb_count != NB_STRESS2_TIMERS) {

@ -386,14 +387,14 @@ timer_stress2_main_loop(__rte_unused void *arg)
}

cleanup:
if (lcore_id == master) {
master_wait_for_slaves();
if (lcore_id == main_lcore) {
main_wait_for_workers();
if (timers != NULL) {
rte_free(timers);
timers = NULL;
}
} else {
slave_finish();
worker_finish();
}

return 0;

@ -465,7 +466,7 @@ timer_basic_main_loop(__rte_unused void *arg)
int64_t diff = 0;

/* launch all timers on core 0 */
if (lcore_id == rte_get_master_lcore()) {
if (lcore_id == rte_get_main_lcore()) {
mytimer_reset(&mytiminfo[0], hz/4, SINGLE, lcore_id,
timer_basic_cb);
mytimer_reset(&mytiminfo[1], hz/2, SINGLE, lcore_id,

@ -563,7 +564,7 @@ test_timer(void)

/* start other cores */
printf("Start timer stress tests\n");
rte_eal_mp_remote_launch(timer_stress_main_loop, NULL, CALL_MASTER);
rte_eal_mp_remote_launch(timer_stress_main_loop, NULL, CALL_MAIN);
rte_eal_mp_wait_lcore();

/* stop timer 0 used for stress test */

@ -572,7 +573,7 @@ test_timer(void)
/* run a second, slightly different set of stress tests */
printf("\nStart timer stress tests 2\n");
test_failed = 0;
rte_eal_mp_remote_launch(timer_stress2_main_loop, NULL, CALL_MASTER);
rte_eal_mp_remote_launch(timer_stress2_main_loop, NULL, CALL_MAIN);
rte_eal_mp_wait_lcore();
if (test_failed)
return TEST_FAILED;

@ -584,7 +585,7 @@ test_timer(void)

/* start other cores */
printf("\nStart timer basic tests\n");
rte_eal_mp_remote_launch(timer_basic_main_loop, NULL, CALL_MASTER);
rte_eal_mp_remote_launch(timer_basic_main_loop, NULL, CALL_MAIN);
rte_eal_mp_wait_lcore();

/* stop all timers */
@ -54,10 +54,10 @@
#define N_TIMERS 50

static struct rte_timer timer[N_TIMERS];
static unsigned timer_lcore_id[N_TIMERS];
static unsigned int timer_lcore_id[N_TIMERS];

static unsigned master;
static volatile unsigned stop_slaves;
static unsigned int main_lcore;
static volatile unsigned int stop_workers;

static int reload_timer(struct rte_timer *tim);

@ -90,7 +90,7 @@ reload_timer(struct rte_timer *tim)
(tim - timer);
int ret;

ret = rte_timer_reset(tim, ticks, PERIODICAL, master, timer_cb, NULL);
ret = rte_timer_reset(tim, ticks, PERIODICAL, main_lcore, timer_cb, NULL);
if (ret != 0) {
rte_log(RTE_LOG_DEBUG, timer_logtype_test,
"- core %u failed to reset timer %" PRIuPTR " (OK)\n",

@ -101,7 +101,7 @@ reload_timer(struct rte_timer *tim)
}

static int
slave_main_loop(__rte_unused void *arg)
worker_main_loop(__rte_unused void *arg)
{
unsigned lcore_id = rte_lcore_id();
unsigned i;

@ -110,7 +110,7 @@ slave_main_loop(__rte_unused void *arg)

printf("Starting main loop on core %u\n", lcore_id);

while (!stop_slaves) {
while (!stop_workers) {
/* Wait until the timer manager is running.
* We know it's running when we see timer[0] NOT pending.
*/

@ -147,7 +147,7 @@ test_timer_racecond(void)
unsigned lcore_id;
unsigned i;

master = lcore_id = rte_lcore_id();
main_lcore = lcore_id = rte_lcore_id();
hz = rte_get_timer_hz();

/* init and start timers */

@ -156,8 +156,8 @@ test_timer_racecond(void)
ret = reload_timer(&timer[i]);
TEST_ASSERT(ret == 0, "reload_timer failed");

/* Distribute timers to slaves.
* Note that we assign timer[0] to the master.
/* Distribute timers to workers.
* Note that we assign timer[0] to the main.
*/
timer_lcore_id[i] = lcore_id;
lcore_id = rte_get_next_lcore(lcore_id, 1, 1);

@ -167,11 +167,11 @@ test_timer_racecond(void)
cur_time = rte_get_timer_cycles();
end_time = cur_time + (hz * TEST_DURATION_S);

/* start slave cores */
stop_slaves = 0;
/* start worker cores */
stop_workers = 0;
printf("Start timer manage race condition test (%u seconds)\n",
TEST_DURATION_S);
rte_eal_mp_remote_launch(slave_main_loop, NULL, SKIP_MASTER);
rte_eal_mp_remote_launch(worker_main_loop, NULL, SKIP_MAIN);

while (diff >= 0) {
/* run the timers */

@ -184,9 +184,9 @@ test_timer_racecond(void)
diff = end_time - cur_time;
}

/* stop slave cores */
/* stop worker cores */
printf("Stopping timer manage race condition test\n");
stop_slaves = 1;
stop_workers = 1;
rte_eal_mp_wait_lcore();

/* stop timers */
@ -26,7 +26,7 @@
#define launch_proc(ARGV) process_dup(ARGV, RTE_DIM(ARGV), __func__)

struct test_info {
unsigned int mstr_lcore;
unsigned int main_lcore;
unsigned int mgr_lcore;
unsigned int sec_lcore;
uint32_t timer_data_id;

@ -137,12 +137,12 @@ test_timer_secondary(void)
TEST_ASSERT_SUCCESS(ret, "Failed to allocate timer data "
"instance");

unsigned int *mstr_lcorep = &test_info->mstr_lcore;
unsigned int *main_lcorep = &test_info->main_lcore;
unsigned int *mgr_lcorep = &test_info->mgr_lcore;
unsigned int *sec_lcorep = &test_info->sec_lcore;

*mstr_lcorep = rte_get_master_lcore();
*mgr_lcorep = rte_get_next_lcore(*mstr_lcorep, 1, 1);
*main_lcorep = rte_get_main_lcore();
*mgr_lcorep = rte_get_next_lcore(*main_lcorep, 1, 1);
*sec_lcorep = rte_get_next_lcore(*mgr_lcorep, 1, 1);

ret = rte_eal_remote_launch(timer_manage_loop,
@ -132,7 +132,7 @@ run_test(const char *str, lcore_function_t f, struct test_data *data, size_t sz)

memset(data, 0, sz);
data->nb_workers = rte_lcore_count() - 1;
RTE_LCORE_FOREACH_SLAVE(id)
RTE_LCORE_FOREACH_WORKER(id)
rte_eal_remote_launch(f, &data->ldata[worker++], id);

wait_till_workers_are_ready(data);

@ -140,7 +140,7 @@ run_test(const char *str, lcore_function_t f, struct test_data *data, size_t sz)
measure_perf(str, data);
signal_workers_to_finish(data);

RTE_LCORE_FOREACH_SLAVE(id)
RTE_LCORE_FOREACH_WORKER(id)
rte_eal_wait_lcore(id);
}
@ -362,7 +362,7 @@ For example:
typedef int (lcore_function_t)(void *);

/* launch a function of lcore_function_t type */
int rte_eal_remote_launch(lcore_function_t *f, void *arg, unsigned slave_id);
int rte_eal_remote_launch(lcore_function_t *f, void *arg, unsigned worker_id);


C Indentation
@ -42,13 +42,13 @@ I am running a 32-bit DPDK application on a NUMA system, and sometimes the appli
If your system has a lot (>1 GB size) of hugepage memory, not all of it will be allocated.
Due to hugepages typically being allocated on a local NUMA node, the hugepages allocation the application gets during the initialization depends on which
NUMA node it is running on (the EAL does not affinitize cores until much later in the initialization process).
Sometimes, the Linux OS runs the DPDK application on a core that is located on a different NUMA node from DPDK master core and
Sometimes, the Linux OS runs the DPDK application on a core that is located on a different NUMA node from DPDK main core and
therefore all the hugepages are allocated on the wrong socket.

To avoid this scenario, either lower the amount of hugepage memory available to 1 GB size (or less), or run the application with taskset
affinitizing the application to a would-be master core.
affinitizing the application to a would-be main core.

For example, if your EAL coremask is 0xff0, the master core will usually be the first core in the coremask (0x10); this is what you have to supply to taskset::
For example, if your EAL coremask is 0xff0, the main core will usually be the first core in the coremask (0x10); this is what you have to supply to taskset::

taskset 0x10 ./l2fwd -l 4-11 -n 2
@ -312,7 +312,7 @@ Custom worker function :numref:`dtg_distributor_worker`.
cores.

* For high-performance execution logic ensure running it on correct NUMA
and non-master core.
and worker core.

* Analyze run logic with ``rte_dump_stack`` and
``rte_memdump`` for more insights.
@ -33,9 +33,9 @@ Lcore-related options
At a given instance only one core option ``--lcores``, ``-l`` or ``-c`` can
be used.

* ``--master-lcore <core ID>``
* ``--main-lcore <core ID>``

Core ID that is used as master.
Core ID that is used as main.
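
For illustration, the renamed option is passed like any other EAL option; the application name and core IDs below are placeholders only:

.. code-block:: console

   ./dpdk-testpmd -l 1-3 --main-lcore 1
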
* ``-s <service core mask>``
@ -232,7 +232,7 @@ The following selection demonstrates the launch of the test application to run o
EAL: coremask set to 1
EAL: Detected lcore 0 on socket 0
...
EAL: Master core 0 is ready (tid=1b2ad720)
EAL: Main core 0 is ready (tid=1b2ad720)
RTE>>

Applications

@ -294,7 +294,7 @@ the logical core layout of the platform should be determined when selecting a co
EAL: Virtual area found at 0x7f0a5c000000 (size = 0x200000)
EAL: Requesting 1024 pages of size 2MB from socket 0
EAL: Requesting 1024 pages of size 2MB from socket 1
EAL: Master core 0 is ready (tid=de25b700)
EAL: Main core 0 is ready (tid=de25b700)
EAL: Core 1 is ready (tid=5b7fe700)
EAL: Core 3 is ready (tid=5a7fc700)
EAL: Core 2 is ready (tid=5affd700)
@ -385,7 +385,7 @@ The application enables multiple TX and RX queues when it is started.

.. code-block:: console

testpmd -l 1,3,5 --master-lcore 1 --txq=2 --rxq=2 --nb-cores=2
testpmd -l 1,3,5 --main-lcore 1 --txq=2 --rxq=2 --nb-cores=2

**TSS**
@ -64,7 +64,7 @@ It consist of calls to the pthread library (more specifically, pthread_self(), p
.. note::

Initialization of objects, such as memory zones, rings, memory pools, lpm tables and hash tables,
should be done as part of the overall application initialization on the master lcore.
should be done as part of the overall application initialization on the main lcore.
The creation and initialization functions for these objects are not multi-thread safe.
However, once initialized, the objects themselves can safely be used in multiple threads simultaneously.
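
As an illustration of this note, a minimal sketch (``worker_fn`` and the pool sizing are placeholders, not part of the documented API) creates a shared object on the main lcore before launching the workers:

.. code-block:: c

    /* on the main lcore, before any worker is launched */
    struct rte_mempool *mp = rte_pktmbuf_pool_create("MBUF_POOL",
            8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
    if (mp == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

    /* once created, the pool can safely be used by all workers */
    RTE_LCORE_FOREACH_WORKER(lcore_id)
        rte_eal_remote_launch(worker_fn, mp, lcore_id);
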
@ -186,7 +186,7 @@ very dependent on the memory allocation patterns of the application.

Additional restrictions are present when running in 32-bit mode. In dynamic
memory mode, by default maximum of 2 gigabytes of VA space will be preallocated,
and all of it will be on master lcore NUMA node unless ``--socket-mem`` flag is
and all of it will be on main lcore NUMA node unless ``--socket-mem`` flag is
used.

In legacy mode, VA space will only be preallocated for segments that were

@ -641,7 +641,7 @@ controlled with tools like taskset (Linux) or cpuset (FreeBSD),
- with affinity restricted to 2-4, the Control Threads will end up on
CPU 4.
- with affinity restricted to 2-3, the Control Threads will end up on
CPU 2 (master lcore, which is the default when no CPU is available).
CPU 2 (main lcore, which is the default when no CPU is available).

.. _known_issue_label:

@ -172,7 +172,7 @@ converts the received packets to events in the same manner as packets
received on a polled Rx queue. The interrupt thread is affinitized to the same
CPUs as the lcores of the Rx adapter service function, if the Rx adapter
service function has not been mapped to any lcores, the interrupt thread
is mapped to the master lcore.
is mapped to the main lcore.

Rx Callback for SW Rx Adapter
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -124,10 +124,13 @@ LAN
LPM
Longest Prefix Match

master lcore
main lcore
The execution unit that executes the main() function and that launches
other lcores.

master lcore
Deprecated name for *main lcore*. No longer used.

mbuf
An mbuf is a data structure used internally to carry messages (mainly
network packets). The name is derived from BSD stacks. To understand the

@ -185,7 +188,7 @@ Rx
Reception

Slave lcore
Any *lcore* that is not the *master lcore*.
Deprecated name for *worker lcore*. No longer used.

Socket
A physical CPU, that includes several *cores*.

@ -237,6 +240,9 @@ VLAN
Wr
Write

Worker lcore
Any *lcore* that is not the *main lcore*.

WRED
Weighted Random Early Detection
@ -170,7 +170,7 @@
sodipodi:role="line"
id="tspan3165"
x="114.71806"
y="46.6479">Master lcore</tspan></text>
y="46.6479">main lcore</tspan></text>
<text
xml:space="preserve"
style="font-size:20px;font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;font-family:Bitstream Vera Sans"
@ -28,25 +28,6 @@ Deprecation Notices
* kvargs: The function ``rte_kvargs_process`` will get a new parameter
for returning key match count. It will ease handling of no-match case.

* eal: To be more inclusive in choice of naming, the DPDK project
will replace uses of master/slave in the API's and command line arguments.

References to master/slave in relation to lcore will be renamed
to initial/worker. The function ``rte_get_master_lcore()``
will be renamed to ``rte_get_initial_lcore()``.
For the 20.11 release, both names will be present and the
old function will be marked with the deprecated tag.
The old function will be removed in a future version.

The iterator for worker lcores will also change:
``RTE_LCORE_FOREACH_SLAVE`` will be replaced with
``RTE_LCORE_FOREACH_WORKER``.

The ``master-lcore`` argument to testpmd will be replaced
with ``initial-lcore``. The old ``master-lcore`` argument
will produce a runtime notification in 20.11 release, and
be removed completely in a future release.

* eal: The terms blacklist and whitelist to describe devices used
by DPDK will be replaced in the 20.11 relase.
This will apply to command line arguments as well as macros.
@ -384,6 +384,13 @@ API Changes
The information provided by these macros is available through standard
compiler macros.

* eal: Replaced the function ``rte_get_master_lcore()`` with
``rte_get_main_lcore()``. The old function is deprecated.

The iterator for worker lcores is also changed:
``RTE_LCORE_FOREACH_SLAVE`` is replaced with
``RTE_LCORE_FOREACH_WORKER``.
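
In application code the migration is mechanical; a sketch of the before/after pattern (``work`` is a placeholder launch function):

.. code-block:: c

    /* before (deprecated since this release) */
    RTE_LCORE_FOREACH_SLAVE(lcore_id)
        rte_eal_remote_launch(work, NULL, lcore_id);

    /* after */
    RTE_LCORE_FOREACH_WORKER(lcore_id)
        rte_eal_remote_launch(work, NULL, lcore_id);
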
* eal: The ``rte_logs`` struct and global symbol was made private
and is no longer part of the API.
@ -94,7 +94,7 @@ device gets linked to a corresponding ethernet port as whitelisted by
the parameter -w.
3 cores are allocated to the application, and assigned as:

- core 3 is the master and used to print the stats live on screen,
- core 3 is the main and used to print the stats live on screen,

- core 4 is the encoding lcore performing Rx and Turbo Encode operations
@ -64,8 +64,8 @@ Explanation
-----------

The sample program has two parts: A background `packet reflector`_
that runs on a slave core, and a foreground `Ethtool Shell`_ that
runs on the master core. These are described below.
that runs on a worker core, and a foreground `Ethtool Shell`_ that
runs on the main core. These are described below.

Packet Reflector
~~~~~~~~~~~~~~~~
@ -1,4 +1,4 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2010-2014 Intel Corporation.

Hello World Sample Application

@ -75,13 +75,13 @@ The code that launches the function on each lcore is as follows:

.. code-block:: c

/* call lcore_hello() on every slave lcore */
/* call lcore_hello() on every worker lcore */

RTE_LCORE_FOREACH_SLAVE(lcore_id) {
RTE_LCORE_FOREACH_WORKER(lcore_id) {
rte_eal_remote_launch(lcore_hello, NULL, lcore_id);
}

/* call it on master lcore too */
/* call it on main lcore too */

lcore_hello(NULL);

@ -89,6 +89,6 @@ The following code is equivalent and simpler:

.. code-block:: c

rte_eal_mp_remote_launch(lcore_hello, NULL, CALL_MASTER);
rte_eal_mp_remote_launch(lcore_hello, NULL, CALL_MAIN);

Refer to the *DPDK API Reference* for detailed information on the rte_eal_mp_remote_launch() function.
|
||||
incoming traffic and makes a copy of each packet. The second lcore then
|
||||
updates MAC address and sends the copy. If one lcore per port is used,
|
||||
both operations are done sequentially. For each configuration an additional
|
||||
lcore is needed since the master lcore does not handle traffic but is
|
||||
lcore is needed since the main lcore does not handle traffic but is
|
||||
responsible for configuration, statistics printing and safe shutdown of
|
||||
all ports and devices.
|
||||
|
||||
The application can use a maximum of 8 ports.
|
||||
|
||||
To run the application in a Linux environment with 3 lcores (the master lcore,
|
||||
To run the application in a Linux environment with 3 lcores (the main lcore,
|
||||
plus two forwarding cores), a single port (port 0), software copying and MAC
|
||||
updating issue the command:
|
||||
|
||||
@ -83,7 +83,7 @@ updating issue the command:
|
||||
|
||||
$ ./build/ioatfwd -l 0-2 -n 2 -- -p 0x1 --mac-updating -c sw
|
||||
|
||||
To run the application in a Linux environment with 2 lcores (the master lcore,
|
||||
To run the application in a Linux environment with 2 lcores (the main lcore,
|
||||
plus one forwarding core), 2 ports (ports 0 and 1), hardware copying and no MAC
|
||||
updating issue the command:
|
||||
|
||||
@ -208,7 +208,7 @@ After that each port application assigns resources needed.
|
||||
cfg.nb_lcores = rte_lcore_count() - 1;
|
||||
if (cfg.nb_lcores < 1)
|
||||
rte_exit(EXIT_FAILURE,
|
||||
"There should be at least one slave lcore.\n");
|
||||
"There should be at least one worker lcore.\n");
|
||||
|
||||
ret = 0;
|
||||
|
||||
@ -310,9 +310,9 @@ If initialization is successful, memory for hardware device
|
||||
statistics is allocated.
|
||||
|
||||
Finally ``main()`` function starts all packet handling lcores and starts
|
||||
printing stats in a loop on the master lcore. The application can be
|
||||
interrupted and closed using ``Ctrl-C``. The master lcore waits for
|
||||
all slave processes to finish, deallocates resources and exits.
|
||||
printing stats in a loop on the main lcore. The application can be
|
||||
interrupted and closed using ``Ctrl-C``. The main lcore waits for
|
||||
all worker lcores to finish, deallocates resources and exits.
|
||||
|
||||
The processing lcores launching function are described below.
|
||||
|
||||
|
@ -122,7 +122,7 @@ is displayed and the application is terminated.
Run-time
~~~~~~~~

The master thread is creating and managing all the application objects based on CLI input.
The main thread is creating and managing all the application objects based on CLI input.

Each data plane thread runs one or several pipelines previously assigned to it in round-robin order. Each data plane thread
executes two tasks in time-sharing mode:

@ -130,7 +130,7 @@ executes two tasks in time-sharing mode:
1. *Packet processing task*: Process bursts of input packets read from the pipeline input ports.

2. *Message handling task*: Periodically, the data plane thread pauses the packet processing task and polls for request
messages send by the master thread. Examples: add/remove pipeline to/from current data plane thread, add/delete rules
messages send by the main thread. Examples: add/remove pipeline to/from current data plane thread, add/delete rules
to/from given table of a specific pipeline owned by the current data plane thread, read statistics, etc.

Examples
@ -16,7 +16,7 @@ Overview
--------

The application demonstrates how to protect against 'silent outages'
on packet processing cores. A Keep Alive Monitor Agent Core (master)
on packet processing cores. A Keep Alive Monitor Agent Core (main)
monitors the state of packet processing cores (worker cores) by
dispatching pings at a regular time interval (default is 5ms) and
monitoring the state of the cores. Cores states are: Alive, MIA, Dead
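
The monitoring pattern described above can be sketched with the ``rte_keepalive`` API; ``dead_core_cb`` is a placeholder failure callback and error handling is omitted:

.. code-block:: c

    /* main core: create the monitor and register each worker core */
    struct rte_keepalive *ka = rte_keepalive_create(dead_core_cb, NULL);
    RTE_LCORE_FOREACH_WORKER(lcore_id)
        rte_keepalive_register_core(ka, lcore_id);

    /* each worker then marks itself alive inside its packet loop */
    rte_keepalive_mark_alive(ka);
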
@ -630,8 +630,8 @@ not many packets to send, however it improves performance:

/* if timer has reached its timeout */
if (unlikely(timer_tsc >= timer_period)) {
/* do this only on master core */
if (lcore_id == rte_get_master_lcore()) {
/* do this only on main core */
if (lcore_id == rte_get_main_lcore()) {
print_stats();
/* reset the timer */
timer_tsc = 0;

@ -453,9 +453,8 @@ however it improves performance:
/* if timer has reached its timeout */

if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
/* do this only on master core */

if (lcore_id == rte_get_master_lcore()) {
/* do this only on main core */
if (lcore_id == rte_get_main_lcore()) {
print_stats();

/* reset the timer */
@ -22,7 +22,7 @@ Run-time path is main thing that differs from L3 forwarding sample application.
Difference is that forwarding logic starting from Rx, followed by LPM lookup,
TTL update and finally Tx is implemented inside graph nodes. These nodes are
interconnected in graph framework. Application main loop needs to walk over
graph using ``rte_graph_walk()`` with graph objects created one per slave lcore.
graph using ``rte_graph_walk()`` with graph objects created one per worker lcore.

The lookup method is as per implementation of ``ip4_lookup`` graph node.
The ID of the output interface for the input packet is the next hop returned by

@ -265,7 +265,7 @@ headers will be provided run-time using ``rte_node_ip4_route_add()`` and
Since currently ``ip4_lookup`` and ``ip4_rewrite`` nodes don't support
lock-less mechanisms(RCU, etc) to add run-time forwarding data like route and
rewrite data, forwarding data is added before packet processing loop is
launched on slave lcore.
launched on worker lcore.

.. code-block:: c

@ -297,7 +297,7 @@ Packet Forwarding using Graph Walk
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Now that all the device configurations are done, graph creations are done and
forwarding data is updated with nodes, slave lcores will be launched with graph
forwarding data is updated with nodes, worker lcores will be launched with graph
main loop. Graph main loop is very simple in the sense that it needs to
continuously call a non-blocking API ``rte_graph_walk()`` with it's lcore
specific graph object that was already created.
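
That loop can be sketched as follows; ``force_quit`` and passing the per-lcore graph object as the launch argument are assumptions for illustration, not the sample's exact code:

.. code-block:: c

    static volatile bool force_quit;

    static int
    graph_main_loop(void *arg)
    {
        struct rte_graph *graph = arg; /* this worker lcore's graph object */

        while (!force_quit)
            rte_graph_walk(graph); /* non-blocking walk over ready nodes */
        return 0;
    }
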
@ -441,7 +441,7 @@ The telemetry mode support for ``l3fwd-power`` is a standalone mode, in this mod
``l3fwd-power`` does simple l3fwding along with calculating empty polls, full polls,
and busy percentage for each forwarding core. The aggregation of these
values of all cores is reported as application level telemetry to metric
library for every 500ms from the master core.
library for every 500ms from the main core.

The busy percentage is calculated by recording the poll_count
and when the count reaches a defined value the total

@ -401,9 +401,8 @@ However, it improves performance:
/* if timer has reached its timeout */

if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
/* do this only on master core */

if (lcore_id == rte_get_master_lcore()) {
/* do this only on main core */
if (lcore_id == rte_get_main_lcore()) {
print_stats();

/* reset the timer */
@ -64,7 +64,8 @@ The process should start successfully and display a command prompt as follows:
EAL: Virtual area found at 0x7ff200000000 (size = 0x40000000)
...

EAL: Master core 0 is ready (tid=54e41820)
EAL: check module finished
EAL: Main core 0 is ready (tid=54e41820)
EAL: Core 1 is ready (tid=53b32700)

Starting core 1

@ -90,7 +91,7 @@ At any stage, either process can be terminated using the quit command.

.. code-block:: console

EAL: Master core 10 is ready (tid=b5f89820) EAL: Master core 8 is ready (tid=864a3820)
EAL: Main core 10 is ready (tid=b5f89820) EAL: Main core 8 is ready (tid=864a3820)
EAL: Core 11 is ready (tid=84ffe700) EAL: Core 9 is ready (tid=85995700)
Starting core 11 Starting core 9
simple_mp > send hello_secondary simple_mp > core 9: Received 'hello_secondary'

@ -271,7 +272,7 @@ In addition to the EAL parameters, the application- specific parameters are:

.. note::

In the server process, a single thread, the master thread, that is, the lowest numbered lcore in the coremask/corelist, performs all packet I/O.
In the server process, a single thread, the main thread, that is, the lowest numbered lcore in the coremask/corelist, performs all packet I/O.
If a coremask/corelist is specified with more than a single lcore bit set in it,
an additional lcore will be used for a thread to periodically print packet count statistics.
@ -12,14 +12,14 @@ Overview

The application uses at least three CPU cores:

* RX core (maser core) receives traffic from the NIC ports and feeds Worker
* RX core (main core) receives traffic from the NIC ports and feeds Worker
cores with traffic through SW queues.

* Worker core (slave core) basically do some light work on the packet.
* Worker (worker core) basically do some light work on the packet.
Currently it modifies the output port of the packet for configurations with
more than one port enabled.

* TX Core (slave core) receives traffic from Worker cores through software queues,
* TX Core (worker core) receives traffic from Worker cores through software queues,
inserts out-of-order packets into reorder buffer, extracts ordered packets
from the reorder buffer and sends them to the NIC ports for transmission.

@ -46,7 +46,7 @@ The application execution command line is:
./packet_ordering [EAL options] -- -p PORTMASK [--disable-reorder] [--insight-worker]

The -c EAL CPU_COREMASK option has to contain at least 3 CPU cores.
The first CPU core in the core mask is the master core and would be assigned to
The first CPU core in the core mask is the main core and would be assigned to
RX core, the last to TX core and the rest to Worker cores.
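
A sketch of how those roles could be derived from the enabled lcores (illustrative only; the sample's own assignment logic is more involved):

.. code-block:: c

    unsigned int rx_core = rte_get_main_lcore(); /* first core: RX */
    unsigned int tx_core = 0, lcore;

    RTE_LCORE_FOREACH_WORKER(lcore)
        tx_core = lcore; /* after the loop: last enabled core, used for TX */
    /* the remaining worker lcores take the Worker role */
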
The PORTMASK parameter must contain either 1 or even enabled port numbers.
@ -280,8 +280,8 @@ functionality into different threads, and the pairs of RX and TX threads are
interconnected via software rings.

On initialization an L-thread scheduler is started on every EAL thread. On all
but the master EAL thread only a dummy L-thread is initially started.
The L-thread started on the master EAL thread then spawns other L-threads on
but the main EAL thread only a dummy L-thread is initially started.
The L-thread started on the main EAL thread then spawns other L-threads on
different L-thread schedulers according the command line parameters.

The RX threads poll the network interface queues and post received packets

@ -1217,5 +1217,5 @@ Setting ``LTHREAD_DIAG`` also enables counting of statistics about cache and
queue usage, and these statistics can be displayed by calling the function
``lthread_diag_stats_display()``. This function also performs a consistency
check on the caches and queues. The function should only be called from the
master EAL thread after all slave threads have stopped and returned to the C
main EAL thread after all worker threads have stopped and returned to the C
main program, otherwise the consistency check will fail.
@ -21,7 +21,7 @@ The PTP sample application is intended as a simple reference implementation of
a PTP client using the DPDK IEEE1588 API.
In order to keep the application simple the following assumptions are made:

* The first discovered master is the master for the session.
* The first discovered master is the main for the session.
* Only L2 PTP packets are supported.
* Only the PTP v2 protocol is supported.
* Only the slave clock is implemented.
@ -71,7 +71,7 @@ Optional application parameters include:
|
||||
In this mode, the application shows a command line that can be used for obtaining statistics while
|
||||
scheduling is taking place (see interactive mode below for more information).
|
||||
|
||||
* --mst n: Master core index (the default value is 1).
|
||||
* --mnc n: Main core index (the default value is 1).
|
||||
|
||||
* --rsz "A, B, C": Ring sizes:
|
||||
|
||||

@ -329,7 +329,7 @@ Another example with 2 packet flow configurations using different ports but shar
Note that independent cores for the packet flow configurations for each of the RX, WT and TX thread are also supported,
providing flexibility to balance the work.

The EAL coremask/corelist is constrained to contain the default mastercore 1 and the RX, WT and TX cores only.
The EAL coremask/corelist is constrained to contain the default main core 1 and the RX, WT and TX cores only.
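
As a hedged illustration of that constraint (a hypothetical startup check, not code from the sample):

.. code-block:: c

    /* The application expects lcore 1 to be the main core; bail out
     * early if the EAL coremask/corelist was given differently. */
    if (rte_get_main_lcore() != 1)
        rte_exit(EXIT_FAILURE, "expected main core 1 in the coremask\n");
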
Explanation
-----------

@ -48,18 +48,18 @@ In addition to EAL initialization, the timer subsystem must be initialized, by c

    rte_timer_subsystem_init();

After timer creation (see the next paragraph),
the main loop is executed on each slave lcore using the well-known rte_eal_remote_launch() and also on the master.
After timer creation (see the next paragraph), the main loop is
executed on each worker lcore using the well-known
rte_eal_remote_launch() and also on the main.

.. code-block:: c

    /* call lcore_mainloop() on every slave lcore */

    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
    /* call lcore_mainloop() on every worker lcore */
    RTE_LCORE_FOREACH_WORKER(lcore_id) {
        rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);
    }

    /* call it on master lcore too */
    /* call it on main lcore too */

    (void) lcore_mainloop(NULL);
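
The loop body itself is outside this hunk; a minimal sketch of what ``lcore_mainloop()`` typically looks like in this sample (``TIMER_RESOLUTION_CYCLES`` is an assumed application-defined constant):

.. code-block:: c

    /* Spin on each lcore and periodically call rte_timer_manage(),
     * which runs the callbacks of timers expiring on this lcore. */
    static __rte_noreturn int
    lcore_mainloop(__rte_unused void *arg)
    {
        uint64_t prev_tsc = 0, cur_tsc, diff_tsc;

        while (1) {
            cur_tsc = rte_rdtsc();
            diff_tsc = cur_tsc - prev_tsc;
            if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
                rte_timer_manage();
                prev_tsc = cur_tsc;
            }
        }
    }
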
@ -105,7 +105,7 @@ This call to rte_timer_init() is necessary before doing any other operation on t

Then, the two timers are configured:

* The first timer (timer0) is loaded on the master lcore and expires every second.
* The first timer (timer0) is loaded on the main lcore and expires every second.
  Since the PERIODICAL flag is provided, the timer is reloaded automatically by the timer subsystem.
  The callback function is timer0_cb().

@ -115,7 +115,7 @@ Then, the two timers are configured:

.. code-block:: c

    /* load timer0, every second, on master lcore, reloaded automatically */
    /* load timer0, every second, on main lcore, reloaded automatically */

    hz = rte_get_hpet_hz();
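
The hunk stops here; based on the description above, the arming call that follows in the sample presumably resembles this sketch (``timer0`` and ``timer0_cb()`` are named in the surrounding text; the exact lines are not shown in this diff):

.. code-block:: c

    /* Arm timer0: expire every hz ticks (one second), reload
     * automatically (PERIODICAL), run timer0_cb() on this lcore. */
    lcore_id = rte_lcore_id();
    rte_timer_reset(&timer0, hz, PERIODICAL, lcore_id, timer0_cb, NULL);
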
@ -71,7 +71,7 @@ The command line options are:
* ``--coremask=0xXX``

  Set the hexadecimal bitmask of the cores running the packet forwarding test.
  The master lcore is reserved for command line parsing only and cannot be masked on for packet forwarding.
  The main lcore is reserved for command line parsing only and cannot be masked on for packet forwarding.

* ``--portmask=0xXX``

@ -724,7 +724,7 @@ This is equivalent to the ``--coremask`` command-line option.

.. note::

   The master lcore is reserved for command line parsing only and cannot be masked on for packet forwarding.
   The main lcore is reserved for command line parsing only and cannot be masked on for packet forwarding.
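
A hedged sketch of how an application could enforce that restriction when validating a forwarding coremask (``fwd_coremask`` is a hypothetical 64-bit variable, not testpmd's actual parsing code):

.. code-block:: c

    /* Reject a coremask that includes the main lcore, which is
     * reserved for command line parsing. */
    if (fwd_coremask & (1ULL << rte_get_main_lcore()))
        rte_exit(EXIT_FAILURE,
                "main lcore cannot be used for packet forwarding\n");
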
set portmask
~~~~~~~~~~~~

@ -257,7 +257,7 @@ int rte_dpaa_portal_init(void *arg)
    BUS_INIT_FUNC_TRACE();

    if ((size_t)arg == 1 || lcore == LCORE_ID_ANY)
        lcore = rte_get_master_lcore();
        lcore = rte_get_main_lcore();
    else
        if (lcore >= RTE_MAX_LCORE)
            return -1;

@ -501,8 +501,8 @@ wait_workers_to_join(int lcore, const rte_atomic32_t *count)


static int
launch_workers_and_wait(int (*master_worker)(void *),
        int (*slave_workers)(void *), uint32_t total_events,
launch_workers_and_wait(int (*main_worker)(void *),
        int (*workers)(void *), uint32_t total_events,
        uint8_t nb_workers, uint8_t sched_type)
{
    uint8_t port = 0;
@ -537,9 +537,9 @@ launch_workers_and_wait(int (*master_worker)(void *),

    w_lcore = rte_get_next_lcore(
            /* start core */ -1,
            /* skip master */ 1,
            /* skip main */ 1,
            /* wrap */ 0);
    rte_eal_remote_launch(master_worker, &param[0], w_lcore);
    rte_eal_remote_launch(main_worker, &param[0], w_lcore);

    for (port = 1; port < nb_workers; port++) {
        param[port].total_events = &atomic_total_events;
@ -548,7 +548,7 @@ launch_workers_and_wait(int (*master_worker)(void *),
        param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
        rte_smp_wmb();
        w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
        rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
        rte_eal_remote_launch(workers, &param[port], w_lcore);
    }

    ret = wait_workers_to_join(w_lcore, &atomic_total_events);

@ -601,8 +601,8 @@ wait_workers_to_join(int lcore, const rte_atomic32_t *count)


static inline int
launch_workers_and_wait(int (*master_worker)(void *),
        int (*slave_workers)(void *), uint32_t total_events,
launch_workers_and_wait(int (*main_worker)(void *),
        int (*worker)(void *), uint32_t total_events,
        uint8_t nb_workers, uint8_t sched_type)
{
    uint8_t port = 0;
@ -637,9 +637,9 @@ launch_workers_and_wait(int (*master_worker)(void *),

    w_lcore = rte_get_next_lcore(
            /* start core */ -1,
            /* skip master */ 1,
            /* skip main */ 1,
            /* wrap */ 0);
    rte_eal_remote_launch(master_worker, &param[0], w_lcore);
    rte_eal_remote_launch(main_worker, &param[0], w_lcore);

    for (port = 1; port < nb_workers; port++) {
        param[port].total_events = &atomic_total_events;
@ -648,7 +648,7 @@ launch_workers_and_wait(int (*master_worker)(void *),
        param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
        rte_smp_wmb();
        w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
        rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
        rte_eal_remote_launch(worker, &param[port], w_lcore);
    }

    ret = wait_workers_to_join(w_lcore, &atomic_total_events);

@ -586,8 +586,8 @@ wait_workers_to_join(const rte_atomic32_t *count)
}

static inline int
launch_workers_and_wait(int (*master_worker)(void *),
        int (*slave_workers)(void *), uint32_t total_events,
launch_workers_and_wait(int (*main_thread)(void *),
        int (*worker_thread)(void *), uint32_t total_events,
        uint8_t nb_workers, uint8_t sched_type)
{
    rte_atomic32_t atomic_total_events;
@ -623,9 +623,9 @@ launch_workers_and_wait(int (*master_worker)(void *),

    w_lcore = rte_get_next_lcore(
            /* start core */ -1,
            /* skip master */ 1,
            /* skip main */ 1,
            /* wrap */ 0);
    rte_eal_remote_launch(master_worker, &param[0], w_lcore);
    rte_eal_remote_launch(main_thread, &param[0], w_lcore);

    for (port = 1; port < nb_workers; port++) {
        param[port].total_events = &atomic_total_events;
@ -634,7 +634,7 @@ launch_workers_and_wait(int (*master_worker)(void *),
        param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
        rte_smp_wmb();
        w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
        rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
        rte_eal_remote_launch(worker_thread, &param[port], w_lcore);
    }

    rte_smp_wmb();

@ -3106,7 +3106,7 @@ worker_loopback(struct test *t, uint8_t disable_implicit_release)

    p_lcore = rte_get_next_lcore(
            /* start core */ -1,
            /* skip master */ 1,
            /* skip main */ 1,
            /* wrap */ 0);
    w_lcore = rte_get_next_lcore(p_lcore, 1, 0);

@ -429,7 +429,7 @@ int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp)
    if (!BNXT_HAS_NQ(bp) || bp->rxtx_nq_ring)
        return 0;

    socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
    socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());

    nqr = rte_zmalloc_socket("nqr",
            sizeof(struct bnxt_cp_ring_info),
@ -820,7 +820,7 @@ int bnxt_alloc_async_ring_struct(struct bnxt *bp)
    if (BNXT_NUM_ASYNC_CPR(bp) == 0)
        return 0;

    socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
    socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());

    cpr = rte_zmalloc_socket("cpr",
            sizeof(struct bnxt_cp_ring_info),

@ -814,7 +814,7 @@ mrvl_flush_bpool(struct rte_eth_dev *dev)
    unsigned int core_id = rte_lcore_id();

    if (core_id == LCORE_ID_ANY)
        core_id = rte_get_master_lcore();
        core_id = rte_get_main_lcore();

    hif = mrvl_get_hif(priv, core_id);

@ -1623,7 +1623,7 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)

    core_id = rte_lcore_id();
    if (core_id == LCORE_ID_ANY)
        core_id = rte_get_master_lcore();
        core_id = rte_get_main_lcore();

    hif = mrvl_get_hif(rxq->priv, core_id);
    if (!hif)
@ -1773,7 +1773,7 @@ mrvl_rx_queue_release(void *rxq)
    unsigned int core_id = rte_lcore_id();

    if (core_id == LCORE_ID_ANY)
        core_id = rte_get_master_lcore();
        core_id = rte_get_main_lcore();

    if (!q)
        return;

@ -143,7 +143,7 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
    snprintf(mz_name, sizeof(mz_name), "%lx",
            (unsigned long)rte_get_timer_cycles());
    if (core_id == (unsigned int)LCORE_ID_ANY)
        core_id = rte_get_master_lcore();
        core_id = rte_get_main_lcore();
    socket_id = rte_lcore_to_socket_id(core_id);
    mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
            RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
@ -182,7 +182,7 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
    snprintf(mz_name, sizeof(mz_name), "%lx",
            (unsigned long)rte_get_timer_cycles());
    if (core_id == (unsigned int)LCORE_ID_ANY)
        core_id = rte_get_master_lcore();
        core_id = rte_get_main_lcore();
    socket_id = rte_lcore_to_socket_id(core_id);
    mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
            RTE_MEMZONE_IOVA_CONTIG, align);

@ -554,7 +554,7 @@ TAILQ_HEAD(pipeline_list, pipeline);
#endif

/**
 * Master thead: data plane thread context
 * Main thread: data plane thread context
 */
struct softnic_thread {
    struct rte_ring *msgq_req;
@ -18,14 +18,14 @@
#include "rte_eth_softnic_internals.h"

/**
 * Master thread: data plane thread init
 * Main thread: data plane thread init
 */
void
softnic_thread_free(struct pmd_internals *softnic)
{
    uint32_t i;

    RTE_LCORE_FOREACH_SLAVE(i) {
    RTE_LCORE_FOREACH_WORKER(i) {
        struct softnic_thread *t = &softnic->thread[i];

        /* MSGQs */
@ -78,7 +78,7 @@ softnic_thread_init(struct pmd_internals *softnic)
        return -1;
    }

    /* Master thread records */
    /* Main thread records */
    t->msgq_req = msgq_req;
    t->msgq_rsp = msgq_rsp;
    t->service_id = UINT32_MAX;
@ -99,7 +99,7 @@ softnic_thread_init(struct pmd_internals *softnic)
static inline int
thread_is_valid(struct pmd_internals *softnic, uint32_t thread_id)
{
    if (thread_id == rte_get_master_lcore())
    if (thread_id == rte_get_main_lcore())
        return 0; /* FALSE */

    if (softnic->params.sc && rte_lcore_has_role(thread_id, ROLE_SERVICE))
@ -209,7 +209,7 @@ pipeline_is_running(struct pipeline *p)
}

/**
 * Master thread & data plane threads: message passing
 * Main thread & data plane threads: message passing
 */
enum thread_req_type {
    THREAD_REQ_PIPELINE_ENABLE = 0,
@ -243,7 +243,7 @@ struct thread_msg_rsp {
};

/**
 * Master thread
 * Main thread
 */
static struct thread_msg_req *
thread_msg_alloc(void)
@ -587,7 +587,7 @@ thread_msg_handle(struct softnic_thread_data *t)
}

/**
 * Master thread & data plane threads: message passing
 * Main thread & data plane threads: message passing
 */
enum pipeline_req_type {
    /* Port IN */
@ -753,7 +753,7 @@ struct pipeline_msg_rsp {
};

/**
 * Master thread
 * Main thread
 */
static struct pipeline_msg_req *
pipeline_msg_alloc(void)

@ -1044,7 +1044,7 @@ main(int argc, char **argv)
    struct stats_lcore_params stats_lcore;
    struct rte_ring *enc_to_dec_ring;
    bool stats_thread_started = false;
    unsigned int master_lcore_id = rte_get_master_lcore();
    unsigned int main_lcore_id = rte_get_main_lcore();

    rte_atomic16_init(&global_exit_flag);

@ -1147,9 +1147,9 @@ main(int argc, char **argv)
    stats_lcore.app_params = &app_params;
    stats_lcore.lconf = lcore_conf;

    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
    RTE_LCORE_FOREACH_WORKER(lcore_id) {
        if (lcore_conf[lcore_id].core_type != 0)
            /* launch per-lcore processing loop on slave lcores */
            /* launch per-lcore processing loop on worker lcores */
            rte_eal_remote_launch(processing_loop,
                    &lcore_conf[lcore_id], lcore_id);
        else if (!stats_thread_started) {
@ -1161,15 +1161,15 @@ main(int argc, char **argv)
    }

    if (!stats_thread_started &&
            lcore_conf[master_lcore_id].core_type != 0)
            lcore_conf[main_lcore_id].core_type != 0)
        rte_exit(EXIT_FAILURE,
                "Not enough lcores to run the statistics printing loop!");
    else if (lcore_conf[master_lcore_id].core_type != 0)
        processing_loop(&lcore_conf[master_lcore_id]);
    else if (lcore_conf[main_lcore_id].core_type != 0)
        processing_loop(&lcore_conf[main_lcore_id]);
    else if (!stats_thread_started)
        stats_loop(&stats_lcore);

    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
    RTE_LCORE_FOREACH_WORKER(lcore_id) {
        ret |= rte_eal_wait_lcore(lcore_id);
    }

@ -573,7 +573,7 @@ static void cmd_start_parsed(__rte_unused void *parsed_result,
        struct cmdline *cl,
        __rte_unused void *data)
{
    int slave_core_id = rte_lcore_id();
    int worker_core_id = rte_lcore_id();

    rte_spinlock_trylock(&global_flag_stru_p->lock);
    if (global_flag_stru_p->LcoreMainIsRunning == 0) {
@ -590,9 +590,9 @@ static void cmd_start_parsed(__rte_unused void *parsed_result,
        return;
    }

    /* start lcore main on core != master_core - ARP response thread */
    slave_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
    if ((slave_core_id >= RTE_MAX_LCORE) || (slave_core_id == 0))
    /* start lcore main on core != main_core - ARP response thread */
    worker_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
    if ((worker_core_id >= RTE_MAX_LCORE) || (worker_core_id == 0))
        return;

    rte_spinlock_trylock(&global_flag_stru_p->lock);
@ -601,8 +601,8 @@ static void cmd_start_parsed(__rte_unused void *parsed_result,
    cmdline_printf(cl,
            "Starting lcore_main on core %d:%d "
            "Our IP:%d.%d.%d.%d\n",
            slave_core_id,
            rte_eal_remote_launch(lcore_main, NULL, slave_core_id),
            worker_core_id,
            rte_eal_remote_launch(lcore_main, NULL, worker_core_id),
            BOND_IP_1,
            BOND_IP_2,
            BOND_IP_3,
@ -802,7 +802,7 @@ cmdline_parse_ctx_t main_ctx[] = {
    NULL,
};

/* prompt function, called from main on MASTER lcore */
/* prompt function, called from main on MAIN lcore */
static void prompt(__rte_unused void *arg1)
{
    struct cmdline *cl;
@ -818,7 +818,7 @@ static void prompt(__rte_unused void *arg1)
int
main(int argc, char *argv[])
{
    int ret, slave_core_id;
    int ret, worker_core_id;
    uint16_t nb_ports, i;

    /* init EAL */
@ -852,23 +852,23 @@ main(int argc, char *argv[])
    rte_spinlock_init(&global_flag_stru_p->lock);

    /* check state of lcores */
    RTE_LCORE_FOREACH_SLAVE(slave_core_id) {
        if (rte_eal_get_lcore_state(slave_core_id) != WAIT)
    RTE_LCORE_FOREACH_WORKER(worker_core_id) {
        if (rte_eal_get_lcore_state(worker_core_id) != WAIT)
            return -EBUSY;
    }

    /* start lcore main on core != master_core - ARP response thread */
    slave_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
    if ((slave_core_id >= RTE_MAX_LCORE) || (slave_core_id == 0))
    /* start lcore main on core != main_core - ARP response thread */
    worker_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
    if ((worker_core_id >= RTE_MAX_LCORE) || (worker_core_id == 0))
        return -EPERM;

    global_flag_stru_p->LcoreMainIsRunning = 1;
    global_flag_stru_p->LcoreMainCore = slave_core_id;
    global_flag_stru_p->LcoreMainCore = worker_core_id;
    printf("Starting lcore_main on core %d:%d Our IP:%d.%d.%d.%d\n",
            slave_core_id,
            worker_core_id,
            rte_eal_remote_launch((lcore_function_t *)lcore_main,
                    NULL,
                    slave_core_id),
                    worker_core_id),
            BOND_IP_1,
            BOND_IP_2,
            BOND_IP_3,

@ -612,7 +612,7 @@ static int
init_power_library(void)
{
    int ret = 0, lcore_id;
    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
    RTE_LCORE_FOREACH_WORKER(lcore_id) {
        /* init power management library */
        ret = rte_power_init(lcore_id);
        if (ret) {
@ -805,7 +805,7 @@ main(int argc, char *argv[])
     * available, the higher frequency cores will go to the
     * distributor first, then rx, then tx.
     */
    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
    RTE_LCORE_FOREACH_WORKER(lcore_id) {

        rte_power_get_capabilities(lcore_id, &lcore_cap);

@ -838,7 +838,7 @@ main(int argc, char *argv[])
     * after the high performing core assignment above, pre-assign
     * them here.
     */
    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
    RTE_LCORE_FOREACH_WORKER(lcore_id) {
        if (lcore_id == (unsigned int)distr_core_id ||
                lcore_id == (unsigned int)rx_core_id ||
                lcore_id == (unsigned int)tx_core_id)
@ -869,7 +869,7 @@ main(int argc, char *argv[])
     * Kick off all the worker threads first, avoiding the pre-assigned
     * lcore_ids for tx, rx and distributor workloads.
     */
    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
    RTE_LCORE_FOREACH_WORKER(lcore_id) {
        if (lcore_id == (unsigned int)distr_core_id ||
                lcore_id == (unsigned int)rx_core_id ||
                lcore_id == (unsigned int)tx_core_id)
@ -922,7 +922,7 @@ main(int argc, char *argv[])
        usleep(1000);
    }

    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
    RTE_LCORE_FOREACH_WORKER(lcore_id) {
        if (rte_eal_wait_lcore(lcore_id) < 0)
            return -1;
    }

@ -176,7 +176,7 @@ static void process_frame(struct app_port *ptr_port,
    rte_ether_addr_copy(&ptr_port->mac_addr, &ptr_mac_hdr->s_addr);
}

static int slave_main(__rte_unused void *ptr_data)
static int worker_main(__rte_unused void *ptr_data)
{
    struct app_port *ptr_port;
    struct rte_mbuf *ptr_frame;
@ -284,16 +284,17 @@ int main(int argc, char **argv)
    app_cfg.cnt_ports = cnt_ports;

    if (rte_lcore_count() < 2)
        rte_exit(EXIT_FAILURE, "No available slave core!\n");
    /* Assume there is an available slave.. */
        rte_exit(EXIT_FAILURE, "No available worker core!\n");

    /* Assume there is an available worker.. */
    id_core = rte_lcore_id();
    id_core = rte_get_next_lcore(id_core, 1, 1);
    rte_eal_remote_launch(slave_main, NULL, id_core);
    rte_eal_remote_launch(worker_main, NULL, id_core);

    ethapp_main();

    app_cfg.exit_now = 1;
    RTE_LCORE_FOREACH_SLAVE(id_core) {
    RTE_LCORE_FOREACH_WORKER(id_core) {
        if (rte_eal_wait_lcore(id_core) < 0)
            return -1;
    }