app/eventdev: add event Rx adapter setup

Add functions to set up and configure the Rx adapter based on the
number of ethdev ports that are set up.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
This commit is contained in:
Pavan Nikhilesh 2017-12-11 20:43:44 +05:30 committed by Jerin Jacob
parent 7f3daf3426
commit 3617aae53f
4 changed files with 121 additions and 29 deletions

View File

@@ -159,6 +159,7 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
uint8_t queue;
uint8_t nb_queues;
uint8_t nb_ports;
struct rte_event_dev_info dev_info;
nb_ports = evt_nr_active_lcores(opt->wlcores);
nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
@@ -167,13 +168,22 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
nb_queues = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
rte_eth_dev_count() : atq_nb_event_queues(opt);
memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
if (ret) {
evt_err("failed to get eventdev info %d", opt->dev_id);
return ret;
}
const struct rte_event_dev_config config = {
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
.nb_events_limit = 4096,
.nb_events_limit = dev_info.max_num_events,
.nb_event_queue_flows = opt->nb_flows,
.nb_event_port_dequeue_depth = 128,
.nb_event_port_enqueue_depth = 128,
.nb_event_port_dequeue_depth =
dev_info.max_event_port_dequeue_depth,
.nb_event_port_enqueue_depth =
dev_info.max_event_port_enqueue_depth,
};
ret = rte_event_dev_configure(opt->dev_id, &config);
@@ -197,8 +207,7 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
}
}
ret = perf_event_dev_port_setup(test, opt, 1 /* stride */,
nb_queues);
ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues);
if (ret)
return ret;

View File

@@ -203,19 +203,78 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
return 0;
}
/* Set up one event eth Rx adapter per available ethdev port: each port gets
 * its own adapter instance (adapter id == port id), all of the port's Rx
 * queues are added to it, and the port and adapter are started.
 *
 * @param opt        Test options; supplies the event device id and the
 *                   scheduling type used for injected events.
 * @param stride     Event-queue stride: port 'prod' feeds event queue
 *                   'prod * stride'.
 * @param prod_conf  Event port configuration handed to the adapter create
 *                   call (passed by value; the adapter uses it to set up its
 *                   internal event port).
 * @return 0 on success, or the error code of the first failing DPDK call.
 */
static int
perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
struct rte_event_port_conf prod_conf)
{
int ret = 0;
uint16_t prod;
struct rte_event_eth_rx_adapter_queue_conf queue_conf;
memset(&queue_conf, 0,
sizeof(struct rte_event_eth_rx_adapter_queue_conf));
/* All events injected by the adapters use the first configured
 * scheduling type. */
queue_conf.ev.sched_type = opt->sched_type_list[0];
for (prod = 0; prod < rte_eth_dev_count(); prod++) {
uint32_t cap;
/* NOTE(review): the adapter capabilities are queried here but
 * 'cap' is never used below — presumably reserved for a later
 * internal-port/service-core check; confirm intent. */
ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
prod, &cap);
if (ret) {
/* NOTE(review): message reports opt->dev_id, not the
 * failing eth port 'prod' — verify which was intended. */
evt_err("failed to get event rx adapter[%d]"
" capabilities",
opt->dev_id);
return ret;
}
/* Map ethdev port 'prod' onto its own event queue. */
queue_conf.ev.queue_id = prod * stride;
/* One adapter per eth port; adapter id mirrors the port id. */
ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
&prod_conf);
if (ret) {
evt_err("failed to create rx adapter[%d]", prod);
return ret;
}
/* -1 adds every Rx queue of the port to the adapter (per the
 * rte_event_eth_rx_adapter_queue_add API). */
ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
&queue_conf);
if (ret) {
evt_err("failed to add rx queues to adapter[%d]", prod);
return ret;
}
ret = rte_eth_dev_start(prod);
if (ret) {
evt_err("Ethernet dev [%d] failed to start."
" Using synthetic producer", prod);
return ret;
}
ret = rte_event_eth_rx_adapter_start(prod);
if (ret) {
evt_err("Rx adapter[%d] start failed", prod);
return ret;
}
printf("%s: Port[%d] using Rx adapter[%d] started\n", __func__,
prod, prod);
}
return ret;
}
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
uint8_t stride, uint8_t nb_queues)
{
struct test_perf *t = evt_test_priv(test);
uint8_t port, prod;
uint16_t port, prod;
int ret = -1;
struct rte_event_port_conf port_conf;
memset(&port_conf, 0, sizeof(struct rte_event_port_conf));
rte_event_port_default_conf_get(opt->dev_id, 0, &port_conf);
/* port configuration */
const struct rte_event_port_conf wkr_p_conf = {
.dequeue_depth = opt->wkr_deq_dep,
.enqueue_depth = 64,
.new_event_threshold = 4096,
.enqueue_depth = port_conf.enqueue_depth,
.new_event_threshold = port_conf.new_event_threshold,
};
/* setup one port per worker, linking to all queues */
@@ -243,26 +302,38 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
}
/* port for producers, no links */
const struct rte_event_port_conf prod_conf = {
.dequeue_depth = 8,
.enqueue_depth = 32,
.new_event_threshold = 1200,
struct rte_event_port_conf prod_conf = {
.dequeue_depth = port_conf.dequeue_depth,
.enqueue_depth = port_conf.enqueue_depth,
.new_event_threshold = port_conf.new_event_threshold,
};
prod = 0;
for ( ; port < perf_nb_event_ports(opt); port++) {
struct prod_data *p = &t->prod[port];
p->dev_id = opt->dev_id;
p->port_id = port;
p->queue_id = prod * stride;
p->t = t;
ret = rte_event_port_setup(opt->dev_id, port, &prod_conf);
if (ret) {
evt_err("failed to setup port %d", port);
return ret;
if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
for ( ; port < perf_nb_event_ports(opt); port++) {
struct prod_data *p = &t->prod[port];
p->t = t;
}
ret = perf_event_rx_adapter_setup(opt, stride, prod_conf);
if (ret)
return ret;
} else {
prod = 0;
for ( ; port < perf_nb_event_ports(opt); port++) {
struct prod_data *p = &t->prod[port];
p->dev_id = opt->dev_id;
p->port_id = port;
p->queue_id = prod * stride;
p->t = t;
ret = rte_event_port_setup(opt->dev_id, port,
&prod_conf);
if (ret) {
evt_err("failed to setup port %d", port);
return ret;
}
prod++;
}
prod++;
}
return ret;
@@ -451,6 +522,7 @@ void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
for (i = 0; i < rte_eth_dev_count(); i++) {
rte_event_eth_rx_adapter_stop(i);
rte_eth_dev_stop(i);
rte_eth_dev_close(i);
}

View File

@@ -12,6 +12,7 @@
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>

View File

@@ -156,6 +156,7 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
int ret;
int nb_ports;
int nb_queues;
struct rte_event_dev_info dev_info;
nb_ports = evt_nr_active_lcores(opt->wlcores);
nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
@@ -165,13 +166,22 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
rte_eth_dev_count() * nb_stages :
perf_queue_nb_event_queues(opt);
memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
if (ret) {
evt_err("failed to get eventdev info %d", opt->dev_id);
return ret;
}
const struct rte_event_dev_config config = {
.nb_event_queues = nb_queues,
.nb_event_ports = nb_ports,
.nb_events_limit = 4096,
.nb_events_limit = dev_info.max_num_events,
.nb_event_queue_flows = opt->nb_flows,
.nb_event_port_dequeue_depth = 128,
.nb_event_port_enqueue_depth = 128,
.nb_event_port_dequeue_depth =
dev_info.max_event_port_dequeue_depth,
.nb_event_port_enqueue_depth =
dev_info.max_event_port_enqueue_depth,
};
ret = rte_event_dev_configure(opt->dev_id, &config);