/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#include "test_pipeline_common.h"
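
/*
 * Print how packets were distributed across the worker cores and return the
 * overall verdict recorded by the workers. Per-worker counts are printed in
 * hex (PRIx64) alongside their percentage of the total.
 */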
int
pipeline_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	int i;
	uint64_t total = 0;
	struct test_pipeline *t = evt_test_priv(test);

	evt_info("Packet distribution across worker cores:");
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;
	for (i = 0; i < t->nb_workers; i++)
		evt_info("Worker %d packets: "CLGRN"%"PRIx64""CLNRM" percentage:"
				CLGRN" %3.2f"CLNRM, i,
				t->worker[i].processed_pkts,
				(((double)t->worker[i].processed_pkts)/total)
				* 100);
	return t->result;
}
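
/* Dump the options relevant to the pipeline test in human readable form. */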
void
pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", pipeline_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
	evt_dump("nb_eth_rx_queues", "%d", opt->eth_queues);
	evt_dump("event_vector", "%d", opt->ena_vector);
	if (opt->ena_vector) {
		evt_dump("vector_size", "%d", opt->vector_size);
		evt_dump("vector_tmo_ns", "%" PRIu64 "", opt->vector_tmo_nsec);
	}
}
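
/* Sum the packets processed so far across all the worker cores. */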
static inline uint64_t
processed_pkts(struct test_pipeline *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}
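
/*
 * Launch the worker function on every lcore selected in opt->wlcores, then
 * busy-poll from the current (main) lcore, printing the current and average
 * throughput roughly once per second until a worker sets t->done. Note that
 * total_mpps and samples are static, so the running average would persist
 * across calls if this function were ever invoked twice in one process.
 * The worker must have the EAL remote-launch signature; a minimal sketch
 * (my_worker and process_events are hypothetical names):
 *
 *	static int
 *	my_worker(void *arg)
 *	{
 *		struct worker_data *w = arg;
 *
 *		while (!w->t->done)
 *			process_events(w);
 *		return 0;
 *	}
 */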
int
pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_pipeline *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				&t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	uint64_t perf_cycles = rte_get_timer_cycles();
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	uint64_t prev_pkts = 0;

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t curr_pkts = processed_pkts(t);

			float mpps = (float)(curr_pkts - prev_pkts)/1000000;

			prev_pkts = curr_pkts;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			fflush(stdout);
		}
	}
	printf("\n");
	return 0;
}
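
/*
 * Sanity check the options: the pipeline test only supports the ethdev Rx
 * adapter producer, needs at least one ethernet device and two lcores (one
 * worker plus the main lcore), and the worker lcore mask, queue/port counts,
 * stage count and scheduling types must all be within bounds.
 */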
int
pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* N worker + main */
	lcores = 2;

	if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR) {
		evt_err("Invalid producer type '%s' valid producer '%s'",
			evt_prod_id_to_name(opt->prod_type),
			evt_prod_id_to_name(EVT_PROD_TYPE_ETH_RX_ADPTR));
		return -1;
	}

	if (!rte_eth_dev_count_avail()) {
		evt_err("test needs minimum 1 ethernet dev");
		return -1;
	}

	if (rte_lcore_count() < lcores) {
		evt_err("test needs minimum %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlap with main lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (pipeline_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	return 0;
}
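
/*
 * Configure every available ethernet device: RSS across opt->eth_queues Rx
 * queues, a single Tx queue, MTU derived from opt->max_pkt_sz, and
 * promiscuous mode. t->internal_port is cleared if any port lacks the Tx
 * adapter internal-port capability. NB_RX_DESC/NB_TX_DESC below size the
 * Rx/Tx rings of every queue.
 */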
#define NB_RX_DESC 128
#define NB_TX_DESC 512
int
pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i, j;
	int ret;
	uint8_t nb_queues = 1;
	struct test_pipeline *t = evt_test_priv(test);
	struct rte_eth_rxconf rx_conf;
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = RTE_ETH_RSS_IP,
			},
		},
	};

	if (!rte_eth_dev_count_avail()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	if (opt->max_pkt_sz < RTE_ETHER_MIN_LEN) {
		evt_err("max_pkt_sz cannot be less than %d",
			RTE_ETHER_MIN_LEN);
		return -EINVAL;
	}

	port_conf.rxmode.mtu = opt->max_pkt_sz - RTE_ETHER_HDR_LEN -
		RTE_ETHER_CRC_LEN;

	t->internal_port = 1;
	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;
		uint32_t caps = 0;

		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id, i, &caps);
		if (ret != 0) {
			evt_err("failed to get event tx adapter[%d] caps", i);
			return ret;
		}

		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			t->internal_port = 0;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id, i, &caps);
		if (ret != 0) {
			evt_err("failed to get event rx adapter[%d] caps", i);
			return ret;
		}

		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
			local_port_conf.rxmode.offloads |=
				RTE_ETH_RX_OFFLOAD_RSS_HASH;

		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error during getting device (port %u) info: %s\n",
				i, strerror(-ret));
			return ret;
		}

		/* Enable mbuf fast free if PMD has the capability. */
		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

		rx_conf = dev_info.default_rxconf;
		rx_conf.offloads = port_conf.rxmode.offloads;

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			evt_info("Port %u modified RSS hash function based on hardware support,"
				" requested:%#"PRIx64" configured:%#"PRIx64"",
				i,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		if (rte_eth_dev_configure(i, opt->eth_queues, nb_queues,
					&local_port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		for (j = 0; j < opt->eth_queues; j++) {
			if (rte_eth_rx_queue_setup(
				    i, j, NB_RX_DESC, rte_socket_id(), &rx_conf,
				    opt->per_port_pool ? t->pool[i] :
							 t->pool[0]) < 0) {
				evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, j);
				return -EINVAL;
			}
		}

		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
					rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
				i, 0);
			return -EINVAL;
		}

		ret = rte_eth_promiscuous_enable(i);
		if (ret != 0) {
			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
				i, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}
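
/*
 * Create one event port per active worker lcore and link it to all of the
 * event queues in queue_arr; worker[i] is paired with event port i, matching
 * the launch order used by pipeline_launch_lcores().
 */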
int
pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t *queue_arr, uint8_t nb_queues,
		const struct rte_event_port_conf p_conf)
{
	int ret;
	uint8_t port;
	struct test_pipeline *t = evt_test_priv(test);

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores); port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;

		ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		if (rte_event_port_link(opt->dev_id, port, queue_arr, NULL,
					nb_queues) != nb_queues)
			goto link_fail;
	}

	return 0;

link_fail:
	evt_err("failed to link queues to port %d", port);
	return -EINVAL;
}
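
/*
 * Create one Rx adapter per ethernet device and connect all of its Rx queues
 * to event queue (prod * stride). When event vectorization is requested, the
 * vector size and timeout are first validated against the adapter limits and
 * a shared mempool of event vectors is created for aggregation. Adapters
 * that lack an internal port are additionally driven from a service core.
 */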
int
pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_mempool *vector_pool = NULL;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	if (opt->ena_vector) {
		unsigned int nb_elem = (opt->pool_sz / opt->vector_size) << 1;

		nb_elem = nb_elem ? nb_elem : 1;
		vector_pool = rte_event_vector_pool_create(
			"vector_pool", nb_elem, 0, opt->vector_size,
			opt->socket_id);
		if (vector_pool == NULL) {
			evt_err("failed to create event vector pool");
			return -ENOMEM;
		}
	}
	RTE_ETH_FOREACH_DEV(prod) {
		struct rte_event_eth_rx_adapter_vector_limits limits;
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d]"
					" capabilities",
					prod);
			return ret;
		}

		if (opt->ena_vector) {
			memset(&limits, 0, sizeof(limits));
			ret = rte_event_eth_rx_adapter_vector_limits_get(
				opt->dev_id, prod, &limits);
			if (ret) {
				evt_err("failed to get vector limits");
				return ret;
			}

			if (opt->vector_size < limits.min_sz ||
			    opt->vector_size > limits.max_sz) {
				evt_err("Vector size [%d] not within limits max[%d] min[%d]",
					opt->vector_size, limits.min_sz,
					limits.max_sz);
				return -EINVAL;
			}

			if (limits.log2_sz &&
			    !rte_is_power_of_2(opt->vector_size)) {
				evt_err("Vector size [%d] not power of 2",
					opt->vector_size);
				return -EINVAL;
			}

			if (opt->vector_tmo_nsec > limits.max_timeout_ns ||
			    opt->vector_tmo_nsec < limits.min_timeout_ns) {
				evt_err("Vector timeout [%" PRIu64
						"] not within limits max[%" PRIu64
						"] min[%" PRIu64 "]",
						opt->vector_tmo_nsec,
						limits.max_timeout_ns,
						limits.min_timeout_ns);
				return -EINVAL;
			}

			if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
				queue_conf.vector_sz = opt->vector_size;
				queue_conf.vector_timeout_ns =
					opt->vector_tmo_nsec;
				queue_conf.rx_queue_flags |=
				RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
				queue_conf.vector_mp = vector_pool;
			} else {
				evt_err("Rx adapter doesn't support event vector");
				return -EINVAL;
			}
		}
		queue_conf.ev.queue_id = prod * stride;
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			rte_event_eth_rx_adapter_service_id_get(prod,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Rx adapter");
				return ret;
			}
		}

		evt_info("Port[%d] using Rx adapter[%d] configured", prod,
				prod);
	}

	return ret;
}
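
/*
 * Create one Tx adapter per ethernet device and register all of its Tx
 * queues with it. As on the Rx side, adapters without an internal port are
 * driven from a service core, and event vector support is verified when
 * vectorization is enabled.
 */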
int
pipeline_event_tx_adapter_setup(struct evt_options *opt,
		struct rte_event_port_conf port_conf)
{
	int ret = 0;
	uint16_t consm;

	RTE_ETH_FOREACH_DEV(consm) {
		uint32_t cap;

		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id,
				consm, &cap);
		if (ret) {
			evt_err("failed to get event tx adapter[%d] caps",
					consm);
			return ret;
		}

		if (opt->ena_vector) {
			if (!(cap &
			      RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR)) {
				evt_err("Tx adapter doesn't support event vector");
				return -EINVAL;
			}
		}

		ret = rte_event_eth_tx_adapter_create(consm, opt->dev_id,
				&port_conf);
		if (ret) {
			evt_err("failed to create tx adapter[%d]", consm);
			return ret;
		}

		ret = rte_event_eth_tx_adapter_queue_add(consm, consm, -1);
		if (ret) {
			evt_err("failed to add tx queues to adapter[%d]",
					consm);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			ret = rte_event_eth_tx_adapter_service_id_get(consm,
					&service_id);
			if (ret != -ESRCH && ret != 0) {
				evt_err("Failed to get Tx adapter service ID");
				return ret;
			}
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Tx adapter");
				return ret;
			}
		}

		evt_info("Port[%d] using Tx adapter[%d] configured", consm,
				consm);
	}

	return ret;
}
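
/* Stop the Rx/Tx adapters and the underlying ethernet devices. */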
void
pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);
	RTE_SET_USED(opt);

	RTE_ETH_FOREACH_DEV(i) {
		rte_event_eth_rx_adapter_stop(i);
		rte_event_eth_tx_adapter_stop(i);
		rte_eth_dev_stop(i);
	}
}
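
/* Stop and close the event device. */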
void
pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}
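
/*
 * Create the packet mempool(s): one pool per ethernet device when
 * opt->per_port_pool is set, otherwise a single pool shared by all ports.
 * The mbuf data room is grown when a device cannot chain enough segments
 * to hold a max_pkt_sz packet.
 */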
int
pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_pipeline *t = evt_test_priv(test);
	int i, ret;

	if (!opt->mbuf_sz)
		opt->mbuf_sz = RTE_MBUF_DEFAULT_BUF_SIZE;

	if (!opt->max_pkt_sz)
		opt->max_pkt_sz = RTE_ETHER_MAX_LEN;

	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		uint16_t data_size = 0;

		memset(&dev_info, 0, sizeof(dev_info));
		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error during getting device (port %u) info: %s\n",
				i, strerror(-ret));
			return ret;
		}

		if (dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
		    dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = opt->max_pkt_sz /
				dev_info.rx_desc_lim.nb_mtu_seg_max;
			data_size += RTE_PKTMBUF_HEADROOM;

			if (data_size > opt->mbuf_sz)
				opt->mbuf_sz = data_size;
		}
		if (opt->per_port_pool) {
			char name[RTE_MEMPOOL_NAMESIZE];

			snprintf(name, RTE_MEMPOOL_NAMESIZE, "%s-%d",
				 test->name, i);
			t->pool[i] = rte_pktmbuf_pool_create(
				name,         /* mempool name */
				opt->pool_sz, /* number of elements */
				0,            /* cache size */
				0, opt->mbuf_sz, opt->socket_id);

			if (t->pool[i] == NULL) {
				evt_err("failed to create mempool %s", name);
				return -ENOMEM;
			}
		}
	}

	if (!opt->per_port_pool) {
		t->pool[0] = rte_pktmbuf_pool_create(
			test->name,   /* mempool name */
			opt->pool_sz, /* number of elements */
			0,            /* cache size */
			0, opt->mbuf_sz, opt->socket_id);

		if (t->pool[0] == NULL) {
			evt_err("failed to create mempool");
			return -ENOMEM;
		}
	}

	return 0;
}
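
/* Free the mempool(s) created in pipeline_mempool_setup(). */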
void
pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	struct test_pipeline *t = evt_test_priv(test);
	int i;

	if (opt->per_port_pool) {
		RTE_ETH_FOREACH_DEV(i)
			rte_mempool_free(t->pool[i]);
	} else {
		rte_mempool_free(t->pool[0]);
	}
}
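
/*
 * Allocate and initialize the per-test private state. The pipeline test
 * always uses the ethdev Rx adapter as producer, so prod_type is forced
 * here rather than taken from the command line.
 */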
int
pipeline_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_pipeline;

	test_pipeline = rte_zmalloc_socket(test->name,
			sizeof(struct test_pipeline), RTE_CACHE_LINE_SIZE,
			opt->socket_id);
	if (test_pipeline == NULL) {
		evt_err("failed to allocate test_pipeline memory");
		goto nomem;
	}
	test->test_priv = test_pipeline;

	struct test_pipeline *t = evt_test_priv(test);

	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	opt->prod_type = EVT_PROD_TYPE_ETH_RX_ADPTR;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));
	return 0;
nomem:
	return -ENOMEM;
}
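
/* Release the private state allocated in pipeline_test_setup(). */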
void
pipeline_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}