examples/l2fwd-event: support event vector

Add support for receiving packets as event vectors. This is disabled by
default and can be enabled with the --event-vector option. The vector
size and the timeout to form a vector can be configured with the
--event-vector-size and --event-vector-tmo options.

Example:
dpdk-l2fwd-event -l 0-3 -n 4 -- -p 0x03 --mode=eventdev \
	--eventq-sched=ordered --event-vector --event-vector-size 16

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
This commit is contained in:
Shijith Thotton 2021-09-27 11:51:35 +05:30 committed by Jerin Jacob
parent e8adca1951
commit 796b07e9c6
6 changed files with 306 additions and 14 deletions

View File

@ -52,7 +52,12 @@ The application requires a number of command line options:
.. code-block:: console
./<build_dir>/examples/dpdk-l2fwd-event [EAL options] -- -p PORTMASK [-q NQ] --[no-]mac-updating --mode=MODE --eventq-sched=SCHED_MODE
./<build_dir>/examples/dpdk-l2fwd-event [EAL options] -- -p PORTMASK
[-q NQ]
[--[no-]mac-updating]
[--mode=MODE]
[--eventq-sched=SCHED_MODE]
[--event-vector [--event-vector-size SIZE] [--event-vector-tmo NS]]
where,
@ -68,6 +73,12 @@ where,
* --config: Configure forwarding port pair mapping. Alternate port pairs by default.
* --event-vector: Enable event vectorization. Only valid if --mode=eventdev.
* --event-vector-size: Max vector size if event vectorization is enabled.
* --event-vector-tmo: Max timeout to form vector in nanoseconds if event vectorization is enabled.
Sample commands to run the application in its different modes are given below:
Poll mode with 4 lcores, 16 ports and 8 RX queues per lcore and MAC address updating enabled,

View File

@ -55,6 +55,9 @@
#define DEFAULT_TIMER_PERIOD 10 /* default period is 10 seconds */
#define MAX_TIMER_PERIOD 86400 /* 1 day max */
#define VECTOR_SIZE_DEFAULT MAX_PKT_BURST
#define VECTOR_TMO_NS_DEFAULT 1E6 /* 1ms */
/* Per-port statistics struct */
struct l2fwd_port_statistics {
uint64_t dropped;
@ -62,6 +65,13 @@ struct l2fwd_port_statistics {
uint64_t rx;
} __rte_cache_aligned;
/* Event vector attributes */
struct l2fwd_event_vector_params {
uint8_t enabled;
uint16_t size;
uint64_t timeout_ns;
};
struct l2fwd_resources {
volatile uint8_t force_quit;
uint8_t event_mode;
@ -74,9 +84,11 @@ struct l2fwd_resources {
uint32_t enabled_port_mask;
uint64_t timer_period;
struct rte_mempool *pktmbuf_pool;
struct rte_mempool *evt_vec_pool;
uint32_t dst_ports[RTE_MAX_ETHPORTS];
struct rte_ether_addr eth_addr[RTE_MAX_ETHPORTS];
struct l2fwd_port_statistics port_stats[RTE_MAX_ETHPORTS];
struct l2fwd_event_vector_params evt_vec;
void *evt_rsrc;
void *poll_rsrc;
} __rte_cache_aligned;

View File

@ -345,19 +345,198 @@ l2fwd_event_main_loop_tx_q_brst_mac(struct l2fwd_resources *rsrc)
L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
}
static __rte_always_inline void
l2fwd_event_vector_fwd(struct l2fwd_resources *rsrc,
struct rte_event_vector *vec,
const uint64_t timer_period, const uint32_t flags)
{
struct rte_mbuf **mbufs = vec->mbufs;
uint16_t i, j;
rte_prefetch0(rte_pktmbuf_mtod(mbufs[0], void *));
/* If vector attribute is valid, mbufs will be from same port/queue */
if (vec->attr_valid) {
vec->port = rsrc->dst_ports[mbufs[0]->port];
if (flags & L2FWD_EVENT_TX_DIRECT)
vec->queue = 0;
if (timer_period > 0)
__atomic_fetch_add(&rsrc->port_stats[mbufs[0]->port].rx,
vec->nb_elem, __ATOMIC_RELAXED);
for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
if (j < vec->nb_elem)
rte_prefetch0(
rte_pktmbuf_mtod(mbufs[j], void *));
if (flags & L2FWD_EVENT_UPDT_MAC)
l2fwd_mac_updating(
mbufs[i], vec->port,
&rsrc->eth_addr[vec->port]);
}
if (timer_period > 0)
__atomic_fetch_add(&rsrc->port_stats[vec->port].tx,
vec->nb_elem, __ATOMIC_RELAXED);
} else {
for (i = 0, j = 1; i < vec->nb_elem; i++, j++) {
if (timer_period > 0)
__atomic_fetch_add(
&rsrc->port_stats[mbufs[i]->port].rx, 1,
__ATOMIC_RELAXED);
if (j < vec->nb_elem)
rte_prefetch0(
rte_pktmbuf_mtod(mbufs[j], void *));
mbufs[i]->port = rsrc->dst_ports[mbufs[i]->port];
if (flags & L2FWD_EVENT_UPDT_MAC)
l2fwd_mac_updating(
mbufs[i], mbufs[i]->port,
&rsrc->eth_addr[mbufs[i]->port]);
if (flags & L2FWD_EVENT_TX_DIRECT)
rte_event_eth_tx_adapter_txq_set(mbufs[i], 0);
if (timer_period > 0)
__atomic_fetch_add(
&rsrc->port_stats[mbufs[i]->port].tx, 1,
__ATOMIC_RELAXED);
}
}
}
static __rte_always_inline void
l2fwd_event_loop_vector(struct l2fwd_resources *rsrc, const uint32_t flags)
{
struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
const int port_id = l2fwd_get_free_event_port(evt_rsrc);
const uint8_t tx_q_id =
evt_rsrc->evq.event_q_id[evt_rsrc->evq.nb_queues - 1];
const uint64_t timer_period = rsrc->timer_period;
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint8_t deq_len = evt_rsrc->deq_depth;
struct rte_event ev[MAX_PKT_BURST];
uint16_t nb_rx, nb_tx;
uint8_t i;
if (port_id < 0)
return;
printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
rte_lcore_id());
while (!rsrc->force_quit) {
nb_rx = rte_event_dequeue_burst(event_d_id, port_id, ev,
deq_len, 0);
if (nb_rx == 0)
continue;
for (i = 0; i < nb_rx; i++) {
if (flags & L2FWD_EVENT_TX_ENQ) {
ev[i].queue_id = tx_q_id;
ev[i].op = RTE_EVENT_OP_FORWARD;
}
l2fwd_event_vector_fwd(rsrc, ev[i].vec, timer_period,
flags);
}
if (flags & L2FWD_EVENT_TX_ENQ) {
nb_tx = rte_event_enqueue_burst(event_d_id, port_id, ev,
nb_rx);
while (nb_tx < nb_rx && !rsrc->force_quit)
nb_tx += rte_event_enqueue_burst(
event_d_id, port_id, ev + nb_tx,
nb_rx - nb_tx);
}
if (flags & L2FWD_EVENT_TX_DIRECT) {
nb_tx = rte_event_eth_tx_adapter_enqueue(
event_d_id, port_id, ev, nb_rx, 0);
while (nb_tx < nb_rx && !rsrc->force_quit)
nb_tx += rte_event_eth_tx_adapter_enqueue(
event_d_id, port_id, ev + nb_tx,
nb_rx - nb_tx, 0);
}
}
}
static void __rte_noinline
l2fwd_event_main_loop_tx_d_vec(struct l2fwd_resources *rsrc)
{
l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_DIRECT);
}
static void __rte_noinline
l2fwd_event_main_loop_tx_d_brst_vec(struct l2fwd_resources *rsrc)
{
l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_DIRECT);
}
static void __rte_noinline
l2fwd_event_main_loop_tx_q_vec(struct l2fwd_resources *rsrc)
{
l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_ENQ);
}
static void __rte_noinline
l2fwd_event_main_loop_tx_q_brst_vec(struct l2fwd_resources *rsrc)
{
l2fwd_event_loop_vector(rsrc, L2FWD_EVENT_TX_ENQ);
}
static void __rte_noinline
l2fwd_event_main_loop_tx_d_mac_vec(struct l2fwd_resources *rsrc)
{
l2fwd_event_loop_vector(rsrc,
L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_DIRECT);
}
static void __rte_noinline
l2fwd_event_main_loop_tx_d_brst_mac_vec(struct l2fwd_resources *rsrc)
{
l2fwd_event_loop_vector(rsrc,
L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_DIRECT);
}
static void __rte_noinline
l2fwd_event_main_loop_tx_q_mac_vec(struct l2fwd_resources *rsrc)
{
l2fwd_event_loop_vector(rsrc,
L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_ENQ);
}
static void __rte_noinline
l2fwd_event_main_loop_tx_q_brst_mac_vec(struct l2fwd_resources *rsrc)
{
l2fwd_event_loop_vector(rsrc,
L2FWD_EVENT_UPDT_MAC | L2FWD_EVENT_TX_ENQ);
}
void
l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
{
/* [MAC_UPDT][TX_MODE][BURST] */
const event_loop_cb event_loop[2][2][2] = {
[0][0][0] = l2fwd_event_main_loop_tx_d,
[0][0][1] = l2fwd_event_main_loop_tx_d_brst,
[0][1][0] = l2fwd_event_main_loop_tx_q,
[0][1][1] = l2fwd_event_main_loop_tx_q_brst,
[1][0][0] = l2fwd_event_main_loop_tx_d_mac,
[1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac,
[1][1][0] = l2fwd_event_main_loop_tx_q_mac,
[1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac,
const event_loop_cb event_loop[2][2][2][2] = {
[0][0][0][0] = l2fwd_event_main_loop_tx_d,
[0][0][0][1] = l2fwd_event_main_loop_tx_d_brst,
[0][0][1][0] = l2fwd_event_main_loop_tx_q,
[0][0][1][1] = l2fwd_event_main_loop_tx_q_brst,
[0][1][0][0] = l2fwd_event_main_loop_tx_d_mac,
[0][1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac,
[0][1][1][0] = l2fwd_event_main_loop_tx_q_mac,
[0][1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac,
[1][0][0][0] = l2fwd_event_main_loop_tx_d_vec,
[1][0][0][1] = l2fwd_event_main_loop_tx_d_brst_vec,
[1][0][1][0] = l2fwd_event_main_loop_tx_q_vec,
[1][0][1][1] = l2fwd_event_main_loop_tx_q_brst_vec,
[1][1][0][0] = l2fwd_event_main_loop_tx_d_mac_vec,
[1][1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac_vec,
[1][1][1][0] = l2fwd_event_main_loop_tx_q_mac_vec,
[1][1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac_vec,
};
struct l2fwd_event_resources *evt_rsrc;
uint32_t event_queue_cfg;
@ -393,8 +572,7 @@ l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
if (ret < 0)
rte_panic("Error in starting eventdev\n");
evt_rsrc->ops.l2fwd_event_loop = event_loop
[rsrc->mac_updating]
[evt_rsrc->tx_mode_q]
[evt_rsrc->has_burst];
evt_rsrc->ops.l2fwd_event_loop =
event_loop[rsrc->evt_vec.enabled][rsrc->mac_updating]
[evt_rsrc->tx_mode_q][evt_rsrc->has_burst];
}

View File

@ -245,6 +245,27 @@ l2fwd_rx_tx_adapter_setup_generic(struct l2fwd_resources *rsrc)
if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
continue;
eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[i];
if (rsrc->evt_vec.enabled) {
uint32_t cap;
if (rte_event_eth_rx_adapter_caps_get(event_d_id,
port_id, &cap))
rte_panic(
"Failed to get event rx adapter capability");
if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
eth_q_conf.vector_sz = rsrc->evt_vec.size;
eth_q_conf.vector_timeout_ns =
rsrc->evt_vec.timeout_ns;
eth_q_conf.vector_mp = rsrc->evt_vec_pool;
eth_q_conf.rx_queue_flags |=
RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
} else {
rte_panic(
"Rx adapter doesn't support event vector");
}
}
ret = rte_event_eth_rx_adapter_queue_add(rx_adptr_id, port_id,
-1, &eth_q_conf);
if (ret)

View File

@ -230,6 +230,28 @@ l2fwd_rx_tx_adapter_setup_internal_port(struct l2fwd_resources *rsrc)
RTE_ETH_FOREACH_DEV(port_id) {
if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
continue;
if (rsrc->evt_vec.enabled) {
uint32_t cap;
if (rte_event_eth_rx_adapter_caps_get(event_d_id,
port_id, &cap))
rte_panic(
"Failed to get event rx adapter capability");
if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
eth_q_conf.vector_sz = rsrc->evt_vec.size;
eth_q_conf.vector_timeout_ns =
rsrc->evt_vec.timeout_ns;
eth_q_conf.vector_mp = rsrc->evt_vec_pool;
eth_q_conf.rx_queue_flags |=
RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
} else {
rte_panic(
"Rx adapter doesn't support event vector");
}
}
ret = rte_event_eth_rx_adapter_create(adapter_id, event_d_id,
&evt_rsrc->def_p_conf);
if (ret)

View File

@ -25,6 +25,9 @@ l2fwd_event_usage(const char *prgname)
" --eventq-sched: Event queue schedule type, ordered, atomic or parallel.\n"
" Default: atomic\n"
" Valid only if --mode=eventdev\n"
" --event-vector: Enable event vectorization.\n"
" --event-vector-size: Max vector size if event vectorization is enabled.\n"
" --event-vector-tmo: Max timeout to form vector in nanoseconds if event vectorization is enabled\n"
" --config: Configure forwarding port pair mapping\n"
" Default: alternate port pairs\n\n",
prgname);
@ -175,6 +178,9 @@ static const char short_options[] =
#define CMD_LINE_OPT_MODE "mode"
#define CMD_LINE_OPT_EVENTQ_SCHED "eventq-sched"
#define CMD_LINE_OPT_PORT_PAIR_CONF "config"
#define CMD_LINE_OPT_ENABLE_VECTOR "event-vector"
#define CMD_LINE_OPT_VECTOR_SIZE "event-vector-size"
#define CMD_LINE_OPT_VECTOR_TMO_NS "event-vector-tmo"
enum {
/* long options mapped to a short option */
@ -186,6 +192,9 @@ enum {
CMD_LINE_OPT_MODE_NUM,
CMD_LINE_OPT_EVENTQ_SCHED_NUM,
CMD_LINE_OPT_PORT_PAIR_CONF_NUM,
CMD_LINE_OPT_ENABLE_VECTOR_NUM,
CMD_LINE_OPT_VECTOR_SIZE_NUM,
CMD_LINE_OPT_VECTOR_TMO_NS_NUM
};
/* Parse the argument given in the command line of the application */
@ -202,6 +211,12 @@ l2fwd_event_parse_args(int argc, char **argv, struct l2fwd_resources *rsrc)
CMD_LINE_OPT_EVENTQ_SCHED_NUM},
{ CMD_LINE_OPT_PORT_PAIR_CONF, required_argument, NULL,
CMD_LINE_OPT_PORT_PAIR_CONF_NUM},
{CMD_LINE_OPT_ENABLE_VECTOR, no_argument, NULL,
CMD_LINE_OPT_ENABLE_VECTOR_NUM},
{CMD_LINE_OPT_VECTOR_SIZE, required_argument, NULL,
CMD_LINE_OPT_VECTOR_SIZE_NUM},
{CMD_LINE_OPT_VECTOR_TMO_NS, required_argument, NULL,
CMD_LINE_OPT_VECTOR_TMO_NS_NUM},
{NULL, 0, 0, 0}
};
int opt, ret, timer_secs;
@ -270,6 +285,16 @@ l2fwd_event_parse_args(int argc, char **argv, struct l2fwd_resources *rsrc)
return -1;
}
break;
case CMD_LINE_OPT_ENABLE_VECTOR_NUM:
printf("event vectorization is enabled\n");
rsrc->evt_vec.enabled = 1;
break;
case CMD_LINE_OPT_VECTOR_SIZE_NUM:
rsrc->evt_vec.size = strtol(optarg, NULL, 10);
break;
case CMD_LINE_OPT_VECTOR_TMO_NS_NUM:
rsrc->evt_vec.timeout_ns = strtoull(optarg, NULL, 10);
break;
/* long options */
case 0:
@ -283,6 +308,18 @@ l2fwd_event_parse_args(int argc, char **argv, struct l2fwd_resources *rsrc)
rsrc->mac_updating = mac_updating;
if (rsrc->evt_vec.enabled && !rsrc->evt_vec.size) {
rsrc->evt_vec.size = VECTOR_SIZE_DEFAULT;
printf("vector size set to default (%" PRIu16 ")\n",
rsrc->evt_vec.size);
}
if (rsrc->evt_vec.enabled && !rsrc->evt_vec.timeout_ns) {
rsrc->evt_vec.timeout_ns = VECTOR_TMO_NS_DEFAULT;
printf("vector timeout set to default (%" PRIu64 " ns)\n",
rsrc->evt_vec.timeout_ns);
}
if (optind >= 0)
argv[optind-1] = prgname;
@ -636,6 +673,17 @@ main(int argc, char **argv)
rte_panic("Cannot init mbuf pool\n");
/* >8 End of creation of mbuf pool. */
if (rsrc->evt_vec.enabled) {
unsigned int nb_vec, vec_size;
vec_size = rsrc->evt_vec.size;
nb_vec = (nb_mbufs + vec_size - 1) / vec_size;
rsrc->evt_vec_pool = rte_event_vector_pool_create(
"vector_pool", nb_vec, 0, vec_size, rte_socket_id());
if (rsrc->evt_vec_pool == NULL)
rte_panic("Cannot init event vector pool\n");
}
nb_ports_available = l2fwd_event_init_ports(rsrc);
if (!nb_ports_available)
rte_panic("All available ports are disabled. Please set portmask.\n");