examples/l3fwd: split processing and send stages

Split packet processing from the packet send stage, since the send
stage is not common to poll and event mode.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Shijith Thotton <sthotton@marvell.com>
Author: Pavan Nikhilesh, 2022-10-25 21:35:36 +05:30; committed by Thomas Monjalon
commit cb0ddabf2f (parent 927cb43fe9)
4 changed files with 95 additions and 29 deletions
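Before the per-file diffs, here is a minimal self-contained sketch of the pattern this patch introduces. It is not the actual l3fwd code: the packet type, the lookup, and the port numbers are simplified stand-ins. The point it illustrates is that the processing stage fills a dst_port[] array and only performs "step 3" (writing the destination back into the packet) when asked, while the poll-mode wrapper keeps the old entry point and adds the separate send stage.

#include <stdint.h>
#include <stdio.h>

#define MAX_PKT_BURST 32

struct pkt {                      /* hypothetical stand-in for struct rte_mbuf */
	uint16_t port;
};

static uint16_t
lookup_dst_port(const struct pkt *p, uint16_t rx_port)
{
	(void)p;
	return rx_port ^ 1;       /* hypothetical lookup: forward to the paired port */
}

/* Processing stage: fills dst_port[]; step 3 (writing the port back into the
 * packet) is optional so event mode can enable it and poll mode can skip it. */
static void
process_packets(int nb_rx, struct pkt **pkts, uint16_t *dst_port,
		uint16_t rx_port, const uint8_t do_step3)
{
	int i;

	for (i = 0; i < nb_rx; i++) {
		dst_port[i] = lookup_dst_port(pkts[i], rx_port);
		if (do_step3)
			pkts[i]->port = dst_port[i];
	}
}

/* Send stage, used only by the poll-mode path. */
static void
send_packets_multi(struct pkt **pkts, const uint16_t *dst_port, int nb_rx)
{
	int i;

	(void)pkts;
	for (i = 0; i < nb_rx; i++)
		printf("pkt %d -> port %u\n", i, dst_port[i]);
}

/* Poll mode keeps the old entry point: process without step 3, then send. */
static void
send_packets(int nb_rx, struct pkt **pkts, uint16_t rx_port)
{
	uint16_t dst_port[MAX_PKT_BURST];

	process_packets(nb_rx, pkts, dst_port, rx_port, 0);
	send_packets_multi(pkts, dst_port, nb_rx);
}

int
main(void)
{
	struct pkt a = { .port = 0 }, b = { .port = 0 };
	struct pkt *burst[2] = { &a, &b };
	uint16_t dst_port[MAX_PKT_BURST];

	send_packets(2, burst, 0);                 /* poll-mode path */
	process_packets(2, burst, dst_port, 0, 1); /* event-mode path: step 3 in place */
	return 0;
}

In the event-mode path the updated mbufs go straight back to the event device, which is why no common send stage is shared with poll mode.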

examples/l3fwd/l3fwd_em_hlm.h

@@ -177,16 +177,12 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
return portid;
}
/*
* Buffer optimized handling of packets, invoked
* from main_loop.
*/
static inline void
l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
uint16_t portid, struct lcore_conf *qconf)
l3fwd_em_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
uint16_t *dst_port, uint16_t portid,
struct lcore_conf *qconf, const uint8_t do_step3)
{
int32_t i, j, pos;
uint16_t dst_port[MAX_PKT_BURST];
/*
* Send nb_rx - nb_rx % EM_HASH_LOOKUP_COUNT packets
@@ -233,13 +229,30 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
dst_port[j + i] = em_get_dst_port(qconf,
pkts_burst[j + i], portid);
}
for (i = 0; i < EM_HASH_LOOKUP_COUNT && do_step3; i += FWDSTEP)
processx4_step3(&pkts_burst[j + i], &dst_port[j + i]);
}
for (; j < nb_rx; j++)
for (; j < nb_rx; j++) {
dst_port[j] = em_get_dst_port(qconf, pkts_burst[j], portid);
if (do_step3)
process_packet(pkts_burst[j], &pkts_burst[j]->port);
}
}
/*
* Buffer optimized handling of packets, invoked
* from main_loop.
*/
static inline void
l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid,
struct lcore_conf *qconf)
{
uint16_t dst_port[MAX_PKT_BURST];
l3fwd_em_process_packets(nb_rx, pkts_burst, dst_port, portid, qconf, 0);
send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}
/*
@@ -260,11 +273,8 @@ l3fwd_em_process_events(int nb_rx, struct rte_event **ev,
*/
int32_t n = RTE_ALIGN_FLOOR(nb_rx, EM_HASH_LOOKUP_COUNT);
for (j = 0; j < EM_HASH_LOOKUP_COUNT && j < nb_rx; j++) {
for (j = 0; j < nb_rx; j++)
pkts_burst[j] = ev[j]->mbuf;
rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j],
struct rte_ether_hdr *) + 1);
}
for (j = 0; j < n; j += EM_HASH_LOOKUP_COUNT) {
@@ -305,7 +315,8 @@ l3fwd_em_process_events(int nb_rx, struct rte_event **ev,
}
continue;
}
processx4_step3(&pkts_burst[j], &dst_port[j]);
for (i = 0; i < EM_HASH_LOOKUP_COUNT; i += FWDSTEP)
processx4_step3(&pkts_burst[j + i], &dst_port[j + i]);
for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++)
pkts_burst[j + i]->port = dst_port[j + i];

examples/l3fwd/l3fwd_lpm_altivec.h

@@ -96,11 +96,11 @@ processx4_step2(const struct lcore_conf *qconf,
* from main_loop.
*/
static inline void
l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
uint8_t portid, struct lcore_conf *qconf)
l3fwd_lpm_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
uint8_t portid, uint16_t *dst_port,
struct lcore_conf *qconf, const uint8_t do_step3)
{
int32_t j;
uint16_t dst_port[MAX_PKT_BURST];
__vector unsigned int dip[MAX_PKT_BURST / FWDSTEP];
uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
@@ -114,22 +114,41 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
ipv4_flag[j / FWDSTEP],
portid, &pkts_burst[j], &dst_port[j]);
if (do_step3)
for (j = 0; j != k; j += FWDSTEP)
processx4_step3(&pkts_burst[j], &dst_port[j]);
/* Classify last up to 3 packets one by one */
switch (nb_rx % FWDSTEP) {
case 3:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
if (do_step3)
process_packet(pkts_burst[j], &dst_port[j]);
j++;
/* fall-through */
case 2:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
if (do_step3)
process_packet(pkts_burst[j], &dst_port[j]);
j++;
/* fall-through */
case 1:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
if (do_step3)
process_packet(pkts_burst[j], &dst_port[j]);
j++;
/* fall-through */
}
}
static inline void
l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint8_t portid,
struct lcore_conf *qconf)
{
uint16_t dst_port[MAX_PKT_BURST];
l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf,
0);
send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}

examples/l3fwd/l3fwd_lpm_neon.h

@@ -80,16 +80,12 @@ processx4_step2(const struct lcore_conf *qconf,
}
}
/*
* Buffer optimized handling of packets, invoked
* from main_loop.
*/
static inline void
l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
uint16_t portid, struct lcore_conf *qconf)
l3fwd_lpm_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
uint16_t portid, uint16_t *dst_port,
struct lcore_conf *qconf, const uint8_t do_step3)
{
int32_t i = 0, j = 0;
uint16_t dst_port[MAX_PKT_BURST];
int32x4_t dip;
uint32_t ipv4_flag;
const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
@@ -100,7 +96,6 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i],
void *));
}
for (j = 0; j != k - FWDSTEP; j += FWDSTEP) {
for (i = 0; i < FWDSTEP; i++) {
rte_prefetch0(rte_pktmbuf_mtod(
@@ -111,11 +106,15 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
processx4_step1(&pkts_burst[j], &dip, &ipv4_flag);
processx4_step2(qconf, dip, ipv4_flag, portid,
&pkts_burst[j], &dst_port[j]);
if (do_step3)
processx4_step3(&pkts_burst[j], &dst_port[j]);
}
processx4_step1(&pkts_burst[j], &dip, &ipv4_flag);
processx4_step2(qconf, dip, ipv4_flag, portid, &pkts_burst[j],
&dst_port[j]);
if (do_step3)
processx4_step3(&pkts_burst[j], &dst_port[j]);
j += FWDSTEP;
}
@@ -138,26 +137,44 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
void *));
j++;
}
j -= m;
/* Classify last up to 3 packets one by one */
switch (m) {
case 3:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
portid);
if (do_step3)
process_packet(pkts_burst[j], &dst_port[j]);
j++;
/* fallthrough */
case 2:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
portid);
if (do_step3)
process_packet(pkts_burst[j], &dst_port[j]);
j++;
/* fallthrough */
case 1:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j],
portid);
if (do_step3)
process_packet(pkts_burst[j], &dst_port[j]);
}
}
}
/*
* Buffer optimized handling of packets, invoked
* from main_loop.
*/
static inline void
l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid,
struct lcore_conf *qconf)
{
uint16_t dst_port[MAX_PKT_BURST];
l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf,
0);
send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}

examples/l3fwd/l3fwd_lpm_sse.h

@@ -82,11 +82,11 @@ processx4_step2(const struct lcore_conf *qconf,
* from main_loop.
*/
static inline void
l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
uint16_t portid, struct lcore_conf *qconf)
l3fwd_lpm_process_packets(int nb_rx, struct rte_mbuf **pkts_burst,
uint16_t portid, uint16_t *dst_port,
struct lcore_conf *qconf, const uint8_t do_step3)
{
int32_t j;
uint16_t dst_port[MAX_PKT_BURST];
__m128i dip[MAX_PKT_BURST / FWDSTEP];
uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
@@ -99,21 +99,40 @@ l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
processx4_step2(qconf, dip[j / FWDSTEP],
ipv4_flag[j / FWDSTEP], portid, &pkts_burst[j], &dst_port[j]);
if (do_step3)
for (j = 0; j != k; j += FWDSTEP)
processx4_step3(&pkts_burst[j], &dst_port[j]);
/* Classify last up to 3 packets one by one */
switch (nb_rx % FWDSTEP) {
case 3:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
if (do_step3)
process_packet(pkts_burst[j], &dst_port[j]);
j++;
/* fall-through */
case 2:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
if (do_step3)
process_packet(pkts_burst[j], &dst_port[j]);
j++;
/* fall-through */
case 1:
dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
if (do_step3)
process_packet(pkts_burst[j], &dst_port[j]);
j++;
}
}
static inline void
l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid,
struct lcore_conf *qconf)
{
uint16_t dst_port[MAX_PKT_BURST];
l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf,
0);
send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
}