examples/ipsec-secgw: add per-core packet statistics

Add per-core packet handling stats to analyze traffic distribution
when multiple cores are engaged.

Since aggregating the packet stats across cores would affect
performance, the feature is kept disabled by default behind a
compile-time flag (STATS_INTERVAL).

Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
Authored by Anoob Joseph on 2020-05-13 23:15:19 +05:30; committed by Akhil Goyal
parent d2fd16c8b0
commit 1329602b6c
4 changed files with 170 additions and 41 deletions
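The counters live in a per-lcore, cache-aligned array and each worker only writes its own slot, so the fast path needs no locks or atomics; cross-core aggregation happens only in the periodic print callback. With the default STATS_INTERVAL of 0 the code compiles out entirely; enabling it means defining STATS_INTERVAL (in seconds) at build time, e.g. by adding -DSTATS_INTERVAL=1 to the compiler flags. A minimal sketch of the per-lcore counter pattern, using illustrative names (pkt_stats, stats_rx_add) rather than the patch's own identifiers:

/* Sketch only: each lcore updates its own cache-aligned slot, so no
 * synchronization is needed on the fast path. Assumes a DPDK build. */
#include <stdint.h>
#include <rte_common.h>		/* __rte_cache_aligned */
#include <rte_lcore.h>		/* rte_lcore_id(), RTE_MAX_LCORE */

struct pkt_stats {
	uint64_t rx;
	uint64_t tx;
	uint64_t dropped;
} __rte_cache_aligned;

static struct pkt_stats pkt_stats_per_lcore[RTE_MAX_LCORE];

static inline void
stats_rx_add(uint16_t n)
{
	pkt_stats_per_lcore[rte_lcore_id()].rx += n;
}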

examples/ipsec-secgw/ipsec-secgw.c

@ -47,6 +47,7 @@
#include <rte_eventdev.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_alarm.h>
#include "event_helper.h"
#include "ipsec.h"
@ -287,6 +288,70 @@ adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph,
}
}
#if (STATS_INTERVAL > 0)
/* Print out statistics on packet distribution */
static void
print_stats_cb(__rte_unused void *param)
{
uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
float burst_percent, rx_per_call, tx_per_call;
unsigned int coreid;
total_packets_dropped = 0;
total_packets_tx = 0;
total_packets_rx = 0;
const char clr[] = { 27, '[', '2', 'J', '\0' };
const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
/* Clear screen and move to top left */
printf("%s%s", clr, topLeft);
printf("\nCore statistics ====================================");
for (coreid = 0; coreid < RTE_MAX_LCORE; coreid++) {
/* skip disabled cores */
if (rte_lcore_is_enabled(coreid) == 0)
continue;
burst_percent = (float)(core_statistics[coreid].burst_rx * 100)/
core_statistics[coreid].rx;
rx_per_call = (float)(core_statistics[coreid].rx)/
core_statistics[coreid].rx_call;
tx_per_call = (float)(core_statistics[coreid].tx)/
core_statistics[coreid].tx_call;
printf("\nStatistics for core %u ------------------------------"
"\nPackets received: %20"PRIu64
"\nPackets sent: %24"PRIu64
"\nPackets dropped: %21"PRIu64
"\nBurst percent: %23.2f"
"\nPackets per Rx call: %17.2f"
"\nPackets per Tx call: %17.2f",
coreid,
core_statistics[coreid].rx,
core_statistics[coreid].tx,
core_statistics[coreid].dropped,
burst_percent,
rx_per_call,
tx_per_call);
total_packets_dropped += core_statistics[coreid].dropped;
total_packets_tx += core_statistics[coreid].tx;
total_packets_rx += core_statistics[coreid].rx;
}
printf("\nAggregate statistics ==============================="
"\nTotal packets received: %14"PRIu64
"\nTotal packets sent: %18"PRIu64
"\nTotal packets dropped: %15"PRIu64,
total_packets_rx,
total_packets_tx,
total_packets_dropped);
printf("\n====================================================\n");
rte_eal_alarm_set(STATS_INTERVAL * US_PER_S, print_stats_cb, NULL);
}
#endif /* STATS_INTERVAL */
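print_stats_cb() keeps itself alive by re-registering with rte_eal_alarm_set() at the end of each run, so the report refreshes every STATS_INTERVAL seconds without needing a dedicated stats thread. A stripped-down sketch of this self-rearming alarm pattern (illustrative names, not the exact ipsec-secgw code):

#include <stdio.h>
#include <rte_alarm.h>		/* rte_eal_alarm_set() */
#include <rte_cycles.h>		/* US_PER_S */

#define PERIOD_S 1		/* illustrative period in seconds */

static void
periodic_cb(void *arg)
{
	(void)arg;
	printf("periodic work goes here\n");
	/* re-arm so the callback fires again in PERIOD_S seconds */
	rte_eal_alarm_set(PERIOD_S * US_PER_S, periodic_cb, NULL);
}

/* armed once after rte_eal_init(), e.g. from main():
 *	rte_eal_alarm_set(PERIOD_S * US_PER_S, periodic_cb, NULL);
 */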
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
@ -332,7 +397,7 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
/* drop packet when IPv6 header exceeds first segment length */
if (unlikely(l3len > pkt->data_len)) {
rte_pktmbuf_free(pkt);
free_pkts(&pkt, 1);
return;
}
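Most of the remaining hunks repeat one substitution: every direct rte_pktmbuf_free() on a dropped packet becomes a call to the new free_pkts() helper (added to ipsec-secgw.h below), so the mbuf is released and the drop is counted in the per-core statistics. The call-site change in sketch form:

/* before: the mbuf is freed but the drop is invisible to the stats */
rte_pktmbuf_free(pkt);

/* after: the mbuf is freed and the lcore's dropped counter is bumped */
free_pkts(&pkt, 1);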
@ -349,7 +414,7 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
/* Unknown/Unsupported type, drop the packet */
RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n",
rte_be_to_cpu_16(eth->ether_type));
rte_pktmbuf_free(pkt);
free_pkts(&pkt, 1);
return;
}
@ -476,9 +541,12 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
prepare_tx_burst(m_table, n, port, qconf);
ret = rte_eth_tx_burst(port, queueid, m_table, n);
core_stats_update_tx(ret);
if (unlikely(ret < n)) {
do {
rte_pktmbuf_free(m_table[ret]);
free_pkts(&m_table[ret], 1);
} while (++ret < n);
}
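rte_eth_tx_burst() may accept fewer packets than it was given, so the TX counter is bumped only by what the PMD actually took, while the unsent tail goes through free_pkts() and shows up as dropped. A condensed sketch of that split (the wrapper name is illustrative; it relies on rte_ethdev.h and the helpers added to ipsec-secgw.h below):

static inline void
tx_burst_and_account(uint16_t port, uint16_t queue,
		     struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t sent = rte_eth_tx_burst(port, queue, pkts, n);

	core_stats_update_tx(sent);		/* packets accepted by the PMD */
	if (sent < n)
		free_pkts(&pkts[sent], n - sent); /* unsent tail, counted as dropped */
}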
@ -524,7 +592,7 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m,
"error code: %d\n",
__func__, m->pkt_len, rte_errno);
rte_pktmbuf_free(m);
free_pkts(&m, 1);
return len;
}
@ -549,7 +617,7 @@ send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto)
} else if (frag_tbl_sz > 0)
len = send_fragment_packet(qconf, m, port, proto);
else
rte_pktmbuf_free(m);
free_pkts(&m, 1);
/* enough pkts to be sent */
if (unlikely(len == MAX_PKT_BURST)) {
@ -583,19 +651,19 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
continue;
}
if (res == DISCARD) {
rte_pktmbuf_free(m);
free_pkts(&m, 1);
continue;
}
/* Only check SPI match for processed IPSec packets */
if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) {
rte_pktmbuf_free(m);
free_pkts(&m, 1);
continue;
}
sa_idx = res - 1;
if (!inbound_sa_check(sa, m, sa_idx)) {
rte_pktmbuf_free(m);
free_pkts(&m, 1);
continue;
}
ip->pkts[j++] = m;
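inbound_sp_sa() here (and outbound_sp() below) use the same in-place filtering idiom: packets that pass the checks are compacted to the front of the array, everything else is released through free_pkts() and therefore counted as dropped. The idiom in isolation (sketch; keep() is a hypothetical predicate):

static uint32_t
filter_pkts(struct rte_mbuf *pkts[], uint32_t num,
	    int (*keep)(const struct rte_mbuf *))
{
	uint32_t i, j;

	for (i = 0, j = 0; i < num; i++) {
		if (keep(pkts[i]))
			pkts[j++] = pkts[i];	/* compact survivors to the front */
		else
			free_pkts(&pkts[i], 1);	/* freed and counted as dropped */
	}
	return j;				/* new number of valid packets */
}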
@ -630,7 +698,7 @@ split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
offsetof(struct ip6_hdr, ip6_nxt));
n6++;
} else
rte_pktmbuf_free(m);
free_pkts(&m, 1);
}
trf->ip4.num = n4;
@ -682,7 +750,7 @@ outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
m = ip->pkts[i];
sa_idx = ip->res[i] - 1;
if (ip->res[i] == DISCARD)
rte_pktmbuf_free(m);
free_pkts(&m, 1);
else if (ip->res[i] == BYPASS)
ip->pkts[j++] = m;
else {
@ -701,8 +769,7 @@ process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
uint16_t idx, nb_pkts_out, i;
/* Drop any IPsec traffic from protected ports */
for (i = 0; i < traffic->ipsec.num; i++)
rte_pktmbuf_free(traffic->ipsec.pkts[i]);
free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
traffic->ipsec.num = 0;
@ -742,14 +809,12 @@ process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
uint32_t nb_pkts_in, i, idx;
/* Drop any IPv4 traffic from unprotected ports */
for (i = 0; i < traffic->ip4.num; i++)
rte_pktmbuf_free(traffic->ip4.pkts[i]);
free_pkts(traffic->ip4.pkts, traffic->ip4.num);
traffic->ip4.num = 0;
/* Drop any IPv6 traffic from unprotected ports */
for (i = 0; i < traffic->ip6.num; i++)
rte_pktmbuf_free(traffic->ip6.pkts[i]);
free_pkts(traffic->ip6.pkts, traffic->ip6.num);
traffic->ip6.num = 0;
@ -785,8 +850,7 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
struct ip *ip;
/* Drop any IPsec traffic from protected ports */
for (i = 0; i < traffic->ipsec.num; i++)
rte_pktmbuf_free(traffic->ipsec.pkts[i]);
free_pkts(traffic->ipsec.pkts, traffic->ipsec.num);
n = 0;
@ -900,7 +964,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
}
if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) {
rte_pktmbuf_free(pkts[i]);
free_pkts(&pkts[i], 1);
continue;
}
send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP);
@ -952,7 +1016,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
}
if (pkt_hop == -1) {
rte_pktmbuf_free(pkts[i]);
free_pkts(&pkts[i], 1);
continue;
}
send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6);
@ -1168,8 +1232,10 @@ ipsec_poll_mode_worker(void)
nb_rx = rte_eth_rx_burst(portid, queueid,
pkts, MAX_PKT_BURST);
if (nb_rx > 0)
if (nb_rx > 0) {
core_stats_update_rx(nb_rx);
process_pkts(qconf, pkts, nb_rx, portid);
}
/* dequeue and process completed crypto-ops */
if (is_unprotected_port(portid))
@ -2916,6 +2982,12 @@ main(int32_t argc, char **argv)
check_all_ports_link_status(enabled_port_mask);
#if (STATS_INTERVAL > 0)
rte_eal_alarm_set(STATS_INTERVAL * US_PER_S, print_stats_cb, NULL);
#else
RTE_LOG(INFO, IPSEC, "Stats display disabled\n");
#endif /* STATS_INTERVAL */
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(ipsec_launch_one_lcore, eh_conf, CALL_MASTER);
RTE_LCORE_FOREACH_SLAVE(lcore_id) {

examples/ipsec-secgw/ipsec-secgw.h

@ -6,6 +6,10 @@
#include <stdbool.h>
#ifndef STATS_INTERVAL
#define STATS_INTERVAL 0
#endif
#define NB_SOCKETS 4
#define MAX_PKT_BURST 32
@ -69,6 +73,19 @@ struct ethaddr_info {
uint64_t src, dst;
};
#if (STATS_INTERVAL > 0)
struct ipsec_core_statistics {
uint64_t tx;
uint64_t rx;
uint64_t rx_call;
uint64_t tx_call;
uint64_t dropped;
uint64_t burst_rx;
} __rte_cache_aligned;
struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE];
#endif /* STATS_INTERVAL */
extern struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS];
/* Port mask to identify the unprotected ports */
@ -85,4 +102,53 @@ is_unprotected_port(uint16_t port_id)
return unprotected_port_mask & (1 << port_id);
}
static inline void
core_stats_update_rx(int n)
{
#if (STATS_INTERVAL > 0)
int lcore_id = rte_lcore_id();
core_statistics[lcore_id].rx += n;
core_statistics[lcore_id].rx_call++;
if (n == MAX_PKT_BURST)
core_statistics[lcore_id].burst_rx += n;
#else
RTE_SET_USED(n);
#endif /* STATS_INTERVAL */
}
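Note that burst_rx only accumulates packets from reads that returned a completely full MAX_PKT_BURST, so the "Burst percent" line in print_stats_cb() is the share of traffic that arrived in full bursts, a rough indicator of how close the core is to saturation. Worked numbers (illustrative, not taken from the patch):

/* Illustrative arithmetic, assuming MAX_PKT_BURST == 32:
 *   rx_call  = 1000 polls
 *   rx       = 24000 packets received
 *   burst_rx = 20 * 32 = 640 (20 of the polls returned a full burst)
 *
 *   Packets per Rx call = rx / rx_call        = 24000 / 1000  = 24.0
 *   Burst percent       = burst_rx * 100 / rx = 64000 / 24000 ~= 2.7
 */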
static inline void
core_stats_update_tx(int n)
{
#if (STATS_INTERVAL > 0)
int lcore_id = rte_lcore_id();
core_statistics[lcore_id].tx += n;
core_statistics[lcore_id].tx_call++;
#else
RTE_SET_USED(n);
#endif /* STATS_INTERVAL */
}
static inline void
core_stats_update_drop(int n)
{
#if (STATS_INTERVAL > 0)
int lcore_id = rte_lcore_id();
core_statistics[lcore_id].dropped += n;
#else
RTE_SET_USED(n);
#endif /* STATS_INTERVAL */
}
/* helper routine to free bulk of packets */
static inline void
free_pkts(struct rte_mbuf *mb[], uint32_t n)
{
uint32_t i;
for (i = 0; i != n; i++)
rte_pktmbuf_free(mb[i]);
core_stats_update_drop(n);
}
#endif /* _IPSEC_SECGW_H_ */

examples/ipsec-secgw/ipsec.c

@ -500,7 +500,7 @@ enqueue_cop_burst(struct cdev_qp *cqp)
cqp->id, cqp->qp, ret, len);
/* drop packets that we fail to enqueue */
for (i = ret; i < len; i++)
rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
free_pkts(&cqp->buf[i]->sym->m_src, 1);
}
cqp->in_flight += ret;
cqp->len = 0;
@ -528,7 +528,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
for (i = 0; i < nb_pkts; i++) {
if (unlikely(sas[i] == NULL)) {
rte_pktmbuf_free(pkts[i]);
free_pkts(&pkts[i], 1);
continue;
}
@ -549,7 +549,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
if ((unlikely(ips->security.ses == NULL)) &&
create_lookaside_session(ipsec_ctx, sa, ips)) {
rte_pktmbuf_free(pkts[i]);
free_pkts(&pkts[i], 1);
continue;
}
@ -563,7 +563,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
RTE_LOG(ERR, IPSEC, "CPU crypto is not supported by the"
" legacy mode.");
rte_pktmbuf_free(pkts[i]);
free_pkts(&pkts[i], 1);
continue;
case RTE_SECURITY_ACTION_TYPE_NONE:
@ -575,7 +575,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
if ((unlikely(ips->crypto.ses == NULL)) &&
create_lookaside_session(ipsec_ctx, sa, ips)) {
rte_pktmbuf_free(pkts[i]);
free_pkts(&pkts[i], 1);
continue;
}
@ -584,7 +584,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
ret = xform_func(pkts[i], sa, &priv->cop);
if (unlikely(ret)) {
rte_pktmbuf_free(pkts[i]);
free_pkts(&pkts[i], 1);
continue;
}
break;
@ -608,7 +608,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
ret = xform_func(pkts[i], sa, &priv->cop);
if (unlikely(ret)) {
rte_pktmbuf_free(pkts[i]);
free_pkts(&pkts[i], 1);
continue;
}
@ -643,7 +643,7 @@ ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
sa = priv->sa;
ret = xform_func(pkt, sa, &priv->cop);
if (unlikely(ret)) {
rte_pktmbuf_free(pkt);
free_pkts(&pkt, 1);
continue;
}
pkts[nb_pkts++] = pkt;
@ -690,13 +690,13 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
RTE_SECURITY_ACTION_TYPE_NONE) {
ret = xform_func(pkt, sa, cops[j]);
if (unlikely(ret)) {
rte_pktmbuf_free(pkt);
free_pkts(&pkt, 1);
continue;
}
} else if (ipsec_get_action_type(sa) ==
RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
if (cops[j]->status) {
rte_pktmbuf_free(pkt);
free_pkts(&pkt, 1);
continue;
}
}
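With the same substitution applied in ipsec.c, drops on the crypto path (failed enqueues, missing sessions, transform errors, bad op status) are now reflected in the per-core dropped counter as well. A simplified sketch of the enqueue case (not the exact enqueue_cop_burst() logic; assumes rte_cryptodev.h and the free_pkts() helper from ipsec-secgw.h):

static void
drop_unqueued_ops(uint8_t dev_id, uint16_t qp_id,
		  struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
	uint16_t i;

	/* ops the cryptodev did not accept: free the source mbuf and
	 * count it as dropped via free_pkts() */
	for (i = enq; i < nb_ops; i++)
		free_pkts(&ops[i]->sym->m_src, 1);
}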

examples/ipsec-secgw/ipsec_process.c

@ -12,22 +12,13 @@
#include <rte_mbuf.h>
#include "ipsec.h"
#include "ipsec-secgw.h"
#define SATP_OUT_IPV4(t) \
((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
(((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
/* helper routine to free bulk of packets */
static inline void
free_pkts(struct rte_mbuf *mb[], uint32_t n)
{
uint32_t i;
for (i = 0; i != n; i++)
rte_pktmbuf_free(mb[i]);
}
/* helper routine to free bulk of crypto-ops and related packets */
static inline void
free_cops(struct rte_crypto_op *cop[], uint32_t n)