app/testpmd: cleanup internal Tx offloads flags field

The tx_ol_flags field was used to control the set of Tx offloads
requested on a port. After the conversion to the new ethdev Tx offloads
API it is no longer needed, as the offload configuration is stored in
the ethdev structs.

Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Author:    Shahaf Shuler
Date:      2018-01-10 11:09:14 +02:00
Committer: Thomas Monjalon
Commit:    3eecba267c (parent: b62678f7a3)
8 changed files with 59 additions and 88 deletions
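
For context only (not part of this patch): a minimal sketch of how an application requests Tx offloads through the ethdev structs that testpmd now relies on instead of a private tx_ol_flags field. The helper name, the single Rx/Tx queue pair and the particular checksum offloads chosen here are illustrative assumptions.

#include <string.h>
#include <rte_ethdev.h>

/* Request a set of Tx checksum offloads on a stopped port, keeping only
 * those the PMD actually advertises in its capabilities. */
static int
request_tx_csum_offloads(uint16_t port_id)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf conf;
        uint64_t wanted = DEV_TX_OFFLOAD_IPV4_CKSUM |
                          DEV_TX_OFFLOAD_UDP_CKSUM |
                          DEV_TX_OFFLOAD_TCP_CKSUM;

        memset(&conf, 0, sizeof(conf));
        rte_eth_dev_info_get(port_id, &dev_info);

        /* The requested offloads live in rte_eth_conf, not in an
         * application-private flag field. */
        conf.txmode.offloads = wanted & dev_info.tx_offload_capa;

        /* 1 Rx queue, 1 Tx queue; queue setup and start follow as usual. */
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
}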

app/test-pmd/cmdline.c

@@ -3670,45 +3670,45 @@ static void
csum_show(int port_id)
{
struct rte_eth_dev_info dev_info;
uint16_t ol_flags;
uint64_t tx_offloads;
ol_flags = ports[port_id].tx_ol_flags;
tx_offloads = ports[port_id].dev_conf.txmode.offloads;
printf("Parse tunnel is %s\n",
(ol_flags & TESTPMD_TX_OFFLOAD_PARSE_TUNNEL) ? "on" : "off");
(ports[port_id].parse_tunnel) ? "on" : "off");
printf("IP checksum offload is %s\n",
(ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM) ? "hw" : "sw");
(tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
printf("UDP checksum offload is %s\n",
(ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
printf("TCP checksum offload is %s\n",
(ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
printf("SCTP checksum offload is %s\n",
(ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
printf("Outer-Ip checksum offload is %s\n",
(ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) ? "hw" : "sw");
(tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
/* display warnings if configuration is not supported by the NIC */
rte_eth_dev_info_get(port_id, &dev_info);
if ((ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM) &&
if ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) {
printf("Warning: hardware IP checksum enabled but not "
"supported by port %d\n", port_id);
}
if ((ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) &&
if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
printf("Warning: hardware UDP checksum enabled but not "
"supported by port %d\n", port_id);
}
if ((ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) &&
if ((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
printf("Warning: hardware TCP checksum enabled but not "
"supported by port %d\n", port_id);
}
if ((ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) &&
if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) == 0) {
printf("Warning: hardware SCTP checksum enabled but not "
"supported by port %d\n", port_id);
}
if ((ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) &&
if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
printf("Warning: hardware outer IP checksum enabled but not "
"supported by port %d\n", port_id);
@@ -3722,7 +3722,6 @@ cmd_csum_parsed(void *parsed_result,
{
struct cmd_csum_result *res = parsed_result;
int hw = 0;
uint16_t mask = 0;
uint64_t csum_offloads = 0;
if (port_id_is_invalid(res->port_id, ENABLED_WARN)) {
@@ -3740,28 +3739,21 @@ cmd_csum_parsed(void *parsed_result,
hw = 1;
if (!strcmp(res->proto, "ip")) {
mask = TESTPMD_TX_OFFLOAD_IP_CKSUM;
csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
} else if (!strcmp(res->proto, "udp")) {
mask = TESTPMD_TX_OFFLOAD_UDP_CKSUM;
csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
} else if (!strcmp(res->proto, "tcp")) {
mask = TESTPMD_TX_OFFLOAD_TCP_CKSUM;
csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
} else if (!strcmp(res->proto, "sctp")) {
mask = TESTPMD_TX_OFFLOAD_SCTP_CKSUM;
csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
} else if (!strcmp(res->proto, "outer-ip")) {
mask = TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM;
csum_offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
}
if (hw) {
ports[res->port_id].tx_ol_flags |= mask;
ports[res->port_id].dev_conf.txmode.offloads |=
csum_offloads;
} else {
ports[res->port_id].tx_ol_flags &= (~mask);
ports[res->port_id].dev_conf.txmode.offloads &=
(~csum_offloads);
}
@@ -3838,11 +3830,9 @@ cmd_csum_tunnel_parsed(void *parsed_result,
return;
if (!strcmp(res->onoff, "on"))
ports[res->port_id].tx_ol_flags |=
TESTPMD_TX_OFFLOAD_PARSE_TUNNEL;
ports[res->port_id].parse_tunnel = 1;
else
ports[res->port_id].tx_ol_flags &=
(~TESTPMD_TX_OFFLOAD_PARSE_TUNNEL);
ports[res->port_id].parse_tunnel = 0;
csum_show(res->port_id);
}
@@ -4042,12 +4032,11 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
*/
check_tunnel_tso_nic_support(res->port_id);
if (!(ports[res->port_id].tx_ol_flags &
TESTPMD_TX_OFFLOAD_PARSE_TUNNEL))
if (!ports[res->port_id].parse_tunnel)
printf("Warning: csum parse_tunnel must be set "
"so that tunneled packets are recognized\n");
if (!(ports[res->port_id].tx_ol_flags &
TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM))
if (!(ports[res->port_id].dev_conf.txmode.offloads &
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
printf("Warning: csum set outer-ip must be set to hw "
"if outer L3 is IPv4; not necessary for IPv6\n");
}
@@ -13099,7 +13088,6 @@ cmd_set_macsec_offload_on_parsed(
return;
}
ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_MACSEC;
ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MACSEC_INSERT;
#ifdef RTE_LIBRTE_IXGBE_PMD
ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
@@ -13189,7 +13177,6 @@ cmd_set_macsec_offload_off_parsed(
return;
}
ports[port_id].tx_ol_flags &= ~TESTPMD_TX_OFFLOAD_MACSEC;
ports[port_id].dev_conf.txmode.offloads &=
~DEV_TX_OFFLOAD_MACSEC_INSERT;
#ifdef RTE_LIBRTE_IXGBE_PMD

app/test-pmd/config.c

@@ -2818,7 +2818,6 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
}
tx_vlan_reset(port_id);
ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN;
ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
ports[port_id].tx_vlan_id = vlan_id;
}
@@ -2842,7 +2841,6 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
}
tx_vlan_reset(port_id);
ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ;
ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_QINQ_INSERT;
ports[port_id].tx_vlan_id = vlan_id;
ports[port_id].tx_vlan_id_outer = vlan_id_outer;
@@ -2853,8 +2851,6 @@ tx_vlan_reset(portid_t port_id)
{
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN |
TESTPMD_TX_OFFLOAD_INSERT_QINQ);
ports[port_id].dev_conf.txmode.offloads &=
~(DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT);

app/test-pmd/csumonly.c

@@ -316,7 +316,7 @@ parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
* depending on the testpmd command line configuration */
static uint64_t
process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
uint16_t testpmd_ol_flags)
uint64_t tx_offloads)
{
struct ipv4_hdr *ipv4_hdr = l3_hdr;
struct udp_hdr *udp_hdr;
@@ -347,7 +347,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
ol_flags |= PKT_TX_IP_CKSUM;
} else {
if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
ol_flags |= PKT_TX_IP_CKSUM;
else
ipv4_hdr->hdr_checksum =
@@ -363,7 +363,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
/* do not recalculate udp cksum if it was 0 */
if (udp_hdr->dgram_cksum != 0) {
udp_hdr->dgram_cksum = 0;
if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM)
if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
ol_flags |= PKT_TX_UDP_CKSUM;
else {
udp_hdr->dgram_cksum =
@@ -376,7 +376,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
tcp_hdr->cksum = 0;
if (tso_segsz)
ol_flags |= PKT_TX_TCP_SEG;
else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM)
else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
ol_flags |= PKT_TX_TCP_CKSUM;
else {
tcp_hdr->cksum =
@@ -390,7 +390,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
sctp_hdr->cksum = 0;
/* sctp payload must be a multiple of 4 to be
* offloaded */
if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) &&
if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
((ipv4_hdr->total_length & 0x3) == 0)) {
ol_flags |= PKT_TX_SCTP_CKSUM;
} else {
@@ -405,7 +405,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
/* Calculate the checksum of outer header */
static uint64_t
process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
uint16_t testpmd_ol_flags, int tso_enabled)
uint64_t tx_offloads, int tso_enabled)
{
struct ipv4_hdr *ipv4_hdr = outer_l3_hdr;
struct ipv6_hdr *ipv6_hdr = outer_l3_hdr;
@@ -416,7 +416,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
ipv4_hdr->hdr_checksum = 0;
ol_flags |= PKT_TX_OUTER_IPV4;
if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
if (tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
ol_flags |= PKT_TX_OUTER_IP_CKSUM;
else
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
@@ -646,7 +646,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
uint16_t nb_prep;
uint16_t i;
uint64_t rx_ol_flags, tx_ol_flags;
uint16_t testpmd_ol_flags;
uint64_t tx_offloads;
uint32_t retry;
uint32_t rx_bad_ip_csum;
uint32_t rx_bad_l4_csum;
@@ -678,7 +678,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
gro_enable = gro_ports[fs->rx_port].enable;
txp = &ports[fs->tx_port];
testpmd_ol_flags = txp->tx_ol_flags;
tx_offloads = txp->dev_conf.txmode.offloads;
memset(&info, 0, sizeof(info));
info.tso_segsz = txp->tso_segsz;
info.tunnel_tso_segsz = txp->tunnel_tso_segsz;
@@ -714,7 +714,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
l3_hdr = (char *)eth_hdr + info.l2_len;
/* check if it's a supported tunnel */
if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_PARSE_TUNNEL) {
if (txp->parse_tunnel) {
if (info.l4_proto == IPPROTO_UDP) {
struct udp_hdr *udp_hdr;
@@ -754,14 +754,14 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
/* process checksums of inner headers first */
tx_ol_flags |= process_inner_cksums(l3_hdr, &info,
testpmd_ol_flags);
tx_offloads);
/* Then process outer headers if any. Note that the software
* checksum will be wrong if one of the inner checksums is
* processed in hardware. */
if (info.is_tunnel == 1) {
tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
testpmd_ol_flags,
tx_offloads,
!!(tx_ol_flags & PKT_TX_TCP_SEG));
}
@@ -769,8 +769,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (info.is_tunnel == 1) {
if (info.tunnel_tso_segsz ||
(testpmd_ol_flags &
TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) ||
(tx_offloads &
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
(tx_ol_flags & PKT_TX_OUTER_IPV6)) {
m->outer_l2_len = info.outer_l2_len;
m->outer_l3_len = info.outer_l3_len;
@@ -832,17 +832,17 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
rte_be_to_cpu_16(info.outer_ethertype),
info.outer_l3_len);
/* dump tx packet info */
if ((testpmd_ol_flags & (TESTPMD_TX_OFFLOAD_IP_CKSUM |
TESTPMD_TX_OFFLOAD_UDP_CKSUM |
TESTPMD_TX_OFFLOAD_TCP_CKSUM |
TESTPMD_TX_OFFLOAD_SCTP_CKSUM)) ||
if ((tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_SCTP_CKSUM)) ||
info.tso_segsz != 0)
printf("tx: m->l2_len=%d m->l3_len=%d "
"m->l4_len=%d\n",
m->l2_len, m->l3_len, m->l4_len);
if (info.is_tunnel == 1) {
if ((testpmd_ol_flags &
TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) ||
if ((tx_offloads &
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
(tx_ol_flags & PKT_TX_OUTER_IPV6))
printf("tx: m->outer_l2_len=%d "
"m->outer_l3_len=%d\n",

app/test-pmd/flowgen.c

@@ -129,6 +129,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
uint16_t nb_pkt;
uint16_t i;
uint32_t retry;
uint64_t tx_offloads;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
uint64_t end_tsc;
@@ -152,11 +153,12 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
vlan_tci = ports[fs->tx_port].tx_vlan_id;
vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer;
if (ports[fs->tx_port].tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
ol_flags = PKT_TX_VLAN_PKT;
if (ports[fs->tx_port].tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ_PKT;
if (ports[fs->tx_port].tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {

app/test-pmd/macfwd.c

@@ -55,6 +55,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
uint16_t nb_tx;
uint16_t i;
uint64_t ol_flags = 0;
uint64_t tx_offloads;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
uint64_t end_tsc;
@@ -78,11 +79,12 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
#endif
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
tx_offloads = txp->dev_conf.txmode.offloads;
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
ol_flags = PKT_TX_VLAN_PKT;
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ_PKT;
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
for (i = 0; i < nb_rx; i++) {
if (likely(i < nb_rx - 1))

app/test-pmd/macswap.c

@@ -84,6 +84,7 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
uint16_t i;
uint32_t retry;
uint64_t ol_flags = 0;
uint64_t tx_offloads;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
uint64_t end_tsc;
@@ -107,11 +108,12 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
#endif
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
tx_offloads = txp->dev_conf.txmode.offloads;
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
ol_flags = PKT_TX_VLAN_PKT;
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ_PKT;
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
for (i = 0; i < nb_rx; i++) {
if (likely(i < nb_rx - 1))

app/test-pmd/testpmd.h

@@ -109,26 +109,6 @@ struct fwd_stream {
#endif
};
/** Offload IP checksum in csum forward engine */
#define TESTPMD_TX_OFFLOAD_IP_CKSUM 0x0001
/** Offload UDP checksum in csum forward engine */
#define TESTPMD_TX_OFFLOAD_UDP_CKSUM 0x0002
/** Offload TCP checksum in csum forward engine */
#define TESTPMD_TX_OFFLOAD_TCP_CKSUM 0x0004
/** Offload SCTP checksum in csum forward engine */
#define TESTPMD_TX_OFFLOAD_SCTP_CKSUM 0x0008
/** Offload outer IP checksum in csum forward engine for recognized tunnels */
#define TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM 0x0010
/** Parse tunnel in csum forward engine. If set, dissect tunnel headers
* of rx packets. If not set, treat inner headers as payload. */
#define TESTPMD_TX_OFFLOAD_PARSE_TUNNEL 0x0020
/** Insert VLAN header in forward engine */
#define TESTPMD_TX_OFFLOAD_INSERT_VLAN 0x0040
/** Insert double VLAN header in forward engine */
#define TESTPMD_TX_OFFLOAD_INSERT_QINQ 0x0080
/** Offload MACsec in forward engine */
#define TESTPMD_TX_OFFLOAD_MACSEC 0x0100
/** Descriptor for a single flow. */
struct port_flow {
size_t size; /**< Allocated space including data[]. */
@@ -186,7 +166,7 @@ struct rte_port {
struct fwd_stream *rx_stream; /**< Port RX stream, if unique */
struct fwd_stream *tx_stream; /**< Port TX stream, if unique */
unsigned int socket_id; /**< For NUMA support */
uint16_t tx_ol_flags;/**< TX Offload Flags (TESTPMD_TX_OFFLOAD...). */
uint16_t parse_tunnel:1; /**< Parse internal headers */
uint16_t tso_segsz; /**< Segmentation offload MSS for non-tunneled packets. */
uint16_t tunnel_tso_segsz; /**< Segmentation offload MSS for tunneled pkts. */
uint16_t tx_vlan_id;/**< The tag ID */

app/test-pmd/txonly.c

@@ -165,6 +165,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
uint32_t retry;
uint64_t ol_flags = 0;
uint8_t i;
uint64_t tx_offloads;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
uint64_t end_tsc;
@@ -178,13 +179,14 @@ pkt_burst_transmit(struct fwd_stream *fs)
mbp = current_fwd_lcore()->mbp;
txp = &ports[fs->tx_port];
tx_offloads = txp->dev_conf.txmode.offloads;
vlan_tci = txp->tx_vlan_id;
vlan_tci_outer = txp->tx_vlan_id_outer;
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
ol_flags = PKT_TX_VLAN_PKT;
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ_PKT;
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
pkt = rte_mbuf_raw_alloc(mbp);