mbuf: add namespace to offload flags

Fix the mbuf offload flags namespace by adding an RTE_ prefix to the
names. The old flags remain usable, but a deprecation warning is
issued at compile time.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Somnath Kotur <somnath.kotur@broadcom.com>
Commit: daa02b5cdd
Parent: 735155ee3b
Author: Olivier Matz <olivier.matz@6wind.com>
Date: 2021-10-15 21:24:08 +02:00
Committed by: David Marchand
172 changed files with 3121 additions and 3016 deletions
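
Before the per-file hunks, here is a minimal sketch of what the rename
means for application code (hypothetical helper; the flag definitions
come from rte_mbuf_core.h, pulled in via rte_mbuf.h):

    #include <rte_mbuf.h>

    static void
    request_ipv4_cksum(struct rte_mbuf *m)
    {
            /* Old spelling, kept until DPDK 22.11 but now warned about
             * at compile time:
             *     m->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
             * New namespaced spelling introduced by this commit: */
            m->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4;
    }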


@ -481,12 +481,12 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV4)) {
ipv4_hdr = l3_hdr;
ol_flags |= PKT_TX_IPV4;
ol_flags |= RTE_MBUF_F_TX_IPV4;
if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
ol_flags |= PKT_TX_IP_CKSUM;
ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
} else {
if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
ol_flags |= PKT_TX_IP_CKSUM;
ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
} else {
ipv4_hdr->hdr_checksum = 0;
ipv4_hdr->hdr_checksum =
@ -494,7 +494,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
}
}
} else if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV6))
ol_flags |= PKT_TX_IPV6;
ol_flags |= RTE_MBUF_F_TX_IPV6;
else
return 0; /* packet type not supported, nothing to do */
@ -503,7 +503,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
/* do not recalculate udp cksum if it was 0 */
if (udp_hdr->dgram_cksum != 0) {
if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
ol_flags |= PKT_TX_UDP_CKSUM;
ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
} else {
udp_hdr->dgram_cksum = 0;
udp_hdr->dgram_cksum =
@ -512,13 +512,13 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
}
}
if (info->gso_enable)
ol_flags |= PKT_TX_UDP_SEG;
ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
} else if (info->l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
if (tso_segsz)
ol_flags |= PKT_TX_TCP_SEG;
ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
else if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
ol_flags |= PKT_TX_TCP_CKSUM;
ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
} else {
tcp_hdr->cksum = 0;
tcp_hdr->cksum =
@ -526,7 +526,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
info->ethertype);
}
if (info->gso_enable)
ol_flags |= PKT_TX_TCP_SEG;
ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
} else if (info->l4_proto == IPPROTO_SCTP) {
sctp_hdr = (struct rte_sctp_hdr *)
((char *)l3_hdr + info->l3_len);
@ -534,7 +534,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
* offloaded */
if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
((ipv4_hdr->total_length & 0x3) == 0)) {
ol_flags |= PKT_TX_SCTP_CKSUM;
ol_flags |= RTE_MBUF_F_TX_SCTP_CKSUM;
} else {
sctp_hdr->cksum = 0;
/* XXX implement CRC32c, example available in
@ -557,14 +557,14 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4)) {
ipv4_hdr->hdr_checksum = 0;
ol_flags |= PKT_TX_OUTER_IPV4;
ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
ol_flags |= PKT_TX_OUTER_IP_CKSUM;
ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
else
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
} else
ol_flags |= PKT_TX_OUTER_IPV6;
ol_flags |= RTE_MBUF_F_TX_OUTER_IPV6;
if (info->outer_l4_proto != IPPROTO_UDP)
return ol_flags;
@ -573,7 +573,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
((char *)outer_l3_hdr + info->outer_l3_len);
if (tso_enabled)
ol_flags |= PKT_TX_TCP_SEG;
ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
/* Skip SW outer UDP checksum generation if HW supports it */
if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) {
@ -584,7 +584,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
udp_hdr->dgram_cksum
= rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
ol_flags |= PKT_TX_OUTER_UDP_CKSUM;
ol_flags |= RTE_MBUF_F_TX_OUTER_UDP_CKSUM;
return ol_flags;
}
@ -855,17 +855,17 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
info.is_tunnel = 0;
info.pkt_len = rte_pktmbuf_pkt_len(m);
tx_ol_flags = m->ol_flags &
(IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF);
(RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL);
rx_ol_flags = m->ol_flags;
/* Update the L3/L4 checksum error packet statistics */
if ((rx_ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD)
if ((rx_ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) == RTE_MBUF_F_RX_IP_CKSUM_BAD)
rx_bad_ip_csum += 1;
if ((rx_ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
if ((rx_ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) == RTE_MBUF_F_RX_L4_CKSUM_BAD)
rx_bad_l4_csum += 1;
if (rx_ol_flags & PKT_RX_OUTER_L4_CKSUM_BAD)
if (rx_ol_flags & RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD)
rx_bad_outer_l4_csum += 1;
if (rx_ol_flags & PKT_RX_OUTER_IP_CKSUM_BAD)
if (rx_ol_flags & RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD)
rx_bad_outer_ip_csum += 1;
/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
@ -888,26 +888,26 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
((char *)l3_hdr + info.l3_len);
parse_gtp(udp_hdr, &info);
if (info.is_tunnel) {
tx_ol_flags |= PKT_TX_TUNNEL_GTP;
tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_GTP;
goto tunnel_update;
}
parse_vxlan_gpe(udp_hdr, &info);
if (info.is_tunnel) {
tx_ol_flags |=
PKT_TX_TUNNEL_VXLAN_GPE;
RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE;
goto tunnel_update;
}
parse_vxlan(udp_hdr, &info,
m->packet_type);
if (info.is_tunnel) {
tx_ol_flags |=
PKT_TX_TUNNEL_VXLAN;
RTE_MBUF_F_TX_TUNNEL_VXLAN;
goto tunnel_update;
}
parse_geneve(udp_hdr, &info);
if (info.is_tunnel) {
tx_ol_flags |=
PKT_TX_TUNNEL_GENEVE;
RTE_MBUF_F_TX_TUNNEL_GENEVE;
goto tunnel_update;
}
} else if (info.l4_proto == IPPROTO_GRE) {
@ -917,14 +917,14 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
((char *)l3_hdr + info.l3_len);
parse_gre(gre_hdr, &info);
if (info.is_tunnel)
tx_ol_flags |= PKT_TX_TUNNEL_GRE;
tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_GRE;
} else if (info.l4_proto == IPPROTO_IPIP) {
void *encap_ip_hdr;
encap_ip_hdr = (char *)l3_hdr + info.l3_len;
parse_encap_ip(encap_ip_hdr, &info);
if (info.is_tunnel)
tx_ol_flags |= PKT_TX_TUNNEL_IPIP;
tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_IPIP;
}
}
@ -950,7 +950,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (info.is_tunnel == 1) {
tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
tx_offloads,
!!(tx_ol_flags & PKT_TX_TCP_SEG));
!!(tx_ol_flags & RTE_MBUF_F_TX_TCP_SEG));
}
/* step 3: fill the mbuf meta data (flags and header lengths) */
@ -1014,7 +1014,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
"l4_proto=%d l4_len=%d flags=%s\n",
info.l2_len, rte_be_to_cpu_16(info.ethertype),
info.l3_len, info.l4_proto, info.l4_len, buf);
if (rx_ol_flags & PKT_RX_LRO)
if (rx_ol_flags & RTE_MBUF_F_RX_LRO)
printf("rx: m->lro_segsz=%u\n", m->tso_segsz);
if (info.is_tunnel == 1)
printf("rx: outer_l2_len=%d outer_ethertype=%x "
@ -1035,17 +1035,17 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
(tx_offloads &
RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
(tx_ol_flags & PKT_TX_OUTER_IPV6))
(tx_ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))
printf("tx: m->outer_l2_len=%d "
"m->outer_l3_len=%d\n",
m->outer_l2_len,
m->outer_l3_len);
if (info.tunnel_tso_segsz != 0 &&
(m->ol_flags & PKT_TX_TCP_SEG))
(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
printf("tx: m->tso_segsz=%d\n",
m->tso_segsz);
} else if (info.tso_segsz != 0 &&
(m->ol_flags & PKT_TX_TCP_SEG))
(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
rte_get_tx_ol_flag_list(m->ol_flags, buf, sizeof(buf));
printf("tx: flags=%s", buf);


@ -100,11 +100,11 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
ol_flags |= PKT_TX_VLAN;
ol_flags |= RTE_MBUF_F_TX_VLAN;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ;
ol_flags |= RTE_MBUF_F_TX_QINQ;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
ol_flags |= RTE_MBUF_F_TX_MACSEC;
for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
if (!nb_pkt || !nb_clones) {
@ -152,7 +152,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
sizeof(*ip_hdr));
pkt->nb_segs = 1;
pkt->pkt_len = pkt_size;
pkt->ol_flags &= EXT_ATTACHED_MBUF;
pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
pkt->ol_flags |= ol_flags;
pkt->vlan_tci = vlan_tci;
pkt->vlan_tci_outer = vlan_tci_outer;
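
As a companion to the flowgen hunk, a hedged sketch of requesting
hardware VLAN/QinQ insertion with the new names (TCI values are
caller-supplied; the matching RTE_ETH_TX_OFFLOAD_* bits are assumed to
be enabled on the port):

    #include <rte_mbuf.h>

    static void
    request_vlan_insert(struct rte_mbuf *pkt, uint16_t tci,
                    uint16_t outer_tci)
    {
            pkt->ol_flags |= RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ;
            pkt->vlan_tci = tci;                     /* inner tag */
            pkt->vlan_tci_outer = outer_tci;         /* outer tag */
    }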


@ -114,7 +114,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
eth_hdr = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
if (! (mb->ol_flags & PKT_RX_IEEE1588_PTP)) {
if (!(mb->ol_flags & RTE_MBUF_F_RX_IEEE1588_PTP)) {
if (eth_type == RTE_ETHER_TYPE_1588) {
printf("Port %u Received PTP packet not filtered"
" by hardware\n",
@ -163,7 +163,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
* Check that the received PTP packet has been timestamped by the
* hardware.
*/
if (! (mb->ol_flags & PKT_RX_IEEE1588_TMST)) {
if (!(mb->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST)) {
printf("Port %u Received PTP packet not timestamped"
" by hardware\n",
fs->rx_port);
@ -183,7 +183,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
rte_ether_addr_copy(&addr, &eth_hdr->src_addr);
/* Forward PTP packet with hardware TX timestamp */
mb->ol_flags |= PKT_TX_IEEE1588_TMST;
mb->ol_flags |= RTE_MBUF_F_TX_IEEE1588_TMST;
fs->tx_packets += 1;
if (rte_eth_tx_burst(fs->rx_port, fs->tx_queue, &mb, 1) == 0) {
printf("Port %u sent PTP packet dropped\n", fs->rx_port);


@ -73,11 +73,11 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
txp = &ports[fs->tx_port];
tx_offloads = txp->dev_conf.txmode.offloads;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
ol_flags = PKT_TX_VLAN;
ol_flags = RTE_MBUF_F_TX_VLAN;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ;
ol_flags |= RTE_MBUF_F_TX_QINQ;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
ol_flags |= RTE_MBUF_F_TX_MACSEC;
for (i = 0; i < nb_rx; i++) {
if (likely(i < nb_rx - 1))
rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
@ -88,7 +88,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
&eth_hdr->dst_addr);
rte_ether_addr_copy(&ports[fs->tx_port].eth_addr,
&eth_hdr->src_addr);
mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
mb->ol_flags &= RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL;
mb->ol_flags |= ol_flags;
mb->l2_len = sizeof(struct rte_ether_hdr);
mb->l3_len = sizeof(struct rte_ipv4_hdr);


@ -11,11 +11,11 @@ ol_flags_init(uint64_t tx_offload)
uint64_t ol_flags = 0;
ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) ?
PKT_TX_VLAN : 0;
RTE_MBUF_F_TX_VLAN : 0;
ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) ?
PKT_TX_QINQ : 0;
RTE_MBUF_F_TX_QINQ : 0;
ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) ?
PKT_TX_MACSEC : 0;
RTE_MBUF_F_TX_MACSEC : 0;
return ol_flags;
}
@ -26,10 +26,10 @@ vlan_qinq_set(struct rte_mbuf *pkts[], uint16_t nb,
{
int i;
if (ol_flags & PKT_TX_VLAN)
if (ol_flags & RTE_MBUF_F_TX_VLAN)
for (i = 0; i < nb; i++)
pkts[i]->vlan_tci = vlan;
if (ol_flags & PKT_TX_QINQ)
if (ol_flags & RTE_MBUF_F_TX_QINQ)
for (i = 0; i < nb; i++)
pkts[i]->vlan_tci_outer = outer_vlan;
}
@ -37,7 +37,7 @@ vlan_qinq_set(struct rte_mbuf *pkts[], uint16_t nb,
static inline void
mbuf_field_set(struct rte_mbuf *mb, uint64_t ol_flags)
{
mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
mb->ol_flags &= RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL;
mb->ol_flags |= ol_flags;
mb->l2_len = sizeof(struct rte_ether_hdr);
mb->l3_len = sizeof(struct rte_ipv4_hdr);


@ -214,7 +214,7 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
rte_pktmbuf_reset_headroom(pkt);
pkt->data_len = tx_pkt_seg_lengths[0];
pkt->ol_flags &= EXT_ATTACHED_MBUF;
pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
pkt->ol_flags |= ol_flags;
pkt->vlan_tci = vlan_tci;
pkt->vlan_tci_outer = vlan_tci_outer;
@ -355,11 +355,11 @@ pkt_burst_transmit(struct fwd_stream *fs)
vlan_tci = txp->tx_vlan_id;
vlan_tci_outer = txp->tx_vlan_id_outer;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
ol_flags = PKT_TX_VLAN;
ol_flags = RTE_MBUF_F_TX_VLAN;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ;
ol_flags |= RTE_MBUF_F_TX_QINQ;
if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
ol_flags |= RTE_MBUF_F_TX_MACSEC;
/*
* Initialize Ethernet header.


@ -157,20 +157,20 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
eth_type, (unsigned int) mb->pkt_len,
(int)mb->nb_segs);
ol_flags = mb->ol_flags;
if (ol_flags & PKT_RX_RSS_HASH) {
if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) {
MKDUMPSTR(print_buf, buf_size, cur_len,
" - RSS hash=0x%x",
(unsigned int) mb->hash.rss);
MKDUMPSTR(print_buf, buf_size, cur_len,
" - RSS queue=0x%x", (unsigned int) queue);
}
if (ol_flags & PKT_RX_FDIR) {
if (ol_flags & RTE_MBUF_F_RX_FDIR) {
MKDUMPSTR(print_buf, buf_size, cur_len,
" - FDIR matched ");
if (ol_flags & PKT_RX_FDIR_ID)
if (ol_flags & RTE_MBUF_F_RX_FDIR_ID)
MKDUMPSTR(print_buf, buf_size, cur_len,
"ID=0x%x", mb->hash.fdir.hi);
else if (ol_flags & PKT_RX_FDIR_FLX)
else if (ol_flags & RTE_MBUF_F_RX_FDIR_FLX)
MKDUMPSTR(print_buf, buf_size, cur_len,
"flex bytes=0x%08x %08x",
mb->hash.fdir.hi, mb->hash.fdir.lo);
@ -182,18 +182,18 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
if (is_timestamp_enabled(mb))
MKDUMPSTR(print_buf, buf_size, cur_len,
" - timestamp %"PRIu64" ", get_timestamp(mb));
if (ol_flags & PKT_RX_QINQ)
if (ol_flags & RTE_MBUF_F_RX_QINQ)
MKDUMPSTR(print_buf, buf_size, cur_len,
" - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x",
mb->vlan_tci, mb->vlan_tci_outer);
else if (ol_flags & PKT_RX_VLAN)
else if (ol_flags & RTE_MBUF_F_RX_VLAN)
MKDUMPSTR(print_buf, buf_size, cur_len,
" - VLAN tci=0x%x", mb->vlan_tci);
if (!is_rx && (ol_flags & PKT_TX_DYNF_METADATA))
if (!is_rx && (ol_flags & RTE_MBUF_DYNFLAG_TX_METADATA))
MKDUMPSTR(print_buf, buf_size, cur_len,
" - Tx metadata: 0x%x",
*RTE_FLOW_DYNF_METADATA(mb));
if (is_rx && (ol_flags & PKT_RX_DYNF_METADATA))
if (is_rx && (ol_flags & RTE_MBUF_DYNFLAG_RX_METADATA))
MKDUMPSTR(print_buf, buf_size, cur_len,
" - Rx metadata: 0x%x",
*RTE_FLOW_DYNF_METADATA(mb));
@ -331,7 +331,7 @@ tx_pkt_set_md(uint16_t port_id, __rte_unused uint16_t queue,
for (i = 0; i < nb_pkts; i++) {
*RTE_FLOW_DYNF_METADATA(pkts[i]) =
ports[port_id].tx_metadata;
pkts[i]->ol_flags |= PKT_TX_DYNF_METADATA;
pkts[i]->ol_flags |= RTE_MBUF_DYNFLAG_TX_METADATA;
}
return nb_pkts;
}
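
dump_pkt_burst() above tests each flag individually; for a one-shot
textual dump of all Rx flags, rte_get_rx_ol_flag_list() does the
walking. A short sketch:

    #include <stdio.h>
    #include <rte_mbuf.h>

    static void
    print_rx_flags(const struct rte_mbuf *mb)
    {
            char buf[256];

            /* Returns 0 on success, -1 on error (e.g. buffer too small). */
            if (rte_get_rx_ol_flag_list(mb->ol_flags, buf, sizeof(buf)) == 0)
                    printf("rx flags: %s\n", buf);
    }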


@ -524,7 +524,7 @@ test_ipsec_td_verify(struct rte_mbuf *m, const struct ipsec_test_data *td,
if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
flags->ip_csum) {
if (m->ol_flags & PKT_RX_IP_CKSUM_GOOD)
if (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD)
ret = test_ipsec_l3_csum_verify(m);
else
ret = TEST_FAILED;
@ -537,7 +537,7 @@ test_ipsec_td_verify(struct rte_mbuf *m, const struct ipsec_test_data *td,
if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
flags->l4_csum) {
if (m->ol_flags & PKT_RX_L4_CKSUM_GOOD)
if (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD)
ret = test_ipsec_l4_csum_verify(m);
else
ret = TEST_FAILED;


@ -1622,8 +1622,8 @@ inline_outb_burst_null_null_check(struct ipsec_unitest_params *ut_params,
"ibuf pkt_len is not equal to obuf pkt_len");
/* check mbuf ol_flags */
TEST_ASSERT(ut_params->ibuf[j]->ol_flags & PKT_TX_SEC_OFFLOAD,
"ibuf PKT_TX_SEC_OFFLOAD is not set");
TEST_ASSERT(ut_params->ibuf[j]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD,
"ibuf RTE_MBUF_F_TX_SEC_OFFLOAD is not set");
}
return 0;
}


@ -1495,7 +1495,7 @@ test_get_rx_ol_flag_list(void)
GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* Test case to check with zero buffer len */
ret = rte_get_rx_ol_flag_list(PKT_RX_L4_CKSUM_MASK, buf, 0);
ret = rte_get_rx_ol_flag_list(RTE_MBUF_F_RX_L4_CKSUM_MASK, buf, 0);
if (ret != -1)
GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
@ -1526,7 +1526,8 @@ test_get_rx_ol_flag_list(void)
"non-zero, buffer should not be empty");
/* Test case to check with valid mask value */
ret = rte_get_rx_ol_flag_list(PKT_RX_SEC_OFFLOAD, buf, sizeof(buf));
ret = rte_get_rx_ol_flag_list(RTE_MBUF_F_RX_SEC_OFFLOAD, buf,
sizeof(buf));
if (ret != 0)
GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
@ -1553,7 +1554,7 @@ test_get_tx_ol_flag_list(void)
GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* Test case to check with zero buffer len */
ret = rte_get_tx_ol_flag_list(PKT_TX_IP_CKSUM, buf, 0);
ret = rte_get_tx_ol_flag_list(RTE_MBUF_F_TX_IP_CKSUM, buf, 0);
if (ret != -1)
GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
@ -1585,7 +1586,8 @@ test_get_tx_ol_flag_list(void)
"non-zero, buffer should not be empty");
/* Test case to check with valid mask value */
ret = rte_get_tx_ol_flag_list(PKT_TX_UDP_CKSUM, buf, sizeof(buf));
ret = rte_get_tx_ol_flag_list(RTE_MBUF_F_TX_UDP_CKSUM, buf,
sizeof(buf));
if (ret != 0)
GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
@ -1611,28 +1613,28 @@ test_get_rx_ol_flag_name(void)
uint16_t i;
const char *flag_str = NULL;
const struct flag_name rx_flags[] = {
VAL_NAME(PKT_RX_VLAN),
VAL_NAME(PKT_RX_RSS_HASH),
VAL_NAME(PKT_RX_FDIR),
VAL_NAME(PKT_RX_L4_CKSUM_BAD),
VAL_NAME(PKT_RX_L4_CKSUM_GOOD),
VAL_NAME(PKT_RX_L4_CKSUM_NONE),
VAL_NAME(PKT_RX_IP_CKSUM_BAD),
VAL_NAME(PKT_RX_IP_CKSUM_GOOD),
VAL_NAME(PKT_RX_IP_CKSUM_NONE),
VAL_NAME(PKT_RX_OUTER_IP_CKSUM_BAD),
VAL_NAME(PKT_RX_VLAN_STRIPPED),
VAL_NAME(PKT_RX_IEEE1588_PTP),
VAL_NAME(PKT_RX_IEEE1588_TMST),
VAL_NAME(PKT_RX_FDIR_ID),
VAL_NAME(PKT_RX_FDIR_FLX),
VAL_NAME(PKT_RX_QINQ_STRIPPED),
VAL_NAME(PKT_RX_LRO),
VAL_NAME(PKT_RX_SEC_OFFLOAD),
VAL_NAME(PKT_RX_SEC_OFFLOAD_FAILED),
VAL_NAME(PKT_RX_OUTER_L4_CKSUM_BAD),
VAL_NAME(PKT_RX_OUTER_L4_CKSUM_GOOD),
VAL_NAME(PKT_RX_OUTER_L4_CKSUM_INVALID),
VAL_NAME(RTE_MBUF_F_RX_VLAN),
VAL_NAME(RTE_MBUF_F_RX_RSS_HASH),
VAL_NAME(RTE_MBUF_F_RX_FDIR),
VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_BAD),
VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_GOOD),
VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_NONE),
VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_BAD),
VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_GOOD),
VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_NONE),
VAL_NAME(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD),
VAL_NAME(RTE_MBUF_F_RX_VLAN_STRIPPED),
VAL_NAME(RTE_MBUF_F_RX_IEEE1588_PTP),
VAL_NAME(RTE_MBUF_F_RX_IEEE1588_TMST),
VAL_NAME(RTE_MBUF_F_RX_FDIR_ID),
VAL_NAME(RTE_MBUF_F_RX_FDIR_FLX),
VAL_NAME(RTE_MBUF_F_RX_QINQ_STRIPPED),
VAL_NAME(RTE_MBUF_F_RX_LRO),
VAL_NAME(RTE_MBUF_F_RX_SEC_OFFLOAD),
VAL_NAME(RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED),
VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD),
VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD),
VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID),
};
/* Test case to check with valid flag */
@ -1663,31 +1665,31 @@ test_get_tx_ol_flag_name(void)
uint16_t i;
const char *flag_str = NULL;
const struct flag_name tx_flags[] = {
VAL_NAME(PKT_TX_VLAN),
VAL_NAME(PKT_TX_IP_CKSUM),
VAL_NAME(PKT_TX_TCP_CKSUM),
VAL_NAME(PKT_TX_SCTP_CKSUM),
VAL_NAME(PKT_TX_UDP_CKSUM),
VAL_NAME(PKT_TX_IEEE1588_TMST),
VAL_NAME(PKT_TX_TCP_SEG),
VAL_NAME(PKT_TX_IPV4),
VAL_NAME(PKT_TX_IPV6),
VAL_NAME(PKT_TX_OUTER_IP_CKSUM),
VAL_NAME(PKT_TX_OUTER_IPV4),
VAL_NAME(PKT_TX_OUTER_IPV6),
VAL_NAME(PKT_TX_TUNNEL_VXLAN),
VAL_NAME(PKT_TX_TUNNEL_GRE),
VAL_NAME(PKT_TX_TUNNEL_IPIP),
VAL_NAME(PKT_TX_TUNNEL_GENEVE),
VAL_NAME(PKT_TX_TUNNEL_MPLSINUDP),
VAL_NAME(PKT_TX_TUNNEL_VXLAN_GPE),
VAL_NAME(PKT_TX_TUNNEL_IP),
VAL_NAME(PKT_TX_TUNNEL_UDP),
VAL_NAME(PKT_TX_QINQ),
VAL_NAME(PKT_TX_MACSEC),
VAL_NAME(PKT_TX_SEC_OFFLOAD),
VAL_NAME(PKT_TX_UDP_SEG),
VAL_NAME(PKT_TX_OUTER_UDP_CKSUM),
VAL_NAME(RTE_MBUF_F_TX_VLAN),
VAL_NAME(RTE_MBUF_F_TX_IP_CKSUM),
VAL_NAME(RTE_MBUF_F_TX_TCP_CKSUM),
VAL_NAME(RTE_MBUF_F_TX_SCTP_CKSUM),
VAL_NAME(RTE_MBUF_F_TX_UDP_CKSUM),
VAL_NAME(RTE_MBUF_F_TX_IEEE1588_TMST),
VAL_NAME(RTE_MBUF_F_TX_TCP_SEG),
VAL_NAME(RTE_MBUF_F_TX_IPV4),
VAL_NAME(RTE_MBUF_F_TX_IPV6),
VAL_NAME(RTE_MBUF_F_TX_OUTER_IP_CKSUM),
VAL_NAME(RTE_MBUF_F_TX_OUTER_IPV4),
VAL_NAME(RTE_MBUF_F_TX_OUTER_IPV6),
VAL_NAME(RTE_MBUF_F_TX_TUNNEL_VXLAN),
VAL_NAME(RTE_MBUF_F_TX_TUNNEL_GRE),
VAL_NAME(RTE_MBUF_F_TX_TUNNEL_IPIP),
VAL_NAME(RTE_MBUF_F_TX_TUNNEL_GENEVE),
VAL_NAME(RTE_MBUF_F_TX_TUNNEL_MPLSINUDP),
VAL_NAME(RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE),
VAL_NAME(RTE_MBUF_F_TX_TUNNEL_IP),
VAL_NAME(RTE_MBUF_F_TX_TUNNEL_UDP),
VAL_NAME(RTE_MBUF_F_TX_QINQ),
VAL_NAME(RTE_MBUF_F_TX_MACSEC),
VAL_NAME(RTE_MBUF_F_TX_SEC_OFFLOAD),
VAL_NAME(RTE_MBUF_F_TX_UDP_SEG),
VAL_NAME(RTE_MBUF_F_TX_OUTER_UDP_CKSUM),
};
/* Test case to check with valid flag */
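
The table above exercises rte_get_tx_ol_flag_name() for every renamed
Tx flag; its single-flag use in application code might look like this
sketch:

    #include <stdio.h>
    #include <rte_mbuf.h>

    static void
    print_tx_flag(uint64_t flag)
    {
            const char *name = rte_get_tx_ol_flag_name(flag);

            printf("%s\n", name != NULL ? name : "unknown flag");
    }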
@ -1755,8 +1757,8 @@ test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool)
/* test to validate if IP checksum is counted only for IPV4 packet */
/* set both IP checksum and IPV6 flags */
ol_flags |= PKT_TX_IP_CKSUM;
ol_flags |= PKT_TX_IPV6;
ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
ol_flags |= RTE_MBUF_F_TX_IPV6;
if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_CKSUM_IPV6_SET",
pktmbuf_pool,
ol_flags, 0, -EINVAL) < 0)
@ -1765,14 +1767,14 @@ test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool)
ol_flags = 0;
/* test to validate if IP type is set when required */
ol_flags |= PKT_TX_L4_MASK;
ol_flags |= RTE_MBUF_F_TX_L4_MASK;
if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
pktmbuf_pool,
ol_flags, 0, -EINVAL) < 0)
GOTO_FAIL("%s failed: IP type is not set.\n", __func__);
/* test if IP type is set when TCP SEG is on */
ol_flags |= PKT_TX_TCP_SEG;
ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
pktmbuf_pool,
ol_flags, 0, -EINVAL) < 0)
@ -1780,8 +1782,8 @@ test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool)
ol_flags = 0;
/* test to confirm IP type (IPV4/IPV6) is set */
ol_flags = PKT_TX_L4_MASK;
ol_flags |= PKT_TX_IPV6;
ol_flags = RTE_MBUF_F_TX_L4_MASK;
ol_flags |= RTE_MBUF_F_TX_IPV6;
if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_SET",
pktmbuf_pool,
ol_flags, 0, 0) < 0)
@ -1789,15 +1791,15 @@ test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool)
ol_flags = 0;
/* test to check TSO segment size is non-zero */
ol_flags |= PKT_TX_IPV4;
ol_flags |= PKT_TX_TCP_SEG;
ol_flags |= RTE_MBUF_F_TX_IPV4;
ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
/* set 0 tso segment size */
if (test_mbuf_validate_tx_offload("MBUF_TEST_NULL_TSO_SEGSZ",
pktmbuf_pool,
ol_flags, 0, -EINVAL) < 0)
GOTO_FAIL("%s failed: tso segment size is null.\n", __func__);
/* retain IPV4 and PKT_TX_TCP_SEG mask */
/* retain IPV4 and RTE_MBUF_F_TX_TCP_SEG mask */
/* set valid tso segment size but IP CKSUM not set */
if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_NOT_SET",
pktmbuf_pool,
@ -1806,7 +1808,7 @@ test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool)
/* test to validate if IP checksum is set for TSO capability */
/* retain IPV4, TCP_SEG, tso_seg size */
ol_flags |= PKT_TX_IP_CKSUM;
ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_SET",
pktmbuf_pool,
ol_flags, 512, 0) < 0)
@ -1814,8 +1816,8 @@ test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool)
/* test to confirm TSO for IPV6 type */
ol_flags = 0;
ol_flags |= PKT_TX_IPV6;
ol_flags |= PKT_TX_TCP_SEG;
ol_flags |= RTE_MBUF_F_TX_IPV6;
ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IPV6_SET",
pktmbuf_pool,
ol_flags, 512, 0) < 0)
@ -1823,8 +1825,8 @@ test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool)
ol_flags = 0;
/* test if outer IP checksum set for non outer IPv4 packet */
ol_flags |= PKT_TX_IPV6;
ol_flags |= PKT_TX_OUTER_IP_CKSUM;
ol_flags |= RTE_MBUF_F_TX_IPV6;
ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_NOT_SET",
pktmbuf_pool,
ol_flags, 512, -EINVAL) < 0)
@ -1832,8 +1834,8 @@ test_mbuf_validate_tx_offload_one(struct rte_mempool *pktmbuf_pool)
ol_flags = 0;
/* test to confirm outer IP checksum is set for outer IPV4 packet */
ol_flags |= PKT_TX_OUTER_IP_CKSUM;
ol_flags |= PKT_TX_OUTER_IPV4;
ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4;
if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_SET",
pktmbuf_pool,
ol_flags, 512, 0) < 0)
@ -2366,7 +2368,7 @@ test_pktmbuf_ext_shinfo_init_helper(struct rte_mempool *pktmbuf_pool)
buf_iova = rte_mem_virt2iova(ext_buf_addr);
rte_pktmbuf_attach_extbuf(m, ext_buf_addr, buf_iova, buf_len,
ret_shinfo);
if (m->ol_flags != EXT_ATTACHED_MBUF)
if (m->ol_flags != RTE_MBUF_F_EXTERNAL)
GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
__func__);
@ -2380,7 +2382,7 @@ test_pktmbuf_ext_shinfo_init_helper(struct rte_mempool *pktmbuf_pool)
/* attach the same external buffer to the cloned mbuf */
rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len,
ret_shinfo);
if (clone->ol_flags != EXT_ATTACHED_MBUF)
if (clone->ol_flags != RTE_MBUF_F_EXTERNAL)
GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
__func__);
@ -2672,8 +2674,8 @@ test_mbuf_dyn(struct rte_mempool *pktmbuf_pool)
flag2, strerror(errno));
flag3 = rte_mbuf_dynflag_register_bitnum(&dynflag3,
rte_bsf64(PKT_LAST_FREE));
if (flag3 != rte_bsf64(PKT_LAST_FREE))
rte_bsf64(RTE_MBUF_F_LAST_FREE));
if (flag3 != rte_bsf64(RTE_MBUF_F_LAST_FREE))
GOTO_FAIL("failed to register dynamic flag 3, flag3=%d: %s",
flag3, strerror(errno));
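
The dynflag test above pins a new flag to the bit position of
RTE_MBUF_F_LAST_FREE; a standalone sketch of the same registration
(the flag name here is made up for illustration):

    #include <rte_common.h>
    #include <rte_mbuf.h>
    #include <rte_mbuf_dyn.h>

    static int
    register_app_flag(void)
    {
            static const struct rte_mbuf_dynflag desc = {
                    .name = "example_app_dynflag", /* hypothetical */
            };

            /* Explicitly claim the bit of RTE_MBUF_F_LAST_FREE. */
            return rte_mbuf_dynflag_register_bitnum(&desc,
                            rte_bsf64(RTE_MBUF_F_LAST_FREE));
    }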


@ -512,9 +512,9 @@ configured TPID.
// enable VLAN insert offload
testpmd> port config (port_id) tx_offload vlan_insert|qinq_insert (on|off)
if (mbuf->ol_flags & PKT_TX_QINQ) // case-1: insert VLAN to single-tagged packet
if (mbuf->ol_flags & RTE_MBUF_F_TX_QINQ) // case-1: insert VLAN to single-tagged packet
    tci_value = mbuf->vlan_tci_outer
else if (mbuf->ol_flags & PKT_TX_VLAN) // case-2: insert VLAN to untagged packet
else if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) // case-2: insert VLAN to untagged packet
    tci_value = mbuf->vlan_tci
VLAN Strip
@ -528,7 +528,7 @@ The application configures the per-port VLAN strip offload.
testpmd> port config (port_id) rx_offload vlan_strip (on|off)
// notify application VLAN strip via mbuf
mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_STRIPPED // outer VLAN is found and stripped
mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_STRIPPED // outer VLAN is found and stripped
mbuf->vlan_tci = tci_value // TCI of the stripped VLAN
Time Synchronization
@ -552,7 +552,7 @@ packets to application via mbuf.
.. code-block:: console
// RX packet completion will indicate whether the packet is PTP
mbuf->ol_flags |= PKT_RX_IEEE1588_PTP
mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP
Statistics Collection
~~~~~~~~~~~~~~~~~~~~~


@ -279,9 +279,9 @@ inner and outer packets can be IPv4 or IPv6.
- Rx checksum offloads.
The NIC validates IPv4/UDP/TCP checksums of both inner and outer packets.
Good checksum flags (e.g. ``PKT_RX_L4_CKSUM_GOOD``) indicate that the inner
Good checksum flags (e.g. ``RTE_MBUF_F_RX_L4_CKSUM_GOOD``) indicate that the inner
packet has the correct checksum, and if applicable, the outer packet also
has the correct checksum. Bad checksum flags (e.g. ``PKT_RX_L4_CKSUM_BAD``)
has the correct checksum. Bad checksum flags (e.g. ``RTE_MBUF_F_RX_L4_CKSUM_BAD``)
indicate that the inner and/or outer packets have invalid checksum values.
- Inner Rx packet type classification
@ -437,8 +437,8 @@ Limitations
Another alternative is to modify the adapter's ingress VLAN rewrite mode so that
packets with the default VLAN tag are stripped by the adapter and presented to
DPDK as untagged packets. In this case mbuf->vlan_tci and the PKT_RX_VLAN and
PKT_RX_VLAN_STRIPPED mbuf flags would not be set. This mode is enabled with the
DPDK as untagged packets. In this case mbuf->vlan_tci and the RTE_MBUF_F_RX_VLAN and
RTE_MBUF_F_RX_VLAN_STRIPPED mbuf flags would not be set. This mode is enabled with the
``devargs`` parameter ``ig-vlan-rewrite=untag``. For example::
-a 12:00.0,ig-vlan-rewrite=untag


@ -197,7 +197,7 @@ Supports Large Receive Offload.
``dev_conf.rxmode.max_lro_pkt_size``.
* **[implements] datapath**: ``LRO functionality``.
* **[implements] rte_eth_dev_data**: ``lro``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_LRO``, ``mbuf.tso_segsz``.
* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_LRO``, ``mbuf.tso_segsz``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
* **[provides] rte_eth_dev_info**: ``max_lro_pkt_size``.
@ -211,7 +211,7 @@ Supports TCP Segmentation Offloading.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_TCP_TSO``.
* **[uses] rte_eth_desc_lim**: ``nb_seg_max``, ``nb_mtu_seg_max``.
* **[uses] mbuf**: ``mbuf.ol_flags:`` ``PKT_TX_TCP_SEG``, ``PKT_TX_IPV4``, ``PKT_TX_IPV6``, ``PKT_TX_IP_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:`` ``RTE_MBUF_F_TX_TCP_SEG``, ``RTE_MBUF_F_TX_IPV4``, ``RTE_MBUF_F_TX_IPV6``, ``RTE_MBUF_F_TX_IP_CKSUM``.
* **[uses] mbuf**: ``mbuf.tso_segsz``, ``mbuf.l2_len``, ``mbuf.l3_len``, ``mbuf.l4_len``.
* **[implements] datapath**: ``TSO functionality``.
* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_TCP_TSO,RTE_ETH_TX_OFFLOAD_UDP_TSO``.
@ -279,7 +279,7 @@ Supports RSS hashing on RX.
* **[uses] user config**: ``dev_conf.rx_adv_conf.rss_conf``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
* **[provides] rte_eth_dev_info**: ``flow_type_rss_offloads``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_RSS_HASH``, ``mbuf.rss``.
.. _nic_features_inner_rss:
@ -291,7 +291,7 @@ Supports RX RSS hashing on Inner headers.
* **[uses] rte_flow_action_rss**: ``level``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_RSS_HASH``, ``mbuf.rss``.
.. _nic_features_rss_key_update:
@ -411,8 +411,8 @@ of protocol operations. See Security library and PMD documentation for more deta
``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``capabilities_get``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_SEC_OFFLOAD``,
``mbuf.ol_flags:RTE_MBUF_F_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED``.
* **[provides] rte_security_ops, capabilities_get**: ``action: RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO``
@ -434,8 +434,8 @@ protocol operations. See security library and PMD documentation for more details
``capabilities_get``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_SEC_OFFLOAD``,
``mbuf.ol_flags:RTE_MBUF_F_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED``.
* **[provides] rte_security_ops, capabilities_get**: ``action: RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL``
@ -459,9 +459,9 @@ Supports VLAN offload to hardware.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_STRIP,RTE_ETH_RX_OFFLOAD_VLAN_FILTER,RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_VLAN``, ``mbuf.vlan_tci``.
* **[uses] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_TX_VLAN``, ``mbuf.vlan_tci``.
* **[implements] eth_dev_ops**: ``vlan_offload_set``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN`` ``mbuf.vlan_tci``.
* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:RTE_MBUF_F_RX_VLAN`` ``mbuf.vlan_tci``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_VLAN_STRIP``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
* **[related] API**: ``rte_eth_dev_set_vlan_offload()``,
@ -477,9 +477,9 @@ Supports QinQ (queue in queue) offload.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_QINQ``, ``mbuf.vlan_tci_outer``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_QINQ_STRIPPED``, ``mbuf.ol_flags:PKT_RX_QINQ``,
``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN``
* **[uses] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_TX_QINQ``, ``mbuf.vlan_tci_outer``.
* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_QINQ_STRIPPED``, ``mbuf.ol_flags:RTE_MBUF_F_RX_QINQ``,
``mbuf.ol_flags:RTE_MBUF_F_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:RTE_MBUF_F_RX_VLAN``
``mbuf.vlan_tci``, ``mbuf.vlan_tci_outer``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
@ -509,12 +509,12 @@ Supports L3 checksum offload.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``.
* **[uses] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_TX_IP_CKSUM``,
``mbuf.ol_flags:RTE_MBUF_F_TX_IPV4`` | ``RTE_MBUF_F_TX_IPV6``.
* **[uses] mbuf**: ``mbuf.l2_len``, ``mbuf.l3_len``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_IP_CKSUM_UNKNOWN`` |
``PKT_RX_IP_CKSUM_BAD`` | ``PKT_RX_IP_CKSUM_GOOD`` |
``PKT_RX_IP_CKSUM_NONE``.
* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN`` |
``RTE_MBUF_F_RX_IP_CKSUM_BAD`` | ``RTE_MBUF_F_RX_IP_CKSUM_GOOD`` |
``RTE_MBUF_F_RX_IP_CKSUM_NONE``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
@ -528,13 +528,13 @@ Supports L4 checksum offload.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_TX_IPV4`` | ``RTE_MBUF_F_TX_IPV6``,
``mbuf.ol_flags:RTE_MBUF_F_TX_L4_NO_CKSUM`` | ``RTE_MBUF_F_TX_TCP_CKSUM`` |
``RTE_MBUF_F_TX_SCTP_CKSUM`` | ``RTE_MBUF_F_TX_UDP_CKSUM``.
* **[uses] mbuf**: ``mbuf.l2_len``, ``mbuf.l3_len``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_L4_CKSUM_UNKNOWN`` |
``PKT_RX_L4_CKSUM_BAD`` | ``PKT_RX_L4_CKSUM_GOOD`` |
``PKT_RX_L4_CKSUM_NONE``.
* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN`` |
``RTE_MBUF_F_RX_L4_CKSUM_BAD`` | ``RTE_MBUF_F_RX_L4_CKSUM_GOOD`` |
``RTE_MBUF_F_RX_L4_CKSUM_NONE``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
@ -546,7 +546,7 @@ Timestamp offload
Supports Timestamp.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_TIMESTAMP``.
* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_TIMESTAMP``.
* **[provides] mbuf**: ``mbuf.timestamp``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
* **[related] eth_dev_ops**: ``read_clock``.
@ -560,7 +560,7 @@ Supports MACsec.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_MACSEC``.
* **[uses] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_TX_MACSEC``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
@ -574,12 +574,12 @@ Supports inner packet L3 checksum.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
``mbuf.ol_flags:PKT_TX_OUTER_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
* **[uses] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_TX_IP_CKSUM``,
``mbuf.ol_flags:RTE_MBUF_F_TX_IPV4`` | ``RTE_MBUF_F_TX_IPV6``,
``mbuf.ol_flags:RTE_MBUF_F_TX_OUTER_IP_CKSUM``,
``mbuf.ol_flags:RTE_MBUF_F_TX_OUTER_IPV4`` | ``RTE_MBUF_F_TX_OUTER_IPV6``.
* **[uses] mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_IP_CKSUM_BAD``.
* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
@ -592,11 +592,11 @@ Inner L4 checksum
Supports inner packet L4 checksum.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_L4_CKSUM_UNKNOWN`` |
``PKT_RX_OUTER_L4_CKSUM_BAD`` | ``PKT_RX_OUTER_L4_CKSUM_GOOD`` | ``PKT_RX_OUTER_L4_CKSUM_INVALID``.
* **[provides] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN`` |
``RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD`` | ``RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD`` | ``RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
``mbuf.ol_flags:PKT_TX_OUTER_UDP_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:RTE_MBUF_F_TX_OUTER_IPV4`` | ``RTE_MBUF_F_TX_OUTER_IPV6``.
``mbuf.ol_flags:RTE_MBUF_F_TX_OUTER_UDP_CKSUM``.
* **[uses] mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.


@ -284,7 +284,7 @@ Intel 82599 10 Gigabit Ethernet Controller Specification Update (Revision 2.87)
Errata: 44 Integrity Error Reported for IPv4/UDP Packets With Zero Checksum
To support UDP zero checksum, packets with a zero or bad UDP checksum are marked as
PKT_RX_L4_CKSUM_UNKNOWN, so the application needs to recompute the checksum to
RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN, so the application needs to recompute the checksum to
validate it.
Inline crypto processing support


@ -255,7 +255,7 @@ Limitations
no MPRQ feature or vectorized code can be engaged.
- When Multi-Packet Rx queue is configured (``mprq_en``), a Rx packet can be
externally attached to a user-provided mbuf, with EXT_ATTACHED_MBUF set in
externally attached to a user-provided mbuf, with RTE_MBUF_F_EXTERNAL set in
ol_flags. As the mempool for the external buffer is managed by PMD, all the
Rx mbufs must be freed before the device is closed. Otherwise, the mempool of
the external buffers will be freed by PMD and the application which still
@ -263,7 +263,7 @@ Limitations
- If Multi-Packet Rx queue is configured (``mprq_en``) and Rx CQE compression is
enabled (``rxq_cqe_comp_en``) at the same time, RSS hash result is not fully
supported. Some Rx packets may not have PKT_RX_RSS_HASH.
supported. Some Rx packets may not have RTE_MBUF_F_RX_RSS_HASH.
- IPv6 Multicast messages are not supported on VM, while promiscuous mode
and allmulticast mode are both set to off.
@ -648,7 +648,7 @@ Driver options
the mbuf by external buffer attachment - ``rte_pktmbuf_attach_extbuf()``.
A mempool for external buffers will be allocated and managed by PMD. If Rx
packet is externally attached, ol_flags field of the mbuf will have
EXT_ATTACHED_MBUF and this flag must be preserved. ``RTE_MBUF_HAS_EXTBUF()``
RTE_MBUF_F_EXTERNAL and this flag must be preserved. ``RTE_MBUF_HAS_EXTBUF()``
checks the flag. The default value is 128, valid only if ``mprq_en`` is set.
- ``rxqs_min_mprq`` parameter [int]


@ -211,11 +211,11 @@ To segment an outgoing packet, an application must:
responsibility to ensure that these flags are set.
- For example, in order to segment TCP/IPv4 packets, the application should
add the ``PKT_TX_IPV4`` and ``PKT_TX_TCP_SEG`` flags to the mbuf's
add the ``RTE_MBUF_F_TX_IPV4`` and ``RTE_MBUF_F_TX_TCP_SEG`` flags to the mbuf's
ol_flags.
- If checksum calculation in hardware is required, the application should
also add the ``PKT_TX_TCP_CKSUM`` and ``PKT_TX_IP_CKSUM`` flags.
also add the ``RTE_MBUF_F_TX_TCP_CKSUM`` and ``RTE_MBUF_F_TX_IP_CKSUM`` flags.
#. Check if the packet should be processed. Packets with one of the
following properties are not processed and are returned immediately:


@ -123,7 +123,7 @@ timestamp mechanism, the VLAN tagging and the IP checksum computation.
On TX side, it is also possible for an application to delegate some
processing to the hardware if it supports it. For instance, the
PKT_TX_IP_CKSUM flag allows offloading the computation of the IPv4
RTE_MBUF_F_TX_IP_CKSUM flag allows offloading the computation of the IPv4
checksum.
The following examples explain how to configure different TX offloads on
@ -134,7 +134,7 @@ a vxlan-encapsulated tcp packet:
mb->l2_len = len(out_eth)
mb->l3_len = len(out_ip)
mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM
mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM
set out_ip checksum to 0 in the packet
This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
@ -143,7 +143,7 @@ a vxlan-encapsulated tcp packet:
mb->l2_len = len(out_eth)
mb->l3_len = len(out_ip)
mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM
mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM
set out_ip checksum to 0 in the packet
set out_udp checksum to pseudo header using rte_ipv4_phdr_cksum()
@ -154,7 +154,7 @@ a vxlan-encapsulated tcp packet:
mb->l2_len = len(out_eth + out_ip + out_udp + vxlan + in_eth)
mb->l3_len = len(in_ip)
mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM
mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM
set in_ip checksum to 0 in the packet
This is similar to case 1), but l2_len is different. It is supported
@ -165,7 +165,7 @@ a vxlan-encapsulated tcp packet:
mb->l2_len = len(out_eth + out_ip + out_udp + vxlan + in_eth)
mb->l3_len = len(in_ip)
mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM
mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM
set in_ip checksum to 0 in the packet
set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
@ -179,8 +179,8 @@ a vxlan-encapsulated tcp packet:
mb->l2_len = len(out_eth + out_ip + out_udp + vxlan + in_eth)
mb->l3_len = len(in_ip)
mb->l4_len = len(in_tcp)
mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
PKT_TX_TCP_SEG;
mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM |
RTE_MBUF_F_TX_TCP_SEG;
set in_ip checksum to 0 in the packet
set in_tcp checksum to pseudo header without including the IP
payload length using rte_ipv4_phdr_cksum()
@ -194,8 +194,8 @@ a vxlan-encapsulated tcp packet:
mb->outer_l3_len = len(out_ip)
mb->l2_len = len(out_udp + vxlan + in_eth)
mb->l3_len = len(in_ip)
mb->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
mb->ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM;
set out_ip checksum to 0 in the packet
set in_ip checksum to 0 in the packet
set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
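
The numbered cases above are pseudocode; a hedged C rendering of case 2
(IP and UDP checksum offload, assuming a contiguous mbuf whose data
starts at the Ethernet header):

    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_udp.h>
    #include <rte_mbuf.h>

    static void
    offload_ip_udp_cksum(struct rte_mbuf *mb)
    {
            struct rte_ipv4_hdr *ip;
            struct rte_udp_hdr *udp;

            mb->l2_len = sizeof(struct rte_ether_hdr);
            mb->l3_len = sizeof(struct rte_ipv4_hdr);
            mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
                            RTE_MBUF_F_TX_UDP_CKSUM;

            ip = rte_pktmbuf_mtod_offset(mb, struct rte_ipv4_hdr *,
                            mb->l2_len);
            ip->hdr_checksum = 0;   /* hardware fills this in */
            udp = (struct rte_udp_hdr *)((char *)ip + mb->l3_len);
            udp->dgram_cksum = rte_ipv4_phdr_cksum(ip, mb->ol_flags);
    }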


@ -290,7 +290,7 @@ Timestamp and latency calculation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Latency stats library marks the time in the timestamp field of the
mbuf for the ingress packets and sets the ``PKT_RX_TIMESTAMP`` flag of
mbuf for the ingress packets and sets the ``RTE_MBUF_F_RX_TIMESTAMP`` flag of
``ol_flags`` for the mbuf to indicate the marked time as a valid one.
At egress, mbufs with the flag set are considered to carry a valid
timestamp and are used for the latency calculation.


@ -701,9 +701,9 @@ Item: ``META``
Matches 32 bit metadata item set.
On egress, metadata can be set either by mbuf metadata field with
PKT_TX_DYNF_METADATA flag or ``SET_META`` action. On ingress, ``SET_META``
RTE_MBUF_DYNFLAG_TX_METADATA flag or ``SET_META`` action. On ingress, ``SET_META``
action sets metadata for a packet and the metadata will be reported via
``metadata`` dynamic field of ``rte_mbuf`` with PKT_RX_DYNF_METADATA flag.
``metadata`` dynamic field of ``rte_mbuf`` with RTE_MBUF_DYNFLAG_RX_METADATA flag.
- Default ``mask`` matches the specified Rx metadata value.
@ -1827,8 +1827,8 @@ flows to loop between groups.
Action: ``MARK``
^^^^^^^^^^^^^^^^
Attaches an integer value to packets and sets ``PKT_RX_FDIR`` and
``PKT_RX_FDIR_ID`` mbuf flags.
Attaches an integer value to packets and sets ``RTE_MBUF_F_RX_FDIR`` and
``RTE_MBUF_F_RX_FDIR_ID`` mbuf flags.
This value is arbitrary and application-defined. Maximum allowed value
depends on the underlying implementation. It is returned in the
@ -1848,7 +1848,7 @@ Action: ``FLAG``
^^^^^^^^^^^^^^^^
Flags packets. Similar to `Action: MARK`_ without a specific value; only
sets the ``PKT_RX_FDIR`` mbuf flag.
sets the ``RTE_MBUF_F_RX_FDIR`` mbuf flag.
- No configurable properties.
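
In the new namespace, consuming MARK/FLAG results on the Rx side looks
like this sketch (assumes a flow rule with a MARK action matched the
packet):

    #include <rte_mbuf.h>

    /* Return 0 and store the MARK id if the packet carries one. */
    static int
    get_flow_mark(const struct rte_mbuf *m, uint32_t *mark_id)
    {
            if (!(m->ol_flags & RTE_MBUF_F_RX_FDIR_ID))
                    return -1;
            *mark_id = m->hash.fdir.hi;
            return 0;
    }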
@ -2809,10 +2809,10 @@ Action: ``SET_META``
Set metadata. Item ``META`` matches metadata.
Metadata set by mbuf metadata field with PKT_TX_DYNF_METADATA flag on egress
Metadata set by mbuf metadata field with RTE_MBUF_DYNFLAG_TX_METADATA flag on egress
will be overridden by this action. On ingress, the metadata will be carried by
``metadata`` dynamic field of ``rte_mbuf`` which can be accessed by
``RTE_FLOW_DYNF_METADATA()``. PKT_RX_DYNF_METADATA flag will be set along
``RTE_FLOW_DYNF_METADATA()``. RTE_MBUF_DYNFLAG_RX_METADATA flag will be set along
with the data.
The mbuf dynamic field must be registered by calling


@ -50,11 +50,6 @@ Deprecation Notices
* mempool: The mempool API macros ``MEMPOOL_PG_*`` are deprecated and
will be removed in DPDK 22.11.
* mbuf: The mbuf offload flags ``PKT_*`` will be renamed as ``RTE_MBUF_F_*``.
A compatibility layer will be kept until DPDK 22.11, except for the flags
that are already deprecated (``PKT_RX_L4_CKSUM_BAD``, ``PKT_RX_IP_CKSUM_BAD``,
``PKT_RX_EIP_CKSUM_BAD``, ``PKT_TX_QINQ_PKT``) which will be removed.
* pci: To reduce unnecessary ABIs exposed by DPDK bus driver, "rte_bus_pci.h"
will be made internal in 21.11 and macros/data structures/functions defined
in the header will not be considered as ABI anymore. This change is inspired


@ -390,6 +390,8 @@ API Changes
* mempool: The mempool API macros ``MEMPOOL_PG_*`` are deprecated and
will be removed in DPDK 22.11.
* mbuf: The mbuf offload flags ``PKT_*`` are renamed as ``RTE_MBUF_F_*``. A
compatibility layer will be kept until DPDK 22.11.
* net: Renamed ``s_addr`` and ``d_addr`` fields of ``rte_ether_hdr`` structure
to ``src_addr`` and ``dst_addr``, respectively.


@ -373,20 +373,20 @@ cn10k_cpt_sec_ucc_process(struct rte_crypto_op *cop,
switch (uc_compcode) {
case ROC_IE_OT_UCC_SUCCESS:
if (sa->ip_csum_enable)
mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
break;
case ROC_IE_OT_UCC_SUCCESS_PKT_IP_BADCSUM:
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
break;
case ROC_IE_OT_UCC_SUCCESS_PKT_L4_GOODCSUM:
mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (sa->ip_csum_enable)
mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
break;
case ROC_IE_OT_UCC_SUCCESS_PKT_L4_BADCSUM:
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
if (sa->ip_csum_enable)
mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
break;
default:
break;
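
When consuming the status bits this driver sets, note that the Rx
checksum fields are multi-bit values, so the correct test is a masked
comparison rather than a single-bit AND; a sketch:

    #include <rte_mbuf.h>

    static int
    rx_csums_ok(const struct rte_mbuf *m)
    {
            return (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
                            RTE_MBUF_F_RX_IP_CKSUM_GOOD &&
                   (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
                            RTE_MBUF_F_RX_L4_CKSUM_GOOD;
    }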


@ -69,10 +69,10 @@ process_outb_sa(struct rte_crypto_op *cop, struct cn10k_ipsec_sa *sess,
}
#endif
if (m_src->ol_flags & PKT_TX_IP_CKSUM)
if (m_src->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
inst_w4_u64 &= ~BIT_ULL(33);
if (m_src->ol_flags & PKT_TX_L4_MASK)
if (m_src->ol_flags & RTE_MBUF_F_TX_L4_MASK)
inst_w4_u64 &= ~BIT_ULL(32);
/* Prepare CPT instruction */


@ -642,7 +642,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
uint64_t ol_flags = m->ol_flags;
if (ol_flags & PKT_TX_SEC_OFFLOAD) {
if (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
uintptr_t ssow_base = base;
if (ev->sched_type)


@ -428,53 +428,53 @@ octeontx_create_rx_ol_flags_array(void *mem)
errcode = idx & 0xff;
errlev = (idx & 0x700) >> 8;
val = PKT_RX_IP_CKSUM_UNKNOWN;
val |= PKT_RX_L4_CKSUM_UNKNOWN;
val |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
val = RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
val |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN;
switch (errlev) {
case OCCTX_ERRLEV_RE:
if (errcode) {
val |= PKT_RX_IP_CKSUM_BAD;
val |= PKT_RX_L4_CKSUM_BAD;
val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
val |= PKT_RX_IP_CKSUM_GOOD;
val |= PKT_RX_L4_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
break;
case OCCTX_ERRLEV_LC:
if (errcode == OCCTX_EC_IP4_CSUM) {
val |= PKT_RX_IP_CKSUM_BAD;
val |= PKT_RX_OUTER_IP_CKSUM_BAD;
val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
val |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
} else {
val |= PKT_RX_IP_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
break;
case OCCTX_ERRLEV_LD:
/* Check if parsed packet is neither IPv4 or IPV6 */
if (errcode == OCCTX_EC_IP4_NOT)
break;
val |= PKT_RX_IP_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (errcode == OCCTX_EC_L4_CSUM)
val |= PKT_RX_OUTER_L4_CKSUM_BAD;
val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
else
val |= PKT_RX_L4_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
break;
case OCCTX_ERRLEV_LE:
if (errcode == OCCTX_EC_IP4_CSUM)
val |= PKT_RX_IP_CKSUM_BAD;
val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
val |= PKT_RX_IP_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
break;
case OCCTX_ERRLEV_LF:
/* Check if parsed packet is neither IPv4 or IPV6 */
if (errcode == OCCTX_EC_IP4_NOT)
break;
val |= PKT_RX_IP_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (errcode == OCCTX_EC_L4_CSUM)
val |= PKT_RX_L4_CKSUM_BAD;
val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
val |= PKT_RX_L4_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
break;
}
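
The initializer above is the usual trick of precomputing ol_flags once
per hardware error code so the Rx burst loop does a single array load;
in miniature (hypothetical 2-bit error encoding, for illustration
only):

    #include <stdint.h>
    #include <rte_mbuf.h>

    static uint64_t err_to_flags[4];

    static void
    init_err_to_flags(void)
    {
            err_to_flags[0] = RTE_MBUF_F_RX_IP_CKSUM_GOOD |
                            RTE_MBUF_F_RX_L4_CKSUM_GOOD;
            err_to_flags[1] = RTE_MBUF_F_RX_IP_CKSUM_BAD;
            err_to_flags[2] = RTE_MBUF_F_RX_L4_CKSUM_BAD;
            err_to_flags[3] = RTE_MBUF_F_RX_IP_CKSUM_BAD |
                            RTE_MBUF_F_RX_L4_CKSUM_BAD;
    }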


@ -126,7 +126,7 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
if (!!(flag & OCCTX_RX_VLAN_FLTR_F)) {
if (likely(wqe->s.w2.vv)) {
mbuf->ol_flags |= PKT_RX_VLAN;
mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
mbuf->vlan_tci =
ntohs(*((uint16_t *)((char *)mbuf->buf_addr +
mbuf->data_off + wqe->s.w4.vlptr + 2)));


@ -277,7 +277,7 @@ otx2_ssogws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
uint16_t ref_cnt = m->refcnt;
if ((flags & NIX_TX_OFFLOAD_SECURITY_F) &&
(m->ol_flags & PKT_TX_SEC_OFFLOAD)) {
(m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
txq = otx2_ssogws_xtract_meta(m, txq_data);
return otx2_sec_event_tx(base, ev, m, txq, flags);
}


@ -149,7 +149,7 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/* check for vlan info */
if (ppd->tp_status & TP_STATUS_VLAN_VALID) {
mbuf->vlan_tci = ppd->tp_vlan_tci;
mbuf->ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
mbuf->ol_flags |= (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
if (!pkt_q->vlan_strip && rte_vlan_insert(&mbuf))
PMD_LOG(ERR, "Failed to reinsert VLAN tag");
@ -229,7 +229,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
/* insert vlan info if necessary */
if (mbuf->ol_flags & PKT_TX_VLAN) {
if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
if (rte_vlan_insert(&mbuf)) {
rte_pktmbuf_free(mbuf);
continue;


@ -15,20 +15,20 @@
#include "hw_atl/hw_atl_b0_internal.h"
#define ATL_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG)
RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_L4_MASK | \
RTE_MBUF_F_TX_TCP_SEG)
#define ATL_TX_OFFLOAD_MASK ( \
PKT_TX_VLAN | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG)
RTE_MBUF_F_TX_VLAN | \
RTE_MBUF_F_TX_IPV6 | \
RTE_MBUF_F_TX_IPV4 | \
RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_L4_MASK | \
RTE_MBUF_F_TX_TCP_SEG)
#define ATL_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)
(RTE_MBUF_F_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
@ -845,21 +845,21 @@ atl_desc_to_offload_flags(struct atl_rx_queue *rxq,
if (rxq->l3_csum_enabled && ((rxd_wb->pkt_type & 0x3) == 0)) {
/* IPv4 csum error ? */
if (rxd_wb->rx_stat & BIT(1))
mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
} else {
mbuf_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
}
/* CSUM calculated ? */
if (rxq->l4_csum_enabled && (rxd_wb->rx_stat & BIT(3))) {
if (rxd_wb->rx_stat & BIT(2))
mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
} else {
mbuf_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
}
return mbuf_flags;
@ -1039,12 +1039,12 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_mbuf->packet_type = atl_desc_to_pkt_type(&rxd_wb);
if (rx_mbuf->packet_type & RTE_PTYPE_L2_ETHER_VLAN) {
rx_mbuf->ol_flags |= PKT_RX_VLAN;
rx_mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
rx_mbuf->vlan_tci = rxd_wb.vlan;
if (cfg->vlan_strip)
rx_mbuf->ol_flags |=
PKT_RX_VLAN_STRIPPED;
RTE_MBUF_F_RX_VLAN_STRIPPED;
}
if (!rx_mbuf_first)
@ -1174,12 +1174,12 @@ atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
uint32_t tx_cmd = 0;
uint64_t ol_flags = tx_pkt->ol_flags;
if (ol_flags & PKT_TX_TCP_SEG) {
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
txc->cmd = 0x4;
if (ol_flags & PKT_TX_IPV6)
if (ol_flags & RTE_MBUF_F_TX_IPV6)
txc->cmd |= 0x2;
txc->l2_len = tx_pkt->l2_len;
@ -1189,7 +1189,7 @@ atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
txc->mss_len = tx_pkt->tso_segsz;
}
if (ol_flags & PKT_TX_VLAN) {
if (ol_flags & RTE_MBUF_F_TX_VLAN) {
tx_cmd |= tx_desc_cmd_vlan;
txc->vlan_tag = tx_pkt->vlan_tci;
}
@ -1207,9 +1207,9 @@ atl_setup_csum_offload(struct rte_mbuf *mbuf, struct hw_atl_txd_s *txd,
uint32_t tx_cmd)
{
txd->cmd |= tx_desc_cmd_fcs;
txd->cmd |= (mbuf->ol_flags & PKT_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
txd->cmd |= (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
/* L4 csum requested */
txd->cmd |= (mbuf->ol_flags & PKT_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
txd->cmd |= (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
txd->cmd |= tx_cmd;
}
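
The NOTSUP mask defined above is how a tx_prepare path typically
rejects offload requests the hardware cannot honor; a generic sketch
using the renamed umbrella mask ('supported' stands in for a driver
mask such as ATL_TX_OFFLOAD_MASK):

    #include <errno.h>
    #include <rte_mbuf.h>

    static int
    check_tx_offload(const struct rte_mbuf *m, uint64_t supported)
    {
            if (m->ol_flags & (RTE_MBUF_F_TX_OFFLOAD_MASK ^ supported))
                    return -ENOTSUP;
            return 0;
    }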


@ -1311,7 +1311,7 @@ avp_dev_copy_from_buffers(struct avp_dev *avp,
src_offset = 0;
if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
ol_flags = PKT_RX_VLAN;
ol_flags = RTE_MBUF_F_RX_VLAN;
vlan_tci = pkt_buf->vlan_tci;
} else {
ol_flags = 0;
@ -1569,7 +1569,7 @@ avp_recv_pkts(void *rx_queue,
m->port = avp->port_id;
if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
m->ol_flags = PKT_RX_VLAN;
m->ol_flags = RTE_MBUF_F_RX_VLAN;
m->vlan_tci = pkt_buf->vlan_tci;
}
@ -1675,7 +1675,7 @@ avp_dev_copy_to_buffers(struct avp_dev *avp,
first_buf->nb_segs = count;
first_buf->pkt_len = total_length;
if (mbuf->ol_flags & PKT_TX_VLAN) {
if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
first_buf->vlan_tci = mbuf->vlan_tci;
}
@ -1906,7 +1906,7 @@ avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
pkt_buf->nb_segs = 1;
pkt_buf->next = NULL;
if (m->ol_flags & PKT_TX_VLAN) {
if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
pkt_buf->vlan_tci = m->vlan_tci;
}


@ -260,17 +260,17 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
}
if (rxq->pdata->rx_csum_enable) {
mbuf->ol_flags = 0;
mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
} else if (
unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
}
rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
@ -282,25 +282,24 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
if (!err || !etlt) {
if (etlt == RX_CVLAN_TAG_PRESENT) {
mbuf->ol_flags |= PKT_RX_VLAN;
mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
mbuf->vlan_tci =
AXGMAC_GET_BITS_LE(desc->write.desc0,
RX_NORMAL_DESC0, OVT);
if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
else
mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
} else {
mbuf->ol_flags &=
~(PKT_RX_VLAN
| PKT_RX_VLAN_STRIPPED);
mbuf->vlan_tci = 0;
}
mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
} else {
mbuf->ol_flags &=
~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
mbuf->vlan_tci = 0;
}
}
/* Indicate if a Context Descriptor is next */
if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CDA))
mbuf->ol_flags |= PKT_RX_IEEE1588_PTP
| PKT_RX_IEEE1588_TMST;
mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP
| RTE_MBUF_F_RX_IEEE1588_TMST;
pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
PL) - rxq->crc_len;
/* Mbuf populate */
@ -426,17 +425,17 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
if (!err || !etlt) {
if (etlt == RX_CVLAN_TAG_PRESENT) {
mbuf->ol_flags |= PKT_RX_VLAN;
mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
mbuf->vlan_tci =
AXGMAC_GET_BITS_LE(desc->write.desc0,
RX_NORMAL_DESC0, OVT);
if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
else
mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
} else {
mbuf->ol_flags &=
~(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
mbuf->vlan_tci = 0;
}
}
@ -465,17 +464,17 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
first_seg->port = rxq->port_id;
if (rxq->pdata->rx_csum_enable) {
mbuf->ol_flags = 0;
mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
} else if (unlikely(error_status
== AXGBE_L4_CSUM_ERR)) {
mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
}
@ -795,7 +794,7 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
mbuf->pkt_len);
/* Timestamp enablement check */
if (mbuf->ol_flags & PKT_TX_IEEE1588_TMST)
if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
rte_wmb();
/* Mark it as First and Last Descriptor */
@ -804,14 +803,14 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
/* Mark it as a NORMAL descriptor */
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
/* configure h/w Offload */
mask = mbuf->ol_flags & PKT_TX_L4_MASK;
if ((mask == PKT_TX_TCP_CKSUM) || (mask == PKT_TX_UDP_CKSUM))
mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
else if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
rte_wmb();
if (mbuf->ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
/* Mark it as a CONTEXT descriptor */
AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
CTXT, 1);


@ -23,7 +23,7 @@ axgbe_vec_tx(volatile struct axgbe_tx_desc *desc,
{
uint64_t tmst_en = 0;
/* Timestamp enablement check */
if (mbuf->ol_flags & PKT_TX_IEEE1588_TMST)
if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
tmst_en = TX_DESC_CTRL_FLAG_TMST;
__m128i descriptor = _mm_set_epi64x((uint64_t)mbuf->pkt_len << 32 |
TX_DESC_CTRL_FLAGS | mbuf->data_len


@ -2189,7 +2189,7 @@ int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0)
tx_start_bd->nbd = rte_cpu_to_le_16(2);
if (m0->ol_flags & PKT_TX_VLAN) {
if (m0->ol_flags & RTE_MBUF_F_TX_VLAN) {
tx_start_bd->vlan_or_ethertype =
rte_cpu_to_le_16(m0->vlan_tci);
tx_start_bd->bd_flags.as_bitfield |=


@ -435,7 +435,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
*/
if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) {
rx_mb->vlan_tci = cqe_fp->vlan_tag;
rx_mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
rx_mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
}
rx_pkts[nb_rx] = rx_mb;


@ -260,25 +260,25 @@ static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
mbuf->data_len = mbuf->pkt_len;
mbuf->port = rxq->port_id;
mbuf->ol_flags = PKT_RX_LRO;
mbuf->ol_flags = RTE_MBUF_F_RX_LRO;
bnxt_tpa_get_metadata(rxq->bp, tpa_info, tpa_start, tpa_start1);
if (likely(tpa_info->hash_valid)) {
mbuf->hash.rss = tpa_info->rss_hash;
mbuf->ol_flags |= PKT_RX_RSS_HASH;
mbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
} else if (tpa_info->cfa_code_valid) {
mbuf->hash.fdir.id = tpa_info->cfa_code;
mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
}
if (tpa_info->vlan_valid && BNXT_RX_VLAN_STRIP_EN(rxq->bp)) {
mbuf->vlan_tci = tpa_info->vlan;
mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
}
if (likely(tpa_info->l4_csum_valid))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
/* recycle next mbuf */
data_cons = RING_NEXT(data_cons);
@ -576,34 +576,34 @@ bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
if (BNXT_RX_VLAN_STRIP_EN(rxq->bp)) {
if (i & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
pt[i] |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
pt[i] |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
}
if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 3)) {
/* Tunnel case. */
if (outer_cksum_enabled) {
if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
pt[i] |= PKT_RX_IP_CKSUM_GOOD;
pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
pt[i] |= PKT_RX_L4_CKSUM_GOOD;
pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
pt[i] |= PKT_RX_OUTER_L4_CKSUM_GOOD;
pt[i] |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
} else {
if (i & RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
pt[i] |= PKT_RX_IP_CKSUM_GOOD;
pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (i & RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
pt[i] |= PKT_RX_L4_CKSUM_GOOD;
pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
} else {
/* Non-tunnel case. */
if (i & RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
pt[i] |= PKT_RX_IP_CKSUM_GOOD;
pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (i & RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
pt[i] |= PKT_RX_L4_CKSUM_GOOD;
pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
}
@ -616,30 +616,30 @@ bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
/* Tunnel case. */
if (outer_cksum_enabled) {
if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
pt[i] |= PKT_RX_IP_CKSUM_BAD;
pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
pt[i] |= PKT_RX_OUTER_IP_CKSUM_BAD;
pt[i] |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
pt[i] |= PKT_RX_L4_CKSUM_BAD;
pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
pt[i] |= PKT_RX_OUTER_L4_CKSUM_BAD;
pt[i] |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
} else {
if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
pt[i] |= PKT_RX_IP_CKSUM_BAD;
pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
pt[i] |= PKT_RX_L4_CKSUM_BAD;
pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
} else {
/* Non-tunnel case. */
if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
pt[i] |= PKT_RX_IP_CKSUM_BAD;
pt[i] |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
pt[i] |= PKT_RX_L4_CKSUM_BAD;
pt[i] |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
}
}
@ -677,13 +677,13 @@ bnxt_set_ol_flags(struct bnxt_rx_ring_info *rxr, struct rx_pkt_cmpl *rxcmp,
if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
mbuf->hash.rss = rte_le_to_cpu_32(rxcmp->rss_hash);
ol_flags |= PKT_RX_RSS_HASH;
ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
}
#ifdef RTE_LIBRTE_IEEE1588
if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) ==
RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | RTE_MBUF_F_RX_IEEE1588_TMST;
#endif
mbuf->ol_flags = ol_flags;
@ -807,7 +807,7 @@ bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
mbuf->hash.fdir.hi = mark_id;
*bnxt_cfa_code_dynfield(mbuf) = cfa_code & 0xffffffffull;
mbuf->hash.fdir.id = rxcmp1->cfa_code;
mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
return mark_id;
}
@ -854,7 +854,7 @@ void bnxt_set_mark_in_mbuf(struct bnxt *bp,
}
mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
mbuf->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
}
static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,


@ -212,7 +212,7 @@ static inline void bnxt_rx_vlan_v2(struct rte_mbuf *mbuf,
{
if (RX_CMP_VLAN_VALID(rxcmp)) {
mbuf->vlan_tci = RX_CMP_METADATA0_VID(rxcmp1);
mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
}
}
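
The VLAN pair set here follows a convention used throughout this patch: the plain RX VLAN flag says a tag was found and mbuf->vlan_tci is valid, while the STRIPPED flag additionally says hardware removed the tag from the packet data. A minimal sketch with placeholder flag values (invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define F_RX_VLAN		(1ULL << 0)	/* placeholder values */
#define F_RX_VLAN_STRIPPED	(1ULL << 1)

struct pkt {			/* minimal stand-in for struct rte_mbuf */
	uint64_t ol_flags;
	uint16_t vlan_tci;
};

/* Record a received tag; only mark STRIPPED when hw removed it. */
static void on_vlan(struct pkt *p, uint16_t tci, int hw_stripped)
{
	p->ol_flags |= F_RX_VLAN;
	p->vlan_tci = tci;
	if (hw_stripped)
		p->ol_flags |= F_RX_VLAN_STRIPPED;
}

int main(void)
{
	struct pkt p = {0};

	on_vlan(&p, 100, 1);
	printf("flags=%llx tci=%u\n", (unsigned long long)p.ol_flags,
	       p.vlan_tci);
	return 0;
}
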
@ -276,47 +276,47 @@ static inline void bnxt_parse_csum_v2(struct rte_mbuf *mbuf,
t_pkt = 1;
if (unlikely(RX_CMP_V2_L4_CS_ERR(error_v2)))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else if (flags2 & RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK)
mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
else
mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
if (unlikely(RX_CMP_V2_L3_CS_ERR(error_v2)))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else if (flags2 & RX_CMP_FLAGS2_IP_CSUM_ALL_OK_MASK)
mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
else
mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
} else {
hdr_cnt = RX_CMP_V2_L4_CS_OK(flags2);
if (hdr_cnt > 1)
t_pkt = 1;
if (RX_CMP_V2_L4_CS_OK(flags2))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
else if (RX_CMP_V2_L4_CS_ERR(error_v2))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
if (RX_CMP_V2_L3_CS_OK(flags2))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
else if (RX_CMP_V2_L3_CS_ERR(error_v2))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
}
if (t_pkt) {
if (unlikely(RX_CMP_V2_OT_L4_CS_ERR(error_v2) ||
RX_CMP_V2_T_L4_CS_ERR(error_v2)))
mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
mbuf->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
else
mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
mbuf->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
if (unlikely(RX_CMP_V2_T_IP_CS_ERR(error_v2)))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
}
}


@ -111,12 +111,12 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
static bool
bnxt_xmit_need_long_bd(struct rte_mbuf *tx_pkt, struct bnxt_tx_queue *txq)
{
if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
PKT_TX_VLAN | PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
PKT_TX_TUNNEL_GENEVE | PKT_TX_IEEE1588_TMST |
PKT_TX_QINQ) ||
if (tx_pkt->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_TCP_CKSUM |
RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_IP_CKSUM |
RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_OUTER_IP_CKSUM |
RTE_MBUF_F_TX_TUNNEL_GRE | RTE_MBUF_F_TX_TUNNEL_VXLAN |
RTE_MBUF_F_TX_TUNNEL_GENEVE | RTE_MBUF_F_TX_IEEE1588_TMST |
RTE_MBUF_F_TX_QINQ) ||
(BNXT_TRUFLOW_EN(txq->bp) &&
(txq->bp->tx_cfa_action || txq->vfr_tx_cfa_action)))
return true;
@ -203,13 +203,13 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
vlan_tag_flags = 0;
/* HW can accelerate only outer vlan in QinQ mode */
if (tx_pkt->ol_flags & PKT_TX_QINQ) {
if (tx_pkt->ol_flags & RTE_MBUF_F_TX_QINQ) {
vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
tx_pkt->vlan_tci_outer;
outer_tpid_bd = txq->bp->outer_tpid_bd &
BNXT_OUTER_TPID_BD_MASK;
vlan_tag_flags |= outer_tpid_bd;
} else if (tx_pkt->ol_flags & PKT_TX_VLAN) {
} else if (tx_pkt->ol_flags & RTE_MBUF_F_TX_VLAN) {
/* shurd: Should this mask at
* TX_BD_LONG_CFA_META_VLAN_VID_MASK?
*/
@ -239,7 +239,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
else
txbd1->cfa_action = txq->bp->tx_cfa_action;
if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
if (tx_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
uint16_t hdr_size;
/* TSO */
@ -247,7 +247,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
TX_BD_LONG_LFLAGS_T_IPID;
hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
tx_pkt->l4_len;
hdr_size += (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK) ?
hdr_size += (tx_pkt->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
tx_pkt->outer_l2_len +
tx_pkt->outer_l3_len : 0;
/* The hdr_size is multiple of 16bit units not 8bit.
@ -302,24 +302,24 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
PKT_TX_TCP_UDP_CKSUM) {
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
} else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) ==
PKT_TX_TCP_CKSUM) {
} else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) ==
RTE_MBUF_F_TX_TCP_CKSUM) {
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
} else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) ==
PKT_TX_UDP_CKSUM) {
} else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) ==
RTE_MBUF_F_TX_UDP_CKSUM) {
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
} else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) ==
PKT_TX_IP_CKSUM) {
} else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) ==
RTE_MBUF_F_TX_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
} else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) ==
PKT_TX_OUTER_IP_CKSUM) {
} else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ==
RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
} else if ((tx_pkt->ol_flags & PKT_TX_IEEE1588_TMST) ==
PKT_TX_IEEE1588_TMST) {
} else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) ==
RTE_MBUF_F_TX_IEEE1588_TMST) {
/* PTP */
txbd1->lflags |= TX_BD_LONG_LFLAGS_STAMP;
}


@ -60,25 +60,25 @@ int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int bnxt_flush_tx_cmp(struct bnxt_cp_ring_info *cpr);
#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM)
#define PKT_TX_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM)
#define PKT_TX_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)
#define PKT_TX_OIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_CKSUM (PKT_TX_IP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)
#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM | \
RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_UDP_CKSUM (RTE_MBUF_F_TX_UDP_CKSUM | \
RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_TCP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | \
RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_OUTER_IP_CKSUM)
#define PKT_TX_IIP_TCP_UDP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM | \
RTE_MBUF_F_TX_IP_CKSUM)
#define PKT_TX_IIP_TCP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_IP_CKSUM)
#define PKT_TX_IIP_UDP_CKSUM (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_IP_CKSUM)
#define PKT_TX_OIP_TCP_UDP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM | \
RTE_MBUF_F_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_UDP_CKSUM (RTE_MBUF_F_TX_UDP_CKSUM | \
RTE_MBUF_F_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_TCP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | \
RTE_MBUF_F_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_CKSUM (RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_OUTER_IP_CKSUM)
#define PKT_TX_TCP_UDP_CKSUM (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM)
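
These bnxt helper macros OR several checksum flags together, and the bnxt_start_xmit hunk above tests them with (ol_flags & COMBO) == COMBO, which matches only when every bit of the combination is present rather than any one of them. The same pattern in isolation, with placeholder flag values:

#include <stdint.h>
#include <stdio.h>

#define F_TX_TCP_CKSUM	(1ULL << 0)	/* placeholder values */
#define F_TX_UDP_CKSUM	(1ULL << 1)
#define F_TX_IP_CKSUM	(1ULL << 2)

#define F_TX_TCP_UDP_CKSUM (F_TX_TCP_CKSUM | F_TX_UDP_CKSUM)

int main(void)
{
	uint64_t ol_flags = F_TX_TCP_CKSUM | F_TX_IP_CKSUM;

	/* Equality against the combo requires all of its bits. */
	if ((ol_flags & F_TX_TCP_UDP_CKSUM) == F_TX_TCP_UDP_CKSUM)
		printf("both TCP and UDP csum bits set\n");
	else if (ol_flags & F_TX_TCP_UDP_CKSUM)
		printf("only one of the two bits set\n");
	return 0;
}
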
#define TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \


@ -112,7 +112,7 @@ is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
const uint16_t ether_type_slow_be =
rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
return !((mbuf->ol_flags & RTE_MBUF_F_RX_VLAN) ? mbuf->vlan_tci : 0) &&
(ethertype == ether_type_slow_be &&
(subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
}
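
The bond driver reads vlan_tci only behind the RX VLAN test because the field is undefined when the flag is absent. The same guard, pulled out as a helper; the flag value is a placeholder:

#include <stdint.h>
#include <stdio.h>

#define F_RX_VLAN (1ULL << 0)	/* placeholder value */

struct pkt {			/* stand-in for the mbuf fields used */
	uint64_t ol_flags;
	uint16_t vlan_tci;
};

/* vlan_tci is only meaningful when the VLAN flag is set. */
static uint16_t pkt_vlan_tci(const struct pkt *p)
{
	return (p->ol_flags & F_RX_VLAN) ? p->vlan_tci : 0;
}

int main(void)
{
	struct pkt p = { .ol_flags = F_RX_VLAN, .vlan_tci = 100 };

	printf("tci=%u\n", pkt_vlan_tci(&p));
	return 0;
}
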


@ -50,15 +50,15 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
uint16_t flags = 0;
/* Fastpath is dependent on these enums */
RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
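
These build-time checks exist because the cnxk fast path copies flag bits straight into descriptor words at fixed positions; if a flag value ever moved, the build breaks instead of silently corrupting descriptors. RTE_BUILD_BUG_ON(cond) fails compilation when cond is true; a standalone equivalent of the pattern using C11 _Static_assert, with the flag value taken from the checks above:

#include <stdint.h>

/* Note the inverted condition relative to _Static_assert. */
#define BUILD_BUG_ON(cond) _Static_assert(!(cond), "build-time check failed")

#define F_TX_TCP_CKSUM (1ULL << 52)	/* value the fast path relies on */

/* Compilation stops here if the flag is ever relocated. */
BUILD_BUG_ON(F_TX_TCP_CKSUM != (1ULL << 52));

int main(void)
{
	return 0;
}
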


@ -163,10 +163,10 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uintptr_t sa_base, uintptr_t laddr,
res_w1 = sg[10];
/* Clear checksum flags and update security flag */
*ol_flags &= ~(PKT_RX_L4_CKSUM_MASK | PKT_RX_IP_CKSUM_MASK);
*ol_flags &= ~(RTE_MBUF_F_RX_L4_CKSUM_MASK | RTE_MBUF_F_RX_IP_CKSUM_MASK);
*ol_flags |= (((res_w1 & 0xFF) == CPT_COMP_WARN) ?
PKT_RX_SEC_OFFLOAD :
(PKT_RX_SEC_OFFLOAD | PKT_RX_SEC_OFFLOAD_FAILED));
RTE_MBUF_F_RX_SEC_OFFLOAD :
(RTE_MBUF_F_RX_SEC_OFFLOAD | RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED));
/* Calculate inner packet length */
len = ((res_w1 >> 16) & 0xFFFF) + hdr->w2.il3_off -
sizeof(struct cpt_parse_hdr_s) - (w0 & 0x7);
@ -229,9 +229,9 @@ nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
* 0 to CNXK_FLOW_ACTION_FLAG_DEFAULT - 2
*/
if (likely(match_id)) {
ol_flags |= PKT_RX_FDIR;
ol_flags |= RTE_MBUF_F_RX_FDIR;
if (match_id != CNXK_FLOW_ACTION_FLAG_DEFAULT) {
ol_flags |= PKT_RX_FDIR_ID;
ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
mbuf->hash.fdir.hi = match_id - 1;
}
}
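
The match-id logic above distinguishes the two flow-director flags: FDIR alone reports that a rule matched, while FDIR_ID additionally marks the reported id field as valid. A sketch of the same shape with placeholder values:

#include <stdint.h>
#include <stdio.h>

#define F_RX_FDIR	(1ULL << 0)	/* placeholder values */
#define F_RX_FDIR_ID	(1ULL << 1)

struct pkt {			/* stand-in for the mbuf fields used */
	uint64_t ol_flags;
	uint32_t fdir_hi;
};

/* Mirror of nix_update_match_id: ids above the "flag only" default
 * also carry a valid mark value. */
static void set_match_id(struct pkt *p, uint16_t match_id, uint16_t dflt)
{
	if (match_id) {
		p->ol_flags |= F_RX_FDIR;
		if (match_id != dflt) {
			p->ol_flags |= F_RX_FDIR_ID;
			p->fdir_hi = match_id - 1;
		}
	}
}

int main(void)
{
	struct pkt p = {0};

	set_match_id(&p, 5, 1);
	printf("flags=%llx id=%u\n", (unsigned long long)p.ol_flags,
	       p.fdir_hi);
	return 0;
}
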
@ -315,7 +315,7 @@ cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
if (flag & NIX_RX_OFFLOAD_RSS_F) {
mbuf->hash.rss = tag;
ol_flags |= PKT_RX_RSS_HASH;
ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
}
/* Process Security packets */
@ -331,9 +331,9 @@ cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
/* Rlen */
len = ((res_w1 >> 16) & 0xFFFF) + mbuf->pkt_len;
ol_flags |= ((uc_cc == CPT_COMP_WARN) ?
PKT_RX_SEC_OFFLOAD :
(PKT_RX_SEC_OFFLOAD |
PKT_RX_SEC_OFFLOAD_FAILED));
RTE_MBUF_F_RX_SEC_OFFLOAD :
(RTE_MBUF_F_RX_SEC_OFFLOAD |
RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED));
} else {
if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
ol_flags |= nix_rx_olflags_get(lookup_mem, w1);
@ -345,11 +345,11 @@ cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
if (rx->vtag0_gone) {
ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mbuf->vlan_tci = rx->vtag0_tci;
}
if (rx->vtag1_gone) {
ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
mbuf->vlan_tci_outer = rx->vtag1_tci;
}
}
@ -495,7 +495,7 @@ static __rte_always_inline uint64_t
nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
{
if (w2 & BIT_ULL(21) /* vtag0_gone */) {
ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
*f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
}
@ -506,7 +506,7 @@ static __rte_always_inline uint64_t
nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
{
if (w2 & BIT_ULL(23) /* vtag1_gone */) {
ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
}
@ -678,10 +678,10 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
f1 = vsetq_lane_u32(cq1_w0, f1, 3);
f2 = vsetq_lane_u32(cq2_w0, f2, 3);
f3 = vsetq_lane_u32(cq3_w0, f3, 3);
ol_flags0 = PKT_RX_RSS_HASH;
ol_flags1 = PKT_RX_RSS_HASH;
ol_flags2 = PKT_RX_RSS_HASH;
ol_flags3 = PKT_RX_RSS_HASH;
ol_flags0 = RTE_MBUF_F_RX_RSS_HASH;
ol_flags1 = RTE_MBUF_F_RX_RSS_HASH;
ol_flags2 = RTE_MBUF_F_RX_RSS_HASH;
ol_flags3 = RTE_MBUF_F_RX_RSS_HASH;
} else {
ol_flags0 = 0;
ol_flags1 = 0;
@ -778,8 +778,8 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
RTE_PTYPE_L2_ETHER_TIMESYNC,
RTE_PTYPE_L2_ETHER_TIMESYNC,
RTE_PTYPE_L2_ETHER_TIMESYNC};
const uint64_t ts_olf = PKT_RX_IEEE1588_PTP |
PKT_RX_IEEE1588_TMST |
const uint64_t ts_olf = RTE_MBUF_F_RX_IEEE1588_PTP |
RTE_MBUF_F_RX_IEEE1588_TMST |
tstamp->rx_tstamp_dynflag;
const uint32x4_t and_mask = {0x1, 0x2, 0x4, 0x8};
uint64x2_t ts01, ts23, mask;


@ -458,12 +458,12 @@ cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
{
uint64_t mask, ol_flags = m->ol_flags;
if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
uintptr_t mdata = rte_pktmbuf_mtod(m, uintptr_t);
uint16_t *iplen, *oiplen, *oudplen;
uint16_t lso_sb, paylen;
mask = -!!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6));
mask = -!!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6));
lso_sb = (mask & (m->outer_l2_len + m->outer_l3_len)) +
m->l2_len + m->l3_len + m->l4_len;
@ -472,18 +472,18 @@ cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
/* Get iplen position assuming no tunnel hdr */
iplen = (uint16_t *)(mdata + m->l2_len +
(2 << !!(ol_flags & PKT_TX_IPV6)));
(2 << !!(ol_flags & RTE_MBUF_F_TX_IPV6)));
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
(ol_flags & PKT_TX_TUNNEL_MASK)) {
(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun =
(CNXK_NIX_UDP_TUN_BITMASK >>
((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) &
0x1;
oiplen = (uint16_t *)(mdata + m->outer_l2_len +
(2 << !!(ol_flags &
PKT_TX_OUTER_IPV6)));
RTE_MBUF_F_TX_OUTER_IPV6)));
*oiplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*oiplen) -
paylen);
@ -498,7 +498,7 @@ cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
/* Update iplen position to inner ip hdr */
iplen = (uint16_t *)(mdata + lso_sb - m->l3_len -
m->l4_len +
(2 << !!(ol_flags & PKT_TX_IPV6)));
(2 << !!(ol_flags & RTE_MBUF_F_TX_IPV6)));
}
*iplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*iplen) - paylen);
@ -548,11 +548,11 @@ cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
(flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
const uint8_t csum = !!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
const uint8_t ol3type =
((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
!!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) << 1) +
((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) << 2) +
!!(ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
/* Outer L3 */
w1.ol3type = ol3type;
@ -564,15 +564,15 @@ cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
w1.ol4type = csum + (csum << 1);
/* Inner L3 */
w1.il3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
((!!(ol_flags & PKT_TX_IPV6)) << 2);
w1.il3type = ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2);
w1.il3ptr = w1.ol4ptr + m->l2_len;
w1.il4ptr = w1.il3ptr + m->l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
w1.il3type = w1.il3type + !!(ol_flags & PKT_TX_IP_CKSUM);
w1.il3type = w1.il3type + !!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
/* Inner L4 */
w1.il4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
w1.il4type = (ol_flags & RTE_MBUF_F_TX_L4_MASK) >> 52;
/* In case of no tunnel header use only
* shift IL3/IL4 fields a bit to use
@ -583,16 +583,16 @@ cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
((w1.u & 0X00000000FFFFFFFF) >> (mask << 4));
} else if (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
const uint8_t csum = !!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
const uint8_t outer_l2_len = m->outer_l2_len;
/* Outer L3 */
w1.ol3ptr = outer_l2_len;
w1.ol4ptr = outer_l2_len + m->outer_l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
w1.ol3type = ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
!!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
w1.ol3type = ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) << 1) +
((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) << 2) +
!!(ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
/* Outer L4 */
w1.ol4type = csum + (csum << 1);
@ -608,27 +608,27 @@ cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
w1.ol3ptr = l2_len;
w1.ol4ptr = l2_len + m->l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
w1.ol3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
((!!(ol_flags & PKT_TX_IPV6)) << 2) +
!!(ol_flags & PKT_TX_IP_CKSUM);
w1.ol3type = ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2) +
!!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
/* Inner L4 */
w1.ol4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
w1.ol4type = (ol_flags & RTE_MBUF_F_TX_L4_MASK) >> 52;
}
if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & PKT_TX_VLAN);
send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
/* HW will update ptr after vlan0 update */
send_hdr_ext->w1.vlan1_ins_ptr = 12;
send_hdr_ext->w1.vlan1_ins_tci = m->vlan_tci;
send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & PKT_TX_QINQ);
send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_QINQ);
/* 2B before end of l2 header */
send_hdr_ext->w1.vlan0_ins_ptr = 12;
send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
}
if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
uint16_t lso_sb;
uint64_t mask;
@ -639,20 +639,20 @@ cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
send_hdr_ext->w0.lso = 1;
send_hdr_ext->w0.lso_mps = m->tso_segsz;
send_hdr_ext->w0.lso_format =
NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6);
NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & RTE_MBUF_F_TX_IPV6);
w1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
(ol_flags & PKT_TX_TUNNEL_MASK)) {
(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun =
(CNXK_NIX_UDP_TUN_BITMASK >>
((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) &
0x1;
uint8_t shift = is_udp_tun ? 32 : 0;
shift += (!!(ol_flags & PKT_TX_OUTER_IPV6) << 4);
shift += (!!(ol_flags & PKT_TX_IPV6) << 3);
shift += (!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) << 4);
shift += (!!(ol_flags & RTE_MBUF_F_TX_IPV6) << 3);
w1.il4type = NIX_SENDL4TYPE_TCP_CKSUM;
w1.ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0;
@ -686,7 +686,7 @@ cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
}
if (flags & NIX_TX_OFFLOAD_SECURITY_F)
*sec = !!(ol_flags & PKT_TX_SEC_OFFLOAD);
*sec = !!(ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
}
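
The double negation in *sec = !!(ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) matters: the masked value is a single bit at a high position, and assigning it to a narrow field would truncate it to zero, so !! collapses the test to exactly 0 or 1 first. A tiny illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ol_flags = 1ULL << 55;	/* some high flag bit */

	uint8_t truncated = (uint8_t)(ol_flags & (1ULL << 55));	/* 0 */
	uint8_t normalized = !!(ol_flags & (1ULL << 55));	/* 1 */

	printf("truncated=%u normalized=%u\n", truncated, normalized);
	return 0;
}
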
static __rte_always_inline void
@ -722,7 +722,7 @@ cn10k_nix_xmit_prepare_tstamp(uintptr_t lmt_addr, const uint64_t *cmd,
const uint16_t flags)
{
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
const uint8_t is_ol_tstamp = !(ol_flags & PKT_TX_IEEE1588_TMST);
const uint8_t is_ol_tstamp = !(ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST);
struct nix_send_ext_s *send_hdr_ext =
(struct nix_send_ext_s *)lmt_addr + 16;
uint64_t *lmt = (uint64_t *)lmt_addr;
@ -742,7 +742,7 @@ cn10k_nix_xmit_prepare_tstamp(uintptr_t lmt_addr, const uint64_t *cmd,
rte_compiler_barrier();
}
/* Packets for which PKT_TX_IEEE1588_TMST is not set, tx tstamp
/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp
* should not be recorded, hence changing the alg type to
* NIX_SENDMEMALG_SET and also changing send mem addr field to
* next 8 bytes as it corrupts the actual tx tstamp registered
@ -1118,7 +1118,7 @@ cn10k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1,
uint16_t lso_sb;
uint64_t mask;
if (!(ol_flags & PKT_TX_TCP_SEG))
if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG))
return;
mask = -(!w1->il3type);
@ -1127,20 +1127,20 @@ cn10k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1,
w0->u |= BIT(14);
w0->lso_sb = lso_sb;
w0->lso_mps = m->tso_segsz;
w0->lso_format = NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6);
w0->lso_format = NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & RTE_MBUF_F_TX_IPV6);
w1->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
(ol_flags & PKT_TX_TUNNEL_MASK)) {
(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun =
(CNXK_NIX_UDP_TUN_BITMASK >>
((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) &
0x1;
uint8_t shift = is_udp_tun ? 32 : 0;
shift += (!!(ol_flags & PKT_TX_OUTER_IPV6) << 4);
shift += (!!(ol_flags & PKT_TX_IPV6) << 3);
shift += (!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) << 4);
shift += (!!(ol_flags & RTE_MBUF_F_TX_IPV6) << 3);
w1->il4type = NIX_SENDL4TYPE_TCP_CKSUM;
w1->ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0;
@ -1784,26 +1784,26 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
const uint8x16_t tbl = {
/* [0-15] = il4type:il3type */
0x04, /* none (IPv6 assumed) */
0x14, /* PKT_TX_TCP_CKSUM (IPv6 assumed) */
0x24, /* PKT_TX_SCTP_CKSUM (IPv6 assumed) */
0x34, /* PKT_TX_UDP_CKSUM (IPv6 assumed) */
0x03, /* PKT_TX_IP_CKSUM */
0x13, /* PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM */
0x23, /* PKT_TX_IP_CKSUM | PKT_TX_SCTP_CKSUM */
0x33, /* PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM */
0x02, /* PKT_TX_IPV4 */
0x12, /* PKT_TX_IPV4 | PKT_TX_TCP_CKSUM */
0x22, /* PKT_TX_IPV4 | PKT_TX_SCTP_CKSUM */
0x32, /* PKT_TX_IPV4 | PKT_TX_UDP_CKSUM */
0x03, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM */
0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
* PKT_TX_TCP_CKSUM
0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6 assumed) */
0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6 assumed) */
0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6 assumed) */
0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
0x13, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM */
0x23, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_SCTP_CKSUM */
0x33, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM */
0x02, /* RTE_MBUF_F_TX_IPV4 */
0x12, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_CKSUM */
0x22, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_SCTP_CKSUM */
0x32, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_UDP_CKSUM */
0x03, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM */
0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_TCP_CKSUM
*/
0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
* PKT_TX_SCTP_CKSUM
0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_SCTP_CKSUM
*/
0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
* PKT_TX_UDP_CKSUM
0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_UDP_CKSUM
*/
};
@ -1988,40 +1988,40 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
{
/* [0-15] = il4type:il3type */
0x04, /* none (IPv6) */
0x14, /* PKT_TX_TCP_CKSUM (IPv6) */
0x24, /* PKT_TX_SCTP_CKSUM (IPv6) */
0x34, /* PKT_TX_UDP_CKSUM (IPv6) */
0x03, /* PKT_TX_IP_CKSUM */
0x13, /* PKT_TX_IP_CKSUM |
* PKT_TX_TCP_CKSUM
0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6) */
0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6) */
0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6) */
0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
0x13, /* RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_TCP_CKSUM
*/
0x23, /* PKT_TX_IP_CKSUM |
* PKT_TX_SCTP_CKSUM
0x23, /* RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_SCTP_CKSUM
*/
0x33, /* PKT_TX_IP_CKSUM |
* PKT_TX_UDP_CKSUM
0x33, /* RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_UDP_CKSUM
*/
0x02, /* PKT_TX_IPV4 */
0x12, /* PKT_TX_IPV4 |
* PKT_TX_TCP_CKSUM
0x02, /* RTE_MBUF_F_TX_IPV4 */
0x12, /* RTE_MBUF_F_TX_IPV4 |
* RTE_MBUF_F_TX_TCP_CKSUM
*/
0x22, /* PKT_TX_IPV4 |
* PKT_TX_SCTP_CKSUM
0x22, /* RTE_MBUF_F_TX_IPV4 |
* RTE_MBUF_F_TX_SCTP_CKSUM
*/
0x32, /* PKT_TX_IPV4 |
* PKT_TX_UDP_CKSUM
0x32, /* RTE_MBUF_F_TX_IPV4 |
* RTE_MBUF_F_TX_UDP_CKSUM
*/
0x03, /* PKT_TX_IPV4 |
* PKT_TX_IP_CKSUM
0x03, /* RTE_MBUF_F_TX_IPV4 |
* RTE_MBUF_F_TX_IP_CKSUM
*/
0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
* PKT_TX_TCP_CKSUM
0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_TCP_CKSUM
*/
0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
* PKT_TX_SCTP_CKSUM
0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_SCTP_CKSUM
*/
0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
* PKT_TX_UDP_CKSUM
0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_UDP_CKSUM
*/
},
@ -2209,11 +2209,11 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
/* Tx ol_flag for vlan. */
const uint64x2_t olv = {PKT_TX_VLAN, PKT_TX_VLAN};
const uint64x2_t olv = {RTE_MBUF_F_TX_VLAN, RTE_MBUF_F_TX_VLAN};
/* Bit enable for VLAN1 */
const uint64x2_t mlv = {BIT_ULL(49), BIT_ULL(49)};
/* Tx ol_flag for QinQ. */
const uint64x2_t olq = {PKT_TX_QINQ, PKT_TX_QINQ};
const uint64x2_t olq = {RTE_MBUF_F_TX_QINQ, RTE_MBUF_F_TX_QINQ};
/* Bit enable for VLAN0 */
const uint64x2_t mlq = {BIT_ULL(48), BIT_ULL(48)};
/* Load vlan values from packet. Outer is VLAN 0 */
@ -2255,8 +2255,8 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
/* Tx ol_flag for timestamp. */
const uint64x2_t olf = {PKT_TX_IEEE1588_TMST,
PKT_TX_IEEE1588_TMST};
const uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST,
RTE_MBUF_F_TX_IEEE1588_TMST};
/* Set send mem alg to SUB. */
const uint64x2_t alg = {BIT_ULL(59), BIT_ULL(59)};
/* Increment send mem address by 8. */
@ -2425,8 +2425,8 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
}
if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
const uint64x2_t olf = {PKT_TX_SEC_OFFLOAD,
PKT_TX_SEC_OFFLOAD};
const uint64x2_t olf = {RTE_MBUF_F_TX_SEC_OFFLOAD,
RTE_MBUF_F_TX_SEC_OFFLOAD};
uintptr_t next;
uint8_t dw;


@ -50,15 +50,15 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
uint16_t flags = 0;
/* Fastpath is dependent on these enums */
RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_TCP_CKSUM != (1ULL << 52));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_SCTP_CKSUM != (2ULL << 52));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_UDP_CKSUM != (3ULL << 52));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IP_CKSUM != (1ULL << 54));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_IPV4 != (1ULL << 55));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IP_CKSUM != (1ULL << 58));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV4 != (1ULL << 59));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_IPV6 != (1ULL << 60));
RTE_BUILD_BUG_ON(RTE_MBUF_F_TX_OUTER_UDP_CKSUM != (1ULL << 41));
RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);


@ -103,9 +103,9 @@ nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
* 0 to CNXK_FLOW_ACTION_FLAG_DEFAULT - 2
*/
if (likely(match_id)) {
ol_flags |= PKT_RX_FDIR;
ol_flags |= RTE_MBUF_F_RX_FDIR;
if (match_id != CNXK_FLOW_ACTION_FLAG_DEFAULT) {
ol_flags |= PKT_RX_FDIR_ID;
ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
mbuf->hash.fdir.hi = match_id - 1;
}
}
@ -237,7 +237,7 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
rte_prefetch0((void *)data);
if (unlikely(res != (CPT_COMP_GOOD | ROC_IE_ONF_UCC_SUCCESS << 8)))
return PKT_RX_SEC_OFFLOAD | PKT_RX_SEC_OFFLOAD_FAILED;
return RTE_MBUF_F_RX_SEC_OFFLOAD | RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
data += lcptr;
/* 20 bits of tag would have the SPI */
@ -258,7 +258,7 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
win_sz = (uint32_t)(dw >> 64);
if (win_sz) {
if (ipsec_antireplay_check(sa, sa_priv, data, win_sz) < 0)
return PKT_RX_SEC_OFFLOAD | PKT_RX_SEC_OFFLOAD_FAILED;
return RTE_MBUF_F_RX_SEC_OFFLOAD | RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
}
/* Get total length from IPv4 header. We can assume only IPv4 */
@ -272,7 +272,7 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
*rearm_val |= data_off;
*len = rte_be_to_cpu_16(ipv4->total_length) + lcptr;
return PKT_RX_SEC_OFFLOAD;
return RTE_MBUF_F_RX_SEC_OFFLOAD;
}
static __rte_always_inline void
@ -319,7 +319,7 @@ cn9k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
if (flag & NIX_RX_OFFLOAD_RSS_F) {
mbuf->hash.rss = tag;
ol_flags |= PKT_RX_RSS_HASH;
ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
}
if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
@ -328,11 +328,11 @@ cn9k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
skip_parse:
if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
if (rx->cn9k.vtag0_gone) {
ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mbuf->vlan_tci = rx->cn9k.vtag0_tci;
}
if (rx->cn9k.vtag1_gone) {
ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
mbuf->vlan_tci_outer = rx->cn9k.vtag1_tci;
}
}
@ -437,7 +437,7 @@ static __rte_always_inline uint64_t
nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
{
if (w2 & BIT_ULL(21) /* vtag0_gone */) {
ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
*f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
}
@ -448,7 +448,7 @@ static __rte_always_inline uint64_t
nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
{
if (w2 & BIT_ULL(23) /* vtag1_gone */) {
ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
}
@ -549,10 +549,10 @@ cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
f1 = vsetq_lane_u32(cq1_w0, f1, 3);
f2 = vsetq_lane_u32(cq2_w0, f2, 3);
f3 = vsetq_lane_u32(cq3_w0, f3, 3);
ol_flags0 = PKT_RX_RSS_HASH;
ol_flags1 = PKT_RX_RSS_HASH;
ol_flags2 = PKT_RX_RSS_HASH;
ol_flags3 = PKT_RX_RSS_HASH;
ol_flags0 = RTE_MBUF_F_RX_RSS_HASH;
ol_flags1 = RTE_MBUF_F_RX_RSS_HASH;
ol_flags2 = RTE_MBUF_F_RX_RSS_HASH;
ol_flags3 = RTE_MBUF_F_RX_RSS_HASH;
} else {
ol_flags0 = 0;
ol_flags1 = 0;
@ -625,8 +625,8 @@ cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
RTE_PTYPE_L2_ETHER_TIMESYNC,
RTE_PTYPE_L2_ETHER_TIMESYNC,
RTE_PTYPE_L2_ETHER_TIMESYNC};
const uint64_t ts_olf = PKT_RX_IEEE1588_PTP |
PKT_RX_IEEE1588_TMST |
const uint64_t ts_olf = RTE_MBUF_F_RX_IEEE1588_PTP |
RTE_MBUF_F_RX_IEEE1588_TMST |
rxq->tstamp->rx_tstamp_dynflag;
const uint32x4_t and_mask = {0x1, 0x2, 0x4, 0x8};
uint64x2_t ts01, ts23, mask;


@ -62,12 +62,12 @@ cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
{
uint64_t mask, ol_flags = m->ol_flags;
if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
uintptr_t mdata = rte_pktmbuf_mtod(m, uintptr_t);
uint16_t *iplen, *oiplen, *oudplen;
uint16_t lso_sb, paylen;
mask = -!!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6));
mask = -!!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6));
lso_sb = (mask & (m->outer_l2_len + m->outer_l3_len)) +
m->l2_len + m->l3_len + m->l4_len;
@ -76,18 +76,18 @@ cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
/* Get iplen position assuming no tunnel hdr */
iplen = (uint16_t *)(mdata + m->l2_len +
(2 << !!(ol_flags & PKT_TX_IPV6)));
(2 << !!(ol_flags & RTE_MBUF_F_TX_IPV6)));
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
(ol_flags & PKT_TX_TUNNEL_MASK)) {
(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun =
(CNXK_NIX_UDP_TUN_BITMASK >>
((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) &
0x1;
oiplen = (uint16_t *)(mdata + m->outer_l2_len +
(2 << !!(ol_flags &
PKT_TX_OUTER_IPV6)));
RTE_MBUF_F_TX_OUTER_IPV6)));
*oiplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*oiplen) -
paylen);
@ -102,7 +102,7 @@ cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
/* Update iplen position to inner ip hdr */
iplen = (uint16_t *)(mdata + lso_sb - m->l3_len -
m->l4_len +
(2 << !!(ol_flags & PKT_TX_IPV6)));
(2 << !!(ol_flags & RTE_MBUF_F_TX_IPV6)));
}
*iplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*iplen) - paylen);
@ -152,11 +152,11 @@ cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
(flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
const uint8_t csum = !!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
const uint8_t ol3type =
((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
!!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) << 1) +
((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) << 2) +
!!(ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
/* Outer L3 */
w1.ol3type = ol3type;
@ -168,15 +168,15 @@ cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
w1.ol4type = csum + (csum << 1);
/* Inner L3 */
w1.il3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
((!!(ol_flags & PKT_TX_IPV6)) << 2);
w1.il3type = ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2);
w1.il3ptr = w1.ol4ptr + m->l2_len;
w1.il4ptr = w1.il3ptr + m->l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
w1.il3type = w1.il3type + !!(ol_flags & PKT_TX_IP_CKSUM);
w1.il3type = w1.il3type + !!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
/* Inner L4 */
w1.il4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
w1.il4type = (ol_flags & RTE_MBUF_F_TX_L4_MASK) >> 52;
/* In case of no tunnel header use only
* shift IL3/IL4 fields a bit to use
@ -187,16 +187,16 @@ cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
((w1.u & 0X00000000FFFFFFFF) >> (mask << 4));
} else if (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
const uint8_t csum = !!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
const uint8_t outer_l2_len = m->outer_l2_len;
/* Outer L3 */
w1.ol3ptr = outer_l2_len;
w1.ol4ptr = outer_l2_len + m->outer_l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
w1.ol3type = ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
!!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
w1.ol3type = ((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) << 1) +
((!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) << 2) +
!!(ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
/* Outer L4 */
w1.ol4type = csum + (csum << 1);
@ -212,27 +212,27 @@ cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
w1.ol3ptr = l2_len;
w1.ol4ptr = l2_len + m->l3_len;
/* Increment it by 1 if it is IPV4 as 3 is with csum */
w1.ol3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
((!!(ol_flags & PKT_TX_IPV6)) << 2) +
!!(ol_flags & PKT_TX_IP_CKSUM);
w1.ol3type = ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2) +
!!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
/* Inner L4 */
w1.ol4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
w1.ol4type = (ol_flags & RTE_MBUF_F_TX_L4_MASK) >> 52;
}
if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & PKT_TX_VLAN);
send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
/* HW will update ptr after vlan0 update */
send_hdr_ext->w1.vlan1_ins_ptr = 12;
send_hdr_ext->w1.vlan1_ins_tci = m->vlan_tci;
send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & PKT_TX_QINQ);
send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_QINQ);
/* 2B before end of l2 header */
send_hdr_ext->w1.vlan0_ins_ptr = 12;
send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
}
if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & PKT_TX_TCP_SEG)) {
if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
uint16_t lso_sb;
uint64_t mask;
@ -243,20 +243,20 @@ cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
send_hdr_ext->w0.lso = 1;
send_hdr_ext->w0.lso_mps = m->tso_segsz;
send_hdr_ext->w0.lso_format =
NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6);
NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & RTE_MBUF_F_TX_IPV6);
w1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
(ol_flags & PKT_TX_TUNNEL_MASK)) {
(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun =
(CNXK_NIX_UDP_TUN_BITMASK >>
((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) &
0x1;
uint8_t shift = is_udp_tun ? 32 : 0;
shift += (!!(ol_flags & PKT_TX_OUTER_IPV6) << 4);
shift += (!!(ol_flags & PKT_TX_IPV6) << 3);
shift += (!!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) << 4);
shift += (!!(ol_flags & RTE_MBUF_F_TX_IPV6) << 3);
w1.il4type = NIX_SENDL4TYPE_TCP_CKSUM;
w1.ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0;
@ -297,7 +297,7 @@ cn9k_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc,
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
struct nix_send_mem_s *send_mem;
uint16_t off = (no_segdw - 1) << 1;
const uint8_t is_ol_tstamp = !(ol_flags & PKT_TX_IEEE1588_TMST);
const uint8_t is_ol_tstamp = !(ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST);
send_mem = (struct nix_send_mem_s *)(cmd + off);
if (flags & NIX_TX_MULTI_SEG_F) {
@ -310,7 +310,7 @@ cn9k_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc,
rte_compiler_barrier();
}
/* Packets for which PKT_TX_IEEE1588_TMST is not set, tx tstamp
/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp
* should not be recorded, hence changing the alg type to
* NIX_SENDMEMALG_SET and also changing send mem addr field to
* next 8 bytes as it corrupts the actual tx tstamp registered
@ -554,7 +554,7 @@ cn9k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1,
uint16_t lso_sb;
uint64_t mask;
if (!(ol_flags & PKT_TX_TCP_SEG))
if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG))
return;
mask = -(!w1->il3type);
@ -563,15 +563,15 @@ cn9k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1,
w0->u |= BIT(14);
w0->lso_sb = lso_sb;
w0->lso_mps = m->tso_segsz;
w0->lso_format = NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6);
w0->lso_format = NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & RTE_MBUF_F_TX_IPV6);
w1->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
(ol_flags & PKT_TX_TUNNEL_MASK)) {
(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
const uint8_t is_udp_tun =
(CNXK_NIX_UDP_TUN_BITMASK >>
((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) &
((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) >> 45)) &
0x1;
w1->il4type = NIX_SENDL4TYPE_TCP_CKSUM;
@ -579,7 +579,7 @@ cn9k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1,
/* Update format for UDP tunneled packet */
w0->lso_format += is_udp_tun ? 2 : 6;
w0->lso_format += !!(ol_flags & PKT_TX_OUTER_IPV6) << 1;
w0->lso_format += !!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) << 1;
}
}
@ -1061,26 +1061,26 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
const uint8x16_t tbl = {
/* [0-15] = il4type:il3type */
0x04, /* none (IPv6 assumed) */
0x14, /* PKT_TX_TCP_CKSUM (IPv6 assumed) */
0x24, /* PKT_TX_SCTP_CKSUM (IPv6 assumed) */
0x34, /* PKT_TX_UDP_CKSUM (IPv6 assumed) */
0x03, /* PKT_TX_IP_CKSUM */
0x13, /* PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM */
0x23, /* PKT_TX_IP_CKSUM | PKT_TX_SCTP_CKSUM */
0x33, /* PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM */
0x02, /* PKT_TX_IPV4 */
0x12, /* PKT_TX_IPV4 | PKT_TX_TCP_CKSUM */
0x22, /* PKT_TX_IPV4 | PKT_TX_SCTP_CKSUM */
0x32, /* PKT_TX_IPV4 | PKT_TX_UDP_CKSUM */
0x03, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM */
0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
* PKT_TX_TCP_CKSUM
0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6 assumed) */
0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6 assumed) */
0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6 assumed) */
0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
0x13, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM */
0x23, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_SCTP_CKSUM */
0x33, /* RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM */
0x02, /* RTE_MBUF_F_TX_IPV4 */
0x12, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_CKSUM */
0x22, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_SCTP_CKSUM */
0x32, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_UDP_CKSUM */
0x03, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM */
0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_TCP_CKSUM
*/
0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
* PKT_TX_SCTP_CKSUM
0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_SCTP_CKSUM
*/
0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
* PKT_TX_UDP_CKSUM
0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_UDP_CKSUM
*/
};
@ -1265,40 +1265,40 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
{
/* [0-15] = il4type:il3type */
0x04, /* none (IPv6) */
0x14, /* PKT_TX_TCP_CKSUM (IPv6) */
0x24, /* PKT_TX_SCTP_CKSUM (IPv6) */
0x34, /* PKT_TX_UDP_CKSUM (IPv6) */
0x03, /* PKT_TX_IP_CKSUM */
0x13, /* PKT_TX_IP_CKSUM |
* PKT_TX_TCP_CKSUM
0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6) */
0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6) */
0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6) */
0x03, /* RTE_MBUF_F_TX_IP_CKSUM */
0x13, /* RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_TCP_CKSUM
*/
0x23, /* PKT_TX_IP_CKSUM |
* PKT_TX_SCTP_CKSUM
0x23, /* RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_SCTP_CKSUM
*/
0x33, /* PKT_TX_IP_CKSUM |
* PKT_TX_UDP_CKSUM
0x33, /* RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_UDP_CKSUM
*/
0x02, /* PKT_TX_IPV4 */
0x12, /* PKT_TX_IPV4 |
* PKT_TX_TCP_CKSUM
0x02, /* RTE_MBUF_F_TX_IPV4 */
0x12, /* RTE_MBUF_F_TX_IPV4 |
* RTE_MBUF_F_TX_TCP_CKSUM
*/
0x22, /* PKT_TX_IPV4 |
* PKT_TX_SCTP_CKSUM
0x22, /* RTE_MBUF_F_TX_IPV4 |
* RTE_MBUF_F_TX_SCTP_CKSUM
*/
0x32, /* PKT_TX_IPV4 |
* PKT_TX_UDP_CKSUM
0x32, /* RTE_MBUF_F_TX_IPV4 |
* RTE_MBUF_F_TX_UDP_CKSUM
*/
0x03, /* PKT_TX_IPV4 |
* PKT_TX_IP_CKSUM
0x03, /* RTE_MBUF_F_TX_IPV4 |
* RTE_MBUF_F_TX_IP_CKSUM
*/
0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
* PKT_TX_TCP_CKSUM
0x13, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_TCP_CKSUM
*/
0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
* PKT_TX_SCTP_CKSUM
0x23, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_SCTP_CKSUM
*/
0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
* PKT_TX_UDP_CKSUM
0x33, /* RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
* RTE_MBUF_F_TX_UDP_CKSUM
*/
},
@ -1486,11 +1486,11 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
/* Tx ol_flag for vlan. */
const uint64x2_t olv = {PKT_TX_VLAN, PKT_TX_VLAN};
const uint64x2_t olv = {RTE_MBUF_F_TX_VLAN, RTE_MBUF_F_TX_VLAN};
/* Bit enable for VLAN1 */
const uint64x2_t mlv = {BIT_ULL(49), BIT_ULL(49)};
/* Tx ol_flag for QinQ. */
const uint64x2_t olq = {PKT_TX_QINQ, PKT_TX_QINQ};
const uint64x2_t olq = {RTE_MBUF_F_TX_QINQ, RTE_MBUF_F_TX_QINQ};
/* Bit enable for VLAN0 */
const uint64x2_t mlq = {BIT_ULL(48), BIT_ULL(48)};
/* Load vlan values from packet. Outer is VLAN 0 */
@ -1532,8 +1532,8 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
/* Tx ol_flag for timestamp. */
const uint64x2_t olf = {PKT_TX_IEEE1588_TMST,
PKT_TX_IEEE1588_TMST};
const uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST,
RTE_MBUF_F_TX_IEEE1588_TMST};
/* Set send mem alg to SUB. */
const uint64x2_t alg = {BIT_ULL(59), BIT_ULL(59)};
/* Increment send mem address by 8. */


@ -128,8 +128,8 @@
#define CNXK_NIX_FASTPATH_LOOKUP_MEM "cnxk_nix_fastpath_lookup_mem"
#define CNXK_NIX_UDP_TUN_BITMASK \
((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) | \
(1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))
((1ull << (RTE_MBUF_F_TX_TUNNEL_VXLAN >> 45)) | \
(1ull << (RTE_MBUF_F_TX_TUNNEL_GENEVE >> 45)))
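
CNXK_NIX_UDP_TUN_BITMASK works because the Tx tunnel type is a small enum stored at bit 45 of ol_flags: shifting a tunnel flag down by 45 yields an index, and the macro sets one bit per UDP-based tunnel type, which the xmit_prepare hunks above then test with a shift and mask. A sketch assuming the standard tunnel encoding (VXLAN = 1, GRE = 2, GENEVE = 4 at bit 45; the real values are in rte_mbuf_core.h):

#include <stdint.h>
#include <stdio.h>

/* Assumed tunnel-type encoding, mirroring the >> 45 in the macro. */
#define F_TX_TUNNEL_VXLAN	(1ULL << 45)
#define F_TX_TUNNEL_GRE		(2ULL << 45)
#define F_TX_TUNNEL_GENEVE	(4ULL << 45)
#define F_TX_TUNNEL_MASK	(0xFULL << 45)

/* One bit per tunnel type; only the UDP-based ones are set. */
#define UDP_TUN_BITMASK \
	((1ULL << (F_TX_TUNNEL_VXLAN >> 45)) | \
	 (1ULL << (F_TX_TUNNEL_GENEVE >> 45)))

int main(void)
{
	uint64_t ol_flags = F_TX_TUNNEL_GENEVE;
	int is_udp_tun = (UDP_TUN_BITMASK >>
			  ((ol_flags & F_TX_TUNNEL_MASK) >> 45)) & 0x1;

	printf("udp tunnel: %d\n", is_udp_tun);	/* prints 1 */
	return 0;
}
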
/* Subtype from inline outbound error event */
#define CNXK_ETHDEV_SEC_OUTB_EV_SUB 0xFFUL
@ -691,15 +691,15 @@ cnxk_nix_mbuf_to_tstamp(struct rte_mbuf *mbuf,
*/
*cnxk_nix_timestamp_dynfield(mbuf, tstamp) =
rte_be_to_cpu_64(*tstamp_ptr);
/* PKT_RX_IEEE1588_TMST flag needs to be set only in case
/* RTE_MBUF_F_RX_IEEE1588_TMST flag needs to be set only in case
* PTP packets are received.
*/
if (mbuf->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC) {
tstamp->rx_tstamp =
*cnxk_nix_timestamp_dynfield(mbuf, tstamp);
tstamp->rx_ready = 1;
mbuf->ol_flags |= PKT_RX_IEEE1588_PTP |
PKT_RX_IEEE1588_TMST |
mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP |
RTE_MBUF_F_RX_IEEE1588_TMST |
tstamp->rx_tstamp_dynflag;
}
}


@ -238,9 +238,9 @@ nix_create_rx_ol_flags_array(void *mem)
errlev = idx & 0xf;
errcode = (idx & 0xff0) >> 4;
val = PKT_RX_IP_CKSUM_UNKNOWN;
val |= PKT_RX_L4_CKSUM_UNKNOWN;
val |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
val = RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
val |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN;
switch (errlev) {
case NPC_ERRLEV_RE:
@ -248,46 +248,46 @@ nix_create_rx_ol_flags_array(void *mem)
* including Outer L2 length mismatch error
*/
if (errcode) {
val |= PKT_RX_IP_CKSUM_BAD;
val |= PKT_RX_L4_CKSUM_BAD;
val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
val |= PKT_RX_IP_CKSUM_GOOD;
val |= PKT_RX_L4_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
break;
case NPC_ERRLEV_LC:
if (errcode == NPC_EC_OIP4_CSUM ||
errcode == NPC_EC_IP_FRAG_OFFSET_1) {
val |= PKT_RX_IP_CKSUM_BAD;
val |= PKT_RX_OUTER_IP_CKSUM_BAD;
val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
val |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
} else {
val |= PKT_RX_IP_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
break;
case NPC_ERRLEV_LG:
if (errcode == NPC_EC_IIP4_CSUM)
val |= PKT_RX_IP_CKSUM_BAD;
val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
val |= PKT_RX_IP_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
break;
case NPC_ERRLEV_NIX:
if (errcode == NIX_RX_PERRCODE_OL4_CHK ||
errcode == NIX_RX_PERRCODE_OL4_LEN ||
errcode == NIX_RX_PERRCODE_OL4_PORT) {
val |= PKT_RX_IP_CKSUM_GOOD;
val |= PKT_RX_L4_CKSUM_BAD;
val |= PKT_RX_OUTER_L4_CKSUM_BAD;
val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
} else if (errcode == NIX_RX_PERRCODE_IL4_CHK ||
errcode == NIX_RX_PERRCODE_IL4_LEN ||
errcode == NIX_RX_PERRCODE_IL4_PORT) {
val |= PKT_RX_IP_CKSUM_GOOD;
val |= PKT_RX_L4_CKSUM_BAD;
val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else if (errcode == NIX_RX_PERRCODE_IL3_LEN ||
errcode == NIX_RX_PERRCODE_OL3_LEN) {
val |= PKT_RX_IP_CKSUM_BAD;
val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
} else {
val |= PKT_RX_IP_CKSUM_GOOD;
val |= PKT_RX_L4_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
break;
}
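
Editor's note: nix_create_rx_ol_flags_array() precomputes this table once so the RX fast path can translate a hardware error level/code pair into ol_flags with a single load instead of re-running the switch per packet. A sketch of the consuming side, with an illustrative name; the index packing mirrors the errlev/errcode decomposition above:

```c
#include <stdint.h>

/* Index packing matches the loop above:
 * errlev = idx & 0xf;  errcode = (idx & 0xff0) >> 4;
 */
static inline uint64_t
rx_errata_to_ol_flags(const uint64_t *ol_flags_mem,
		      uint8_t errlev, uint8_t errcode)
{
	uint16_t idx = ((uint16_t)errcode << 4) | (errlev & 0xf);

	return ol_flags_mem[idx];
}
```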


@ -536,7 +536,7 @@ static inline unsigned int flits_to_desc(unsigned int n)
*/
static inline int is_eth_imm(const struct rte_mbuf *m)
{
unsigned int hdrlen = (m->ol_flags & PKT_TX_TCP_SEG) ?
unsigned int hdrlen = (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) ?
sizeof(struct cpl_tx_pkt_lso_core) : 0;
hdrlen += sizeof(struct cpl_tx_pkt);
@ -746,12 +746,12 @@ static u64 hwcsum(enum chip_type chip, const struct rte_mbuf *m)
{
int csum_type;
if (m->ol_flags & PKT_TX_IP_CKSUM) {
switch (m->ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
case RTE_MBUF_F_TX_TCP_CKSUM:
csum_type = TX_CSUM_TCPIP;
break;
case PKT_TX_UDP_CKSUM:
case RTE_MBUF_F_TX_UDP_CKSUM:
csum_type = TX_CSUM_UDPIP;
break;
default:
@ -1026,7 +1026,7 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
/* fill the cpl message, same as in t4_eth_xmit, this should be kept
* similar to t4_eth_xmit
*/
if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
cntrl = hwcsum(adap->params.chip, mbuf) |
F_TXPKT_IPCSUM_DIS;
txq->stats.tx_cso++;
@ -1034,7 +1034,7 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
}
if (mbuf->ol_flags & PKT_TX_VLAN) {
if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
txq->stats.vlan_ins++;
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
}
@ -1127,7 +1127,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
}
max_pkt_len = txq->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
if ((!(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) &&
(unlikely(m->pkt_len > max_pkt_len)))
goto out_free;
@ -1138,7 +1138,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
/* align the end of coalesce WR to a 512 byte boundary */
txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
if (!((m->ol_flags & PKT_TX_TCP_SEG) ||
if (!((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) ||
m->pkt_len > RTE_ETHER_MAX_LEN)) {
if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
if (unlikely(map_mbuf(mbuf, addr) < 0)) {
@ -1201,7 +1201,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
len += sizeof(*cpl);
/* Coalescing skipped and we send through normal path */
if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
if (!(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
FW_ETH_TX_PKT_WR :
FW_ETH_TX_PKT_VM_WR) |
@ -1210,7 +1210,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
cpl = (void *)(wr + 1);
else
cpl = (void *)(vmwr + 1);
if (m->ol_flags & PKT_TX_IP_CKSUM) {
if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
cntrl = hwcsum(adap->params.chip, m) |
F_TXPKT_IPCSUM_DIS;
txq->stats.tx_cso++;
@ -1220,7 +1220,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
lso = (void *)(wr + 1);
else
lso = (void *)(vmwr + 1);
v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
v6 = (m->ol_flags & RTE_MBUF_F_TX_IPV6) != 0;
l3hdr_len = m->l3_len;
l4hdr_len = m->l4_len;
eth_xtra_len = m->l2_len - RTE_ETHER_HDR_LEN;
@ -1256,7 +1256,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
txq->stats.tx_cso += m->tso_segsz;
}
if (m->ol_flags & PKT_TX_VLAN) {
if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
txq->stats.vlan_ins++;
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
}
@ -1526,27 +1526,27 @@ static inline void cxgbe_fill_mbuf_info(struct adapter *adap,
if (cpl->vlan_ex)
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER_VLAN,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
else
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER, 0);
if (cpl->l2info & htonl(F_RXF_IP))
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV4,
csum_ok ? PKT_RX_IP_CKSUM_GOOD :
PKT_RX_IP_CKSUM_BAD);
csum_ok ? RTE_MBUF_F_RX_IP_CKSUM_GOOD :
RTE_MBUF_F_RX_IP_CKSUM_BAD);
else if (cpl->l2info & htonl(F_RXF_IP6))
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV6,
csum_ok ? PKT_RX_IP_CKSUM_GOOD :
PKT_RX_IP_CKSUM_BAD);
csum_ok ? RTE_MBUF_F_RX_IP_CKSUM_GOOD :
RTE_MBUF_F_RX_IP_CKSUM_BAD);
if (cpl->l2info & htonl(F_RXF_TCP))
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_TCP,
csum_ok ? PKT_RX_L4_CKSUM_GOOD :
PKT_RX_L4_CKSUM_BAD);
csum_ok ? RTE_MBUF_F_RX_L4_CKSUM_GOOD :
RTE_MBUF_F_RX_L4_CKSUM_BAD);
else if (cpl->l2info & htonl(F_RXF_UDP))
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_UDP,
csum_ok ? PKT_RX_L4_CKSUM_GOOD :
PKT_RX_L4_CKSUM_BAD);
csum_ok ? RTE_MBUF_F_RX_L4_CKSUM_GOOD :
RTE_MBUF_F_RX_L4_CKSUM_BAD);
}
/**
@ -1637,7 +1637,7 @@ static int process_responses(struct sge_rspq *q, int budget,
if (!rss_hdr->filter_tid &&
rss_hdr->hash_type) {
pkt->ol_flags |= PKT_RX_RSS_HASH;
pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
pkt->hash.rss =
ntohl(rss_hdr->hash_val);
}


@ -80,10 +80,9 @@
RTE_ETH_RSS_TCP | \
RTE_ETH_RSS_SCTP)
#define DPAA_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
PKT_TX_TCP_CKSUM | \
PKT_TX_UDP_CKSUM)
#define DPAA_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_TCP_CKSUM | \
RTE_MBUF_F_TX_UDP_CKSUM)
/* DPAA Frame descriptor macros */


@ -125,8 +125,8 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr)
DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);
m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_GOOD |
PKT_RX_L4_CKSUM_GOOD;
m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_IP_CKSUM_GOOD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD;
switch (prs) {
case DPAA_PKT_TYPE_IPV4:
@ -204,13 +204,13 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr)
break;
case DPAA_PKT_TYPE_IPV4_CSUM_ERR:
case DPAA_PKT_TYPE_IPV6_CSUM_ERR:
m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_BAD;
m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_IP_CKSUM_BAD;
break;
case DPAA_PKT_TYPE_IPV4_TCP_CSUM_ERR:
case DPAA_PKT_TYPE_IPV6_TCP_CSUM_ERR:
case DPAA_PKT_TYPE_IPV4_UDP_CSUM_ERR:
case DPAA_PKT_TYPE_IPV6_UDP_CSUM_ERR:
m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_L4_CKSUM_BAD;
m->ol_flags = RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_L4_CKSUM_BAD;
break;
case DPAA_PKT_TYPE_NONE:
m->packet_type = 0;
@ -229,7 +229,7 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr)
/* Check if Vlan is present */
if (prs & DPAA_PARSE_VLAN_MASK)
m->ol_flags |= PKT_RX_VLAN;
m->ol_flags |= RTE_MBUF_F_RX_VLAN;
/* Packet received without stripping the vlan */
}


@ -114,7 +114,7 @@ dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
}
m->hash.rss = fd->simple.flc_hi;
m->ol_flags |= PKT_RX_RSS_HASH;
m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
if (dpaa2_enable_ts[m->port]) {
*dpaa2_timestamp_dynfield(m) = annotation->word2;
@ -141,20 +141,20 @@ dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
#if defined(RTE_LIBRTE_IEEE1588)
if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
mbuf->ol_flags |= PKT_RX_IEEE1588_PTP;
mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
#endif
if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
mbuf->ol_flags |= PKT_RX_VLAN;
mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_QINQ;
pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
}
@ -189,9 +189,9 @@ dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
}
if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
L3_IP_1_MORE_FRAGMENT |
@ -232,9 +232,9 @@ dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
annotation->word4);
if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
if (dpaa2_enable_ts[mbuf->port]) {
*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
@ -1228,7 +1228,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
(*bufs)->nb_segs == 1 &&
rte_mbuf_refcnt_read((*bufs)) == 1)) {
if (unlikely(((*bufs)->ol_flags
& PKT_TX_VLAN) ||
& RTE_MBUF_F_TX_VLAN) ||
(eth_data->dev_conf.txmode.offloads
& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
ret = rte_vlan_insert(bufs);
@ -1271,7 +1271,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
goto send_n_return;
}
if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN) ||
if (unlikely(((*bufs)->ol_flags & RTE_MBUF_F_TX_VLAN) ||
(eth_data->dev_conf.txmode.offloads
& RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
int ret = rte_vlan_insert(bufs);
@ -1532,7 +1532,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
(*bufs)->nb_segs == 1 &&
rte_mbuf_refcnt_read((*bufs)) == 1)) {
if (unlikely((*bufs)->ol_flags
& PKT_TX_VLAN)) {
& RTE_MBUF_F_TX_VLAN)) {
ret = rte_vlan_insert(bufs);
if (ret)
goto send_n_return;


@ -50,15 +50,14 @@
#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
#define E1000_TX_OFFLOAD_MASK ( \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_VLAN)
#define E1000_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV6 | \
RTE_MBUF_F_TX_IPV4 | \
RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_L4_MASK | \
RTE_MBUF_F_TX_VLAN)
#define E1000_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
(RTE_MBUF_F_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
/* PCI offset for querying configuration status register */
#define PCI_CFG_STATUS_REG 0x06
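
Editor's note: this NOTSUP-mask idiom recurs in the ena, fm10k and igb hunks below. Because each driver's supported mask is a subset of RTE_MBUF_F_TX_OFFLOAD_MASK, XOR yields the set difference, i.e. every Tx offload flag the driver cannot handle. A small sketch of the check it enables:

```c
#include <rte_mbuf.h>

/* supported_mask must be a subset of RTE_MBUF_F_TX_OFFLOAD_MASK, so the
 * XOR clears exactly the supported bits and leaves the unsupported ones.
 */
static inline int
tx_offloads_ok(uint64_t ol_flags, uint64_t supported_mask)
{
	uint64_t notsup_mask = RTE_MBUF_F_TX_OFFLOAD_MASK ^ supported_mask;

	return (ol_flags & notsup_mask) == 0;
}
```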
@ -236,7 +235,7 @@ em_set_xmit_ctx(struct em_tx_queue* txq,
* When doing checksum or TCP segmentation with IPv6 headers,
* IPCSE field should be set to 0.
*/
if (flags & PKT_TX_IP_CKSUM) {
if (flags & RTE_MBUF_F_TX_IP_CKSUM) {
ctx.lower_setup.ip_fields.ipcse =
(uint16_t)rte_cpu_to_le_16(ipcse - 1);
cmd_len |= E1000_TXD_CMD_IP;
@ -249,13 +248,13 @@ em_set_xmit_ctx(struct em_tx_queue* txq,
ctx.upper_setup.tcp_fields.tucss = (uint8_t)ipcse;
ctx.upper_setup.tcp_fields.tucse = 0;
switch (flags & PKT_TX_L4_MASK) {
case PKT_TX_UDP_CKSUM:
switch (flags & RTE_MBUF_F_TX_L4_MASK) {
case RTE_MBUF_F_TX_UDP_CKSUM:
ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
offsetof(struct rte_udp_hdr, dgram_cksum));
cmp_mask |= TX_MACIP_LEN_CMP_MASK;
break;
case PKT_TX_TCP_CKSUM:
case RTE_MBUF_F_TX_TCP_CKSUM:
ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
offsetof(struct rte_tcp_hdr, cksum));
cmd_len |= E1000_TXD_CMD_TCP;
@ -358,8 +357,8 @@ tx_desc_cksum_flags_to_upper(uint64_t ol_flags)
static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8};
uint32_t tmp;
tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
tmp = l4_olinfo[(ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM];
tmp |= l3_olinfo[(ol_flags & RTE_MBUF_F_TX_IP_CKSUM) != 0];
return tmp;
}
@ -412,7 +411,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
ol_flags = tx_pkt->ol_flags;
/* If hardware offload required */
tx_ol_req = (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK));
tx_ol_req = (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK));
if (tx_ol_req) {
hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
hdrlen.f.l2_len = tx_pkt->l2_len;
@ -508,7 +507,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
popts_spec = 0;
/* Set VLAN Tag offload fields. */
if (ol_flags & PKT_TX_VLAN) {
if (ol_flags & RTE_MBUF_F_TX_VLAN) {
cmd_type_len |= E1000_TXD_CMD_VLE;
popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
}
@ -658,7 +657,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
/* Check if VLAN present */
pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED : 0);
return pkt_flags;
}
@ -669,9 +668,9 @@ rx_desc_error_to_pkt_flags(uint32_t rx_error)
uint64_t pkt_flags = 0;
if (rx_error & E1000_RXD_ERR_IPE)
pkt_flags |= PKT_RX_IP_CKSUM_BAD;
pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
if (rx_error & E1000_RXD_ERR_TCPE)
pkt_flags |= PKT_RX_L4_CKSUM_BAD;
pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
return pkt_flags;
}
@ -813,7 +812,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->ol_flags = rxm->ol_flags |
rx_desc_error_to_pkt_flags(rxd.errors);
/* Only valid if PKT_RX_VLAN set in pkt_flags */
/* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
/*
@ -1039,7 +1038,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
first_seg->ol_flags = first_seg->ol_flags |
rx_desc_error_to_pkt_flags(rxd.errors);
/* Only valid if PKT_RX_VLAN set in pkt_flags */
/* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
/* Prefetch data of first segment, if configured to do so. */


@ -44,24 +44,23 @@
#include "e1000_ethdev.h"
#ifdef RTE_LIBRTE_IEEE1588
#define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
#define IGB_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
#else
#define IGB_TX_IEEE1588_TMST 0
#endif
/* Bit Mask to indicate what bits required for building TX context */
#define IGB_TX_OFFLOAD_MASK ( \
PKT_TX_OUTER_IPV6 | \
PKT_TX_OUTER_IPV4 | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
PKT_TX_VLAN | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
#define IGB_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 | \
RTE_MBUF_F_TX_OUTER_IPV4 | \
RTE_MBUF_F_TX_IPV6 | \
RTE_MBUF_F_TX_IPV4 | \
RTE_MBUF_F_TX_VLAN | \
RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_L4_MASK | \
RTE_MBUF_F_TX_TCP_SEG | \
IGB_TX_IEEE1588_TMST)
#define IGB_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
@ -226,12 +225,12 @@ struct igb_tx_queue {
static inline uint64_t
check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
{
if (!(ol_req & PKT_TX_TCP_SEG))
if (!(ol_req & RTE_MBUF_F_TX_TCP_SEG))
return ol_req;
if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
ol_req &= ~PKT_TX_TCP_SEG;
ol_req |= PKT_TX_TCP_CKSUM;
ol_req &= ~RTE_MBUF_F_TX_TCP_SEG;
ol_req |= RTE_MBUF_F_TX_TCP_CKSUM;
}
return ol_req;
}
@ -262,13 +261,13 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq,
/* Specify which HW CTX to upload. */
mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
if (ol_flags & PKT_TX_VLAN)
if (ol_flags & RTE_MBUF_F_TX_VLAN)
tx_offload_mask.data |= TX_VLAN_CMP_MASK;
/* check if TCP segmentation required for this packet */
if (ol_flags & PKT_TX_TCP_SEG) {
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
/* implies IP cksum in IPv4 */
if (ol_flags & PKT_TX_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
E1000_ADVTXD_TUCMD_L4T_TCP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
@ -281,26 +280,26 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq,
mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
} else { /* no TSO, check if hardware checksum is needed */
if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK))
tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
if (ol_flags & PKT_TX_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_UDP_CKSUM:
switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
case RTE_MBUF_F_TX_UDP_CKSUM:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct rte_udp_hdr)
<< E1000_ADVTXD_L4LEN_SHIFT;
break;
case PKT_TX_TCP_CKSUM:
case RTE_MBUF_F_TX_TCP_CKSUM:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
<< E1000_ADVTXD_L4LEN_SHIFT;
break;
case PKT_TX_SCTP_CKSUM:
case RTE_MBUF_F_TX_SCTP_CKSUM:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
@ -359,9 +358,9 @@ tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
uint32_t tmp;
tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
tmp = l4_olinfo[(ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM];
tmp |= l3_olinfo[(ol_flags & RTE_MBUF_F_TX_IP_CKSUM) != 0];
tmp |= l4_olinfo[(ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0];
return tmp;
}
@ -371,8 +370,8 @@ tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
uint32_t cmdtype;
static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN) != 0];
cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
cmdtype = vlan_cmd[(ol_flags & RTE_MBUF_F_TX_VLAN) != 0];
cmdtype |= tso_cmd[(ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0];
return cmdtype;
}
@ -528,11 +527,11 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
*/
cmd_type_len = txq->txd_type |
E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
if (tx_ol_req & PKT_TX_TCP_SEG)
if (tx_ol_req & RTE_MBUF_F_TX_TCP_SEG)
pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
#if defined(RTE_LIBRTE_IEEE1588)
if (ol_flags & PKT_TX_IEEE1588_TMST)
if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
#endif
if (tx_ol_req) {
@ -630,7 +629,7 @@ eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
m = tx_pkts[i];
/* Check some limitations for TSO in hardware */
if (m->ol_flags & PKT_TX_TCP_SEG)
if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
(m->l2_len + m->l3_len + m->l4_len >
IGB_TSO_MAX_HDRLEN)) {
@ -745,11 +744,11 @@ igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
static inline uint64_t
rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
{
uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : RTE_MBUF_F_RX_RSS_HASH;
#if defined(RTE_LIBRTE_IEEE1588)
static uint32_t ip_pkt_etqf_map[8] = {
0, 0, 0, PKT_RX_IEEE1588_PTP,
0, 0, 0, RTE_MBUF_F_RX_IEEE1588_PTP,
0, 0, 0, 0,
};
@ -775,11 +774,11 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
/* Check if VLAN present */
pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED : 0);
#if defined(RTE_LIBRTE_IEEE1588)
if (rx_status & E1000_RXD_STAT_TMST)
pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
pkt_flags = pkt_flags | RTE_MBUF_F_RX_IEEE1588_TMST;
#endif
return pkt_flags;
}
@ -793,10 +792,10 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
*/
static uint64_t error_to_pkt_flags_map[4] = {
PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD
};
return error_to_pkt_flags_map[(rx_status >>
E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
@ -938,7 +937,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/*
* The vlan_tci field is only valid when PKT_RX_VLAN is
* The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
* set in the pkt_flags field and must be in CPU byte order.
*/
if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
@ -1178,7 +1177,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
/*
* The vlan_tci field is only valid when PKT_RX_VLAN is
* The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
* set in the pkt_flags field and must be in CPU byte order.
*/
if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&


@ -121,9 +121,9 @@ static const struct ena_stats ena_stats_rx_strings[] = {
RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
RTE_ETH_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
PKT_TX_IP_CKSUM |\
PKT_TX_TCP_SEG)
#define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\
RTE_MBUF_F_TX_IP_CKSUM |\
RTE_MBUF_F_TX_TCP_SEG)
/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
@ -131,15 +131,14 @@ static const struct ena_stats ena_stats_rx_strings[] = {
#define PCI_DEVICE_ID_ENA_VF 0xEC20
#define PCI_DEVICE_ID_ENA_VF_RSERV0 0xEC21
#define ENA_TX_OFFLOAD_MASK (\
PKT_TX_L4_MASK | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
PKT_TX_IP_CKSUM | \
PKT_TX_TCP_SEG)
#define ENA_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_L4_MASK | \
RTE_MBUF_F_TX_IPV6 | \
RTE_MBUF_F_TX_IPV4 | \
RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_TCP_SEG)
#define ENA_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
(RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
/** HW specific offloads capabilities. */
/* IPv4 checksum offload. */
@ -296,24 +295,24 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
packet_type |= RTE_PTYPE_L3_IPV4;
if (unlikely(ena_rx_ctx->l3_csum_err))
ol_flags |= PKT_RX_IP_CKSUM_BAD;
ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
ol_flags |= PKT_RX_IP_CKSUM_GOOD;
ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
packet_type |= RTE_PTYPE_L3_IPV6;
}
if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag)
ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
else
if (unlikely(ena_rx_ctx->l4_csum_err))
ol_flags |= PKT_RX_L4_CKSUM_BAD;
ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
ol_flags |= PKT_RX_L4_CKSUM_GOOD;
ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (fill_hash &&
likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
ol_flags |= PKT_RX_RSS_HASH;
ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mbuf->hash.rss = ena_rx_ctx->hash;
}
@ -331,7 +330,7 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
(queue_offloads & QUEUE_OFFLOADS)) {
/* check if TSO is required */
if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
(queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
ena_tx_ctx->tso_enable = true;
@ -339,11 +338,11 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
}
/* check if L3 checksum is needed */
if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
(queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
ena_tx_ctx->l3_csum_enable = true;
if (mbuf->ol_flags & PKT_TX_IPV6) {
if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) {
ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
} else {
ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
@ -356,12 +355,12 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
}
/* check if L4 checksum is needed */
if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) &&
(queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
ena_tx_ctx->l4_csum_enable = true;
} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
PKT_TX_UDP_CKSUM) &&
} else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
RTE_MBUF_F_TX_UDP_CKSUM) &&
(queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
ena_tx_ctx->l4_csum_enable = true;
@ -2354,7 +2353,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx, fill_hash);
if (unlikely(mbuf->ol_flags &
(PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) {
(RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD))) {
rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
++rx_ring->rx_stats.bad_csum;
}
@ -2402,10 +2401,10 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
if (ol_flags == 0)
continue;
l4_csum_flag = ol_flags & PKT_TX_L4_MASK;
l4_csum_flag = ol_flags & RTE_MBUF_F_TX_L4_MASK;
/* SCTP checksum offload is not supported by the ENA. */
if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) ||
l4_csum_flag == PKT_TX_SCTP_CKSUM) {
l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) {
PMD_TX_LOG(DEBUG,
"mbuf[%" PRIu32 "] has unsupported offloads flags set: 0x%" PRIu64 "\n",
i, ol_flags);
@ -2415,11 +2414,11 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
/* Check if requested offload is also enabled for the queue */
if ((ol_flags & PKT_TX_IP_CKSUM &&
if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
!(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ||
(l4_csum_flag == PKT_TX_TCP_CKSUM &&
(l4_csum_flag == RTE_MBUF_F_TX_TCP_CKSUM &&
!(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) ||
(l4_csum_flag == PKT_TX_UDP_CKSUM &&
(l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM &&
!(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) {
PMD_TX_LOG(DEBUG,
"mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n",
@ -2431,7 +2430,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* The caller is obligated to set l2 and l3 len if any cksum
* offload is enabled.
*/
if (unlikely(ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK) &&
if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) &&
(m->l2_len == 0 || m->l3_len == 0))) {
PMD_TX_LOG(DEBUG,
"mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n",
@ -2450,14 +2449,14 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* pseudo header checksum is needed.
*/
need_pseudo_csum = false;
if (ol_flags & PKT_TX_IPV4) {
if (ol_flags & PKT_TX_IP_CKSUM &&
if (ol_flags & RTE_MBUF_F_TX_IPV4) {
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
!(dev_offload_capa & ENA_L3_IPV4_CSUM)) {
rte_errno = ENOTSUP;
return i;
}
if (ol_flags & PKT_TX_TCP_SEG &&
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
!(dev_offload_capa & ENA_IPV4_TSO)) {
rte_errno = ENOTSUP;
return i;
@ -2466,7 +2465,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Check HW capabilities and if pseudo csum is needed
* for L4 offloads.
*/
if (l4_csum_flag != PKT_TX_L4_NO_CKSUM &&
if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
!(dev_offload_capa & ENA_L4_IPV4_CSUM)) {
if (dev_offload_capa &
ENA_L4_IPV4_CSUM_PARTIAL) {
@ -2483,22 +2482,22 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
if (frag_field & RTE_IPV4_HDR_DF_FLAG) {
m->packet_type |= RTE_PTYPE_L4_NONFRAG;
} else if (ol_flags & PKT_TX_TCP_SEG) {
} else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
/* In case we are supposed to TSO and have DF
* not set (DF=0) hardware must be provided with
* partial checksum.
*/
need_pseudo_csum = true;
}
} else if (ol_flags & PKT_TX_IPV6) {
} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
/* There is no support for IPv6 TSO as for now. */
if (ol_flags & PKT_TX_TCP_SEG) {
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
rte_errno = ENOTSUP;
return i;
}
/* Check HW capabilities and if pseudo csum is needed */
if (l4_csum_flag != PKT_TX_L4_NO_CKSUM &&
if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
!(dev_offload_capa & ENA_L4_IPV6_CSUM)) {
if (dev_offload_capa &
ENA_L4_IPV6_CSUM_PARTIAL) {
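
Editor's note: all of this validation runs inside the driver's tx_pkt_prepare callback; applications opt in by calling rte_eth_tx_prepare() before the transmit burst, as in this sketch (function and variable names illustrative):

```c
#include <rte_ethdev.h>

/* Run the PMD's prepare stage; packets beyond the returned count were
 * rejected, with the reason reported through rte_errno.
 */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}
```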


@ -174,80 +174,80 @@ enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
static inline void enetc_slow_parsing(struct rte_mbuf *m,
uint64_t parse_results)
{
m->ol_flags &= ~(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
m->ol_flags &= ~(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
switch (parse_results) {
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4;
m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6;
m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_TCP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_L4_TCP;
m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
PKT_RX_L4_CKSUM_BAD;
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_TCP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L4_TCP;
m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
PKT_RX_L4_CKSUM_BAD;
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_UDP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_L4_UDP;
m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
PKT_RX_L4_CKSUM_BAD;
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_UDP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L4_UDP;
m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
PKT_RX_L4_CKSUM_BAD;
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_SCTP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_L4_SCTP;
m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
PKT_RX_L4_CKSUM_BAD;
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_SCTP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L4_SCTP;
m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
PKT_RX_L4_CKSUM_BAD;
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV4_ICMP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 |
RTE_PTYPE_L4_ICMP;
m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
PKT_RX_L4_CKSUM_BAD;
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
case ENETC_PARSE_ERROR | ENETC_PKT_TYPE_IPV6_ICMP:
m->packet_type = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L4_ICMP;
m->ol_flags |= PKT_RX_IP_CKSUM_GOOD |
PKT_RX_L4_CKSUM_BAD;
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD |
RTE_MBUF_F_RX_L4_CKSUM_BAD;
return;
/* More switch cases can be added */
default:
m->packet_type = RTE_PTYPE_UNKNOWN;
m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN |
PKT_RX_L4_CKSUM_UNKNOWN;
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN |
RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
}
}
@ -256,7 +256,7 @@ static inline void __rte_hot
enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
{
ENETC_PMD_DP_DEBUG("parse summary = 0x%x ", parse_results);
m->ol_flags |= PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD;
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD;
switch (parse_results) {
case ENETC_PKT_TYPE_ETHER:


@ -250,7 +250,7 @@ void enic_init_vnic_resources(struct enic *enic)
error_interrupt_offset);
/* Compute unsupported ol flags for enic_prep_pkts() */
enic->wq[index].tx_offload_notsup_mask =
PKT_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;
RTE_MBUF_F_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;
cq_idx = enic_cq_wq(enic, index);
vnic_cq_init(&enic->cq[cq_idx],
@ -1749,10 +1749,10 @@ enic_enable_overlay_offload(struct enic *enic)
(enic->geneve ? RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO : 0) |
(enic->vxlan ? RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO : 0);
enic->tx_offload_mask |=
PKT_TX_OUTER_IPV6 |
PKT_TX_OUTER_IPV4 |
PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TUNNEL_MASK;
RTE_MBUF_F_TX_OUTER_IPV6 |
RTE_MBUF_F_TX_OUTER_IPV4 |
RTE_MBUF_F_TX_OUTER_IP_CKSUM |
RTE_MBUF_F_TX_TUNNEL_MASK;
enic->overlay_offload = true;
if (enic->vxlan && enic->geneve)


@ -215,12 +215,12 @@ int enic_get_vnic_config(struct enic *enic)
RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
RTE_ETH_RX_OFFLOAD_RSS_HASH;
enic->tx_offload_mask =
PKT_TX_IPV6 |
PKT_TX_IPV4 |
PKT_TX_VLAN |
PKT_TX_IP_CKSUM |
PKT_TX_L4_MASK |
PKT_TX_TCP_SEG;
RTE_MBUF_F_TX_IPV6 |
RTE_MBUF_F_TX_IPV4 |
RTE_MBUF_F_TX_VLAN |
RTE_MBUF_F_TX_IP_CKSUM |
RTE_MBUF_F_TX_L4_MASK |
RTE_MBUF_F_TX_TCP_SEG;
return 0;
}


@ -424,7 +424,7 @@ uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
for (i = 0; i != nb_pkts; i++) {
m = tx_pkts[i];
ol_flags = m->ol_flags;
if (!(ol_flags & PKT_TX_TCP_SEG)) {
if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
rte_errno = EINVAL;
return i;
@ -489,7 +489,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
wq_desc_avail = vnic_wq_desc_avail(wq);
head_idx = wq->head_idx;
desc_count = wq->ring.desc_count;
ol_flags_mask = PKT_TX_VLAN | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
ol_flags_mask = RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK;
tx_oversized = &enic->soft_stats.tx_oversized;
nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
@ -500,7 +500,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
data_len = tx_pkt->data_len;
ol_flags = tx_pkt->ol_flags;
nb_segs = tx_pkt->nb_segs;
tso = ol_flags & PKT_TX_TCP_SEG;
tso = ol_flags & RTE_MBUF_F_TX_TCP_SEG;
/* drop packet if it's too big to send */
if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
@ -517,7 +517,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
mss = 0;
vlan_id = tx_pkt->vlan_tci;
vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN);
vlan_tag_insert = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
bus_addr = (dma_addr_t)
(tx_pkt->buf_iova + tx_pkt->data_off);
@ -543,20 +543,20 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
mss = tx_pkt->tso_segsz;
/* For tunnel, need the size of outer+inner headers */
if (ol_flags & PKT_TX_TUNNEL_MASK) {
if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
header_len += tx_pkt->outer_l2_len +
tx_pkt->outer_l3_len;
}
}
if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
if (ol_flags & PKT_TX_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
mss |= ENIC_CALC_IP_CKSUM;
/* NIC uses just 1 bit for UDP and TCP */
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
case PKT_TX_UDP_CKSUM:
switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
case RTE_MBUF_F_TX_TCP_CKSUM:
case RTE_MBUF_F_TX_UDP_CKSUM:
mss |= ENIC_CALC_TCP_UDP_CKSUM;
break;
}
@ -634,7 +634,7 @@ static void enqueue_simple_pkts(struct rte_mbuf **pkts,
desc->header_length_flags &=
((1 << WQ_ENET_FLAGS_EOP_SHIFT) |
(1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT));
if (p->ol_flags & PKT_TX_VLAN) {
if (p->ol_flags & RTE_MBUF_F_TX_VLAN) {
desc->header_length_flags |=
1 << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT;
}
@ -643,9 +643,9 @@ static void enqueue_simple_pkts(struct rte_mbuf **pkts,
* is 0, so no need to set offload_mode.
*/
mss = 0;
if (p->ol_flags & PKT_TX_IP_CKSUM)
if (p->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
mss |= ENIC_CALC_IP_CKSUM << WQ_ENET_MSS_SHIFT;
if (p->ol_flags & PKT_TX_L4_MASK)
if (p->ol_flags & RTE_MBUF_F_TX_L4_MASK)
mss |= ENIC_CALC_TCP_UDP_CKSUM << WQ_ENET_MSS_SHIFT;
desc->mss_loopback = mss;


@ -209,11 +209,11 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
/* VLAN STRIPPED flag. The L2 packet type updated here also */
if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
} else {
if (vlan_tci != 0) {
pkt_flags |= PKT_RX_VLAN;
pkt_flags |= RTE_MBUF_F_RX_VLAN;
mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
} else {
mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
@ -227,16 +227,16 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
filter_id = clsf_cqd->filter_id;
if (filter_id) {
pkt_flags |= PKT_RX_FDIR;
pkt_flags |= RTE_MBUF_F_RX_FDIR;
if (filter_id != ENIC_MAGIC_FILTER_ID) {
/* filter_id = mark id + 1, so subtract 1 */
mbuf->hash.fdir.hi = filter_id - 1;
pkt_flags |= PKT_RX_FDIR_ID;
pkt_flags |= RTE_MBUF_F_RX_FDIR_ID;
}
}
} else if (enic_cq_rx_desc_rss_type(cqrd)) {
/* RSS flag */
pkt_flags |= PKT_RX_RSS_HASH;
pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
}
@ -254,17 +254,17 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
*/
if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
else
pkt_flags |= PKT_RX_IP_CKSUM_BAD;
pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
}
if (l4_flags == RTE_PTYPE_L4_UDP ||
l4_flags == RTE_PTYPE_L4_TCP) {
if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
else
pkt_flags |= PKT_RX_L4_CKSUM_BAD;
pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
}
}


@ -167,21 +167,21 @@ enic_noscatter_vec_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
0x80, 0x80, 11, 10,
0x80, 0x80, 11, 10,
0x80, 0x80, 11, 10);
/* PKT_RX_RSS_HASH is 1<<1 so fits in 8-bit integer */
/* RTE_MBUF_F_RX_RSS_HASH is 1<<1 so fits in 8-bit integer */
const __m256i rss_shuffle =
_mm256_set_epi8(/* second 128 bits */
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
0, /* rss_types = 0 */
/* first 128 bits */
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
0 /* rss_types = 0 */);
/*
* VLAN offload flags.
@ -191,8 +191,8 @@ enic_noscatter_vec_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
const __m256i vlan_shuffle =
_mm256_set_epi32(0, 0, 0, 0,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, PKT_RX_VLAN);
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, RTE_MBUF_F_RX_VLAN);
/* Use the same shuffle index as vlan_shuffle */
const __m256i vlan_ptype_shuffle =
_mm256_set_epi32(0, 0, 0, 0,
@ -211,39 +211,39 @@ enic_noscatter_vec_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
const __m256i csum_shuffle =
_mm256_set_epi8(/* second 128 bits */
/* 1111 ip4+ip4_ok+l4+l4_ok */
((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1),
/* 1110 ip4_ok+ip4+l4+!l4_ok */
((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1),
(PKT_RX_IP_CKSUM_GOOD >> 1), /* 1101 ip4+ip4_ok */
(PKT_RX_IP_CKSUM_GOOD >> 1), /* 1100 ip4_ok+ip4 */
(PKT_RX_L4_CKSUM_GOOD >> 1), /* 1011 l4+l4_ok */
(PKT_RX_L4_CKSUM_BAD >> 1), /* 1010 l4+!l4_ok */
((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1),
(RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1), /* 1101 ip4+ip4_ok */
(RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1), /* 1100 ip4_ok+ip4 */
(RTE_MBUF_F_RX_L4_CKSUM_GOOD >> 1), /* 1011 l4+l4_ok */
(RTE_MBUF_F_RX_L4_CKSUM_BAD >> 1), /* 1010 l4+!l4_ok */
0, /* 1001 */
0, /* 1000 */
/* 0111 !ip4_ok+ip4+l4+l4_ok */
((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> 1),
((RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1),
/* 0110 !ip4_ok+ip4+l4+!l4_ok */
((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1),
(PKT_RX_IP_CKSUM_BAD >> 1), /* 0101 !ip4_ok+ip4 */
(PKT_RX_IP_CKSUM_BAD >> 1), /* 0100 !ip4_ok+ip4 */
(PKT_RX_L4_CKSUM_GOOD >> 1), /* 0011 l4+l4_ok */
(PKT_RX_L4_CKSUM_BAD >> 1), /* 0010 l4+!l4_ok */
((RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1),
(RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1), /* 0101 !ip4_ok+ip4 */
(RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1), /* 0100 !ip4_ok+ip4 */
(RTE_MBUF_F_RX_L4_CKSUM_GOOD >> 1), /* 0011 l4+l4_ok */
(RTE_MBUF_F_RX_L4_CKSUM_BAD >> 1), /* 0010 l4+!l4_ok */
0, /* 0001 */
0, /* 0000 */
/* first 128 bits */
((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1),
(PKT_RX_IP_CKSUM_GOOD >> 1),
(PKT_RX_IP_CKSUM_GOOD >> 1),
(PKT_RX_L4_CKSUM_GOOD >> 1),
(PKT_RX_L4_CKSUM_BAD >> 1),
((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1),
((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1),
(RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1),
(RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1),
(RTE_MBUF_F_RX_L4_CKSUM_GOOD >> 1),
(RTE_MBUF_F_RX_L4_CKSUM_BAD >> 1),
0, 0,
((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> 1),
((PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1),
(PKT_RX_IP_CKSUM_BAD >> 1),
(PKT_RX_IP_CKSUM_BAD >> 1),
(PKT_RX_L4_CKSUM_GOOD >> 1),
(PKT_RX_L4_CKSUM_BAD >> 1),
((RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1),
((RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1),
(RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1),
(RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1),
(RTE_MBUF_F_RX_L4_CKSUM_GOOD >> 1),
(RTE_MBUF_F_RX_L4_CKSUM_BAD >> 1),
0, 0);
/*
* Non-fragment PTYPEs.
@ -471,7 +471,7 @@ enic_noscatter_vec_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
break;
/*
* Compute PKT_RX_RSS_HASH.
* Compute RTE_MBUF_F_RX_RSS_HASH.
* Use 2 shifts and 1 shuffle for 8 desc: 0.375 inst/desc
* RSS types in byte 0, 4, 8, 12, 16, 20, 24, 28
* Everything else is zero.
@ -479,7 +479,7 @@ enic_noscatter_vec_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
__m256i rss_types =
_mm256_srli_epi32(_mm256_slli_epi32(flags0_7, 10), 28);
/*
* RSS flags (PKT_RX_RSS_HASH) are in
* RSS flags (RTE_MBUF_F_RX_RSS_HASH) are in
* byte 0, 4, 8, 12, 16, 20, 24, 28
* Everything else is zero.
*/
@ -557,7 +557,7 @@ enic_noscatter_vec_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
vlan0_7 = _mm256_sub_epi32(zero4, vlan0_7);
/*
* Compute PKT_RX_VLAN and PKT_RX_VLAN_STRIPPED.
* Compute RTE_MBUF_F_RX_VLAN and RTE_MBUF_F_RX_VLAN_STRIPPED.
* Use 3 shifts, 1 or, 1 shuffle for 8 desc: 0.625 inst/desc
* VLAN offload flags in byte 0, 4, 8, 12, 16, 20, 24, 28
* Everything else is zero.
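
Editor's note: the constant tables in this hunk all exploit the byte-shuffle instruction as a 16-entry per-lane lookup, translating the 4-bit flag codes of eight descriptors at once. A minimal sketch of the idiom, independent of the driver's exact data flow:

```c
#include <immintrin.h>

/* Each byte of idx (values 0..15) selects a byte from the matching
 * 128-bit lane of table; an index byte with bit 7 set yields zero.
 */
static inline __m256i
lookup_flag_bytes(__m256i table, __m256i idx)
{
	return _mm256_shuffle_epi8(table, idx);
}
```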


@ -37,16 +37,15 @@ static inline void dump_rxd(union fm10k_rx_desc *rxd)
}
#endif
#define FM10K_TX_OFFLOAD_MASK ( \
PKT_TX_VLAN | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG)
#define FM10K_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_VLAN | \
RTE_MBUF_F_TX_IPV6 | \
RTE_MBUF_F_TX_IPV4 | \
RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_L4_MASK | \
RTE_MBUF_F_TX_TCP_SEG)
#define FM10K_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ FM10K_TX_OFFLOAD_MASK)
(RTE_MBUF_F_TX_OFFLOAD_MASK ^ FM10K_TX_OFFLOAD_MASK)
/* @note: When this function is changed, make corresponding change to
* fm10k_dev_supported_ptypes_get()
@ -78,21 +77,21 @@ rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
>> FM10K_RXD_PKTTYPE_SHIFT];
if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
m->ol_flags |= PKT_RX_RSS_HASH;
m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
if (unlikely((d->d.staterr &
(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (unlikely((d->d.staterr &
(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
uint16_t
@ -131,10 +130,10 @@ fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* Packets in fm10k device always carry at least one VLAN tag.
* For those packets coming in without VLAN tag,
* the port default VLAN tag will be used.
* So, always PKT_RX_VLAN flag is set and vlan_tci
* So, always RTE_MBUF_F_RX_VLAN flag is set and vlan_tci
* is valid for each RX packet's mbuf.
*/
mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mbuf->vlan_tci = desc.w.vlan;
/**
* mbuf->vlan_tci_outer is an idle field in fm10k driver,
@ -292,10 +291,10 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* Packets in fm10k device always carry at least one VLAN tag.
* For those packets coming in without VLAN tag,
* the port default VLAN tag will be used.
* So, always PKT_RX_VLAN flag is set and vlan_tci
* So, always RTE_MBUF_F_RX_VLAN flag is set and vlan_tci
* is valid for each RX packet's mbuf.
*/
first_seg->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
first_seg->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
first_seg->vlan_tci = desc.w.vlan;
/**
* mbuf->vlan_tci_outer is an idle field in fm10k driver,
@ -580,11 +579,11 @@ static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
/* set checksum flags on first descriptor of packet. SCTP checksum
* offload is not supported, but we do not explicitly check for this
* case in favor of greatly simplified processing. */
if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
if (mb->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG))
q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;
/* set vlan if requested */
if (mb->ol_flags & PKT_TX_VLAN)
if (mb->ol_flags & RTE_MBUF_F_TX_VLAN)
q->hw_ring[q->next_free].vlan = mb->vlan_tci;
else
q->hw_ring[q->next_free].vlan = 0;
@ -595,9 +594,9 @@ static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
q->hw_ring[q->next_free].buflen =
rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
if (mb->ol_flags & PKT_TX_TCP_SEG) {
if (mb->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
hdrlen += (mb->ol_flags & PKT_TX_TUNNEL_MASK) ?
hdrlen += (mb->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
mb->outer_l2_len + mb->outer_l3_len : 0;
if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG)
hdrlen += sizeof(struct fm10k_ftag);
@ -674,7 +673,7 @@ fm10k_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
for (i = 0; i < nb_pkts; i++) {
m = tx_pkts[i];
if ((m->ol_flags & PKT_TX_TCP_SEG) &&
if ((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
(m->tso_segsz < FM10K_TSO_MINMSS)) {
rte_errno = EINVAL;
return i;


@ -38,7 +38,7 @@ fm10k_reset_tx_queue(struct fm10k_tx_queue *txq);
#define RXEFLAG_SHIFT (13)
/* IPE/L4E flag shift */
#define L3L4EFLAG_SHIFT (14)
/* shift PKT_RX_L4_CKSUM_GOOD into one byte by 1 bit */
/* shift RTE_MBUF_F_RX_L4_CKSUM_GOOD into one byte by 1 bit */
#define CKSUM_SHIFT (1)
static inline void
@ -52,10 +52,10 @@ fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
const __m128i pkttype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
@ -75,10 +75,10 @@ fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
const __m128i l3l4cksum_flag = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
(PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
(PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT);
(RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
(RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT);
const __m128i rxe_flag = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
@ -87,9 +87,10 @@ fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
/* map rss type to rss hash flag */
const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
0, 0, 0, RTE_MBUF_F_RX_RSS_HASH,
RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH, 0,
RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
RTE_MBUF_F_RX_RSS_HASH, 0);
/* Calculate RSS_hash and Vlan fields */
ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
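
Editor's note: CKSUM_SHIFT exists because RTE_MBUF_F_RX_L4_CKSUM_GOOD is bit 8 and cannot be stored in the 8-bit entries of an SSE shuffle table; every table entry is pre-shifted right by one, and the real flags are recovered with a left shift after the shuffle. A scalar illustration, assuming the 21.11 flag layout (bit 7 for IP-checksum good, bit 8 for L4-checksum good); the macro is renamed here to avoid clashing with the driver's:

```c
#include <stdint.h>
#include <rte_mbuf.h>

#define EXAMPLE_CKSUM_SHIFT 1

/* Both "good" flags pre-shifted fit in one byte (bits 6 and 7). */
static const uint8_t both_good =
	(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >>
	EXAMPLE_CKSUM_SHIFT;

/* After the vector shuffle, a left shift restores the real flag values. */
static inline uint64_t
unpack_cksum_flags(uint8_t table_entry)
{
	return (uint64_t)table_entry << EXAMPLE_CKSUM_SHIFT;
}
```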


@ -802,7 +802,7 @@ static inline uint64_t hinic_rx_rss_hash(uint32_t offload_type,
rss_type = HINIC_GET_RSS_TYPES(offload_type);
if (likely(rss_type != 0)) {
*rss_hash = cqe_hass_val;
return PKT_RX_RSS_HASH;
return RTE_MBUF_F_RX_RSS_HASH;
}
return 0;
@ -815,33 +815,33 @@ static inline uint64_t hinic_rx_csum(uint32_t status, struct hinic_rxq *rxq)
struct hinic_nic_dev *nic_dev = rxq->nic_dev;
if (unlikely(!(nic_dev->rx_csum_en & HINIC_RX_CSUM_OFFLOAD_EN)))
return PKT_RX_IP_CKSUM_UNKNOWN;
return RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
/* in most cases the checksum is ok */
checksum_err = HINIC_GET_RX_CSUM_ERR(status);
if (likely(checksum_err == 0))
return (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
return (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
/* If BYPASS bit set, all other status indications should be ignored */
if (unlikely(HINIC_CSUM_ERR_BYPASSED(checksum_err)))
return PKT_RX_IP_CKSUM_UNKNOWN;
return RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
flags = 0;
/* IP checksum error */
if (HINIC_CSUM_ERR_IP(checksum_err))
flags |= PKT_RX_IP_CKSUM_BAD;
flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
flags |= PKT_RX_IP_CKSUM_GOOD;
flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
/* L4 checksum error */
if (HINIC_CSUM_ERR_L4(checksum_err))
flags |= PKT_RX_L4_CKSUM_BAD;
flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
flags |= PKT_RX_L4_CKSUM_GOOD;
flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (unlikely(HINIC_CSUM_ERR_OTHER(checksum_err)))
flags = PKT_RX_L4_CKSUM_NONE;
flags = RTE_MBUF_F_RX_L4_CKSUM_NONE;
rxq->rxq_stats.errors++;
@ -861,7 +861,7 @@ static inline uint64_t hinic_rx_vlan(uint32_t offload_type, uint32_t vlan_len,
*vlan_tci = vlan_tag;
return PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
return RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
}
static inline u32 hinic_rx_alloc_mbuf_bulk(struct hinic_rxq *rxq,
@ -1061,7 +1061,7 @@ u16 hinic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts)
/* lro offload */
lro_num = HINIC_GET_RX_NUM_LRO(cqe.status);
if (unlikely(lro_num != 0)) {
rxm->ol_flags |= PKT_RX_LRO;
rxm->ol_flags |= RTE_MBUF_F_RX_LRO;
rxm->tso_segsz = pkt_len / lro_num;
}


@ -592,7 +592,7 @@ hinic_fill_tx_offload_info(struct rte_mbuf *mbuf,
task->pkt_info2 = 0;
/* Base VLAN */
if (unlikely(ol_flags & PKT_TX_VLAN)) {
if (unlikely(ol_flags & RTE_MBUF_F_TX_VLAN)) {
vlan_tag = mbuf->vlan_tci;
hinic_set_vlan_tx_offload(task, queue_info, vlan_tag,
vlan_tag >> VLAN_PRIO_SHIFT);
@ -602,7 +602,7 @@ hinic_fill_tx_offload_info(struct rte_mbuf *mbuf,
if (unlikely(!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK)))
return;
if ((ol_flags & PKT_TX_TCP_SEG))
if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG))
/* set tso info for task and qsf */
hinic_set_tso_info(task, queue_info, mbuf, tx_off_info);
else /* just support l4 checksum offload */
@ -718,7 +718,7 @@ hinic_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
psd_hdr.dst_addr = ipv4_hdr->dst_addr;
psd_hdr.zero = 0;
psd_hdr.proto = ipv4_hdr->next_proto_id;
if (ol_flags & PKT_TX_TCP_SEG) {
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
psd_hdr.len = 0;
} else {
psd_hdr.len =
@ -738,7 +738,7 @@ hinic_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
} psd_hdr;
psd_hdr.proto = (ipv6_hdr->proto << 24);
if (ol_flags & PKT_TX_TCP_SEG)
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
psd_hdr.len = 0;
else
psd_hdr.len = ipv6_hdr->payload_len;
@ -754,10 +754,10 @@ static inline void hinic_get_outer_cs_pld_offset(struct rte_mbuf *m,
{
uint64_t ol_flags = m->ol_flags;
if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM)
off_info->payload_offset = m->outer_l2_len + m->outer_l3_len +
m->l2_len + m->l3_len;
else if ((ol_flags & PKT_TX_TCP_CKSUM) || (ol_flags & PKT_TX_TCP_SEG))
else if ((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) || (ol_flags & RTE_MBUF_F_TX_TCP_SEG))
off_info->payload_offset = m->outer_l2_len + m->outer_l3_len +
m->l2_len + m->l3_len + m->l4_len;
}
@ -767,10 +767,10 @@ static inline void hinic_get_pld_offset(struct rte_mbuf *m,
{
uint64_t ol_flags = m->ol_flags;
if (((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) ||
((ol_flags & PKT_TX_L4_MASK) == PKT_TX_SCTP_CKSUM))
if (((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM) ||
((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_SCTP_CKSUM))
off_info->payload_offset = m->l2_len + m->l3_len;
else if ((ol_flags & PKT_TX_TCP_CKSUM) || (ol_flags & PKT_TX_TCP_SEG))
else if ((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) || (ol_flags & RTE_MBUF_F_TX_TCP_SEG))
off_info->payload_offset = m->l2_len + m->l3_len +
m->l4_len;
}
@ -845,11 +845,11 @@ static inline uint8_t hinic_analyze_l3_type(struct rte_mbuf *mbuf)
uint8_t l3_type;
uint64_t ol_flags = mbuf->ol_flags;
if (ol_flags & PKT_TX_IPV4)
l3_type = (ol_flags & PKT_TX_IP_CKSUM) ?
if (ol_flags & RTE_MBUF_F_TX_IPV4)
l3_type = (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) ?
IPV4_PKT_WITH_CHKSUM_OFFLOAD :
IPV4_PKT_NO_CHKSUM_OFFLOAD;
else if (ol_flags & PKT_TX_IPV6)
else if (ol_flags & RTE_MBUF_F_TX_IPV6)
l3_type = IPV6_PKT;
else
l3_type = UNKNOWN_L3TYPE;
@ -866,11 +866,11 @@ static inline void hinic_calculate_tcp_checksum(struct rte_mbuf *mbuf,
struct rte_tcp_hdr *tcp_hdr;
uint64_t ol_flags = mbuf->ol_flags;
if (ol_flags & PKT_TX_IPV4) {
if (ol_flags & RTE_MBUF_F_TX_IPV4) {
ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
inner_l3_offset);
if (ol_flags & PKT_TX_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
ipv4_hdr->hdr_checksum = 0;
tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
@ -898,11 +898,11 @@ static inline void hinic_calculate_udp_checksum(struct rte_mbuf *mbuf,
struct rte_udp_hdr *udp_hdr;
uint64_t ol_flags = mbuf->ol_flags;
if (ol_flags & PKT_TX_IPV4) {
if (ol_flags & RTE_MBUF_F_TX_IPV4) {
ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
inner_l3_offset);
if (ol_flags & PKT_TX_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
ipv4_hdr->hdr_checksum = 0;
udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
@ -938,21 +938,21 @@ static inline void hinic_calculate_checksum(struct rte_mbuf *mbuf,
{
uint64_t ol_flags = mbuf->ol_flags;
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_UDP_CKSUM:
switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
case RTE_MBUF_F_TX_UDP_CKSUM:
hinic_calculate_udp_checksum(mbuf, off_info, inner_l3_offset);
break;
case PKT_TX_TCP_CKSUM:
case RTE_MBUF_F_TX_TCP_CKSUM:
hinic_calculate_tcp_checksum(mbuf, off_info, inner_l3_offset);
break;
case PKT_TX_SCTP_CKSUM:
case RTE_MBUF_F_TX_SCTP_CKSUM:
hinic_calculate_sctp_checksum(off_info);
break;
default:
if (ol_flags & PKT_TX_TCP_SEG)
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
hinic_calculate_tcp_checksum(mbuf, off_info,
inner_l3_offset);
break;
@ -970,8 +970,8 @@ static inline int hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
return 0;
/* Support only vxlan offload */
if (unlikely((ol_flags & PKT_TX_TUNNEL_MASK) &&
!(ol_flags & PKT_TX_TUNNEL_VXLAN)))
if (unlikely((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN)))
return -ENOTSUP;
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
@ -979,7 +979,7 @@ static inline int hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
return -EINVAL;
#endif
if (ol_flags & PKT_TX_TUNNEL_VXLAN) {
if (ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN) {
off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;
/* inner_l4_tcp_udp csum should be set to calculate outer
@ -987,9 +987,9 @@ static inline int hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
*/
off_info->inner_l4_tcp_udp = 1;
if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
(ol_flags & PKT_TX_OUTER_IPV6) ||
(ol_flags & PKT_TX_TCP_SEG)) {
if ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) ||
(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
inner_l3_offset = m->l2_len + m->outer_l2_len +
m->outer_l3_len;
off_info->outer_l2_len = m->outer_l2_len;
@ -1057,7 +1057,7 @@ static inline bool hinic_get_sge_txoff_info(struct rte_mbuf *mbuf_pkt,
sqe_info->cpy_mbuf_cnt = 0;
/* non tso mbuf */
if (likely(!(mbuf_pkt->ol_flags & PKT_TX_TCP_SEG))) {
if (likely(!(mbuf_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG))) {
if (unlikely(mbuf_pkt->pkt_len > MAX_SINGLE_SGE_SIZE)) {
/* non tso packet len must be less than 64KB */
return false;

View File

@ -13,13 +13,12 @@
#define HINIC_GET_WQ_TAIL(txq) \
((txq)->wq->queue_buf_vaddr + (txq)->wq->wq_buf_size)
#define HINIC_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
PKT_TX_TCP_CKSUM | \
PKT_TX_UDP_CKSUM | \
PKT_TX_SCTP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM | \
PKT_TX_TCP_SEG)
#define HINIC_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_TCP_CKSUM | \
RTE_MBUF_F_TX_UDP_CKSUM | \
RTE_MBUF_F_TX_SCTP_CKSUM | \
RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
RTE_MBUF_F_TX_TCP_SEG)
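This mask is what hinic_fill_tx_offload_info() tests before doing any checksum work (see the early return above). A minimal sender-side sketch that intersects the mask, assuming an IPv4/TCP frame whose l2_len and l3_len are already filled in:

m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
	       RTE_MBUF_F_TX_TCP_CKSUM;
/* (m->ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK) != 0 now holds. */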
enum sq_wqe_type {
SQ_NORMAL_WQE = 0,

View File

@ -622,7 +622,7 @@ struct hns3_hw {
* - HNS3_SPECIAL_PORT_SW_CKSUM_MODE
* In this mode, HW can not do checksum for special UDP port like
* 4789, 4790, 6081 for non-tunnel UDP packets and UDP tunnel
* packets without the PKT_TX_TUNEL_MASK in the mbuf. So, PMD need
* packets without the RTE_MBUF_F_TX_TUNNEL_MASK in the mbuf. So, the PMD needs to
* do the checksum for these packets to avoid a checksum error.
*
* - HNS3_SPECIAL_PORT_HW_CKSUM_MODE

View File

@ -2341,11 +2341,11 @@ hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb,
mb->vlan_tci = 0;
return;
case HNS3_INNER_STRP_VLAN_VLD:
mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag);
return;
case HNS3_OUTER_STRP_VLAN_VLD:
mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag);
return;
default:
@ -2395,7 +2395,7 @@ hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf,
struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns);
uint64_t timestamp = rte_le_to_cpu_64(rxd->timestamp);
mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | RTE_MBUF_F_RX_IEEE1588_TMST;
if (hns3_timestamp_rx_dynflag > 0) {
*RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset,
rte_mbuf_timestamp_t *) = timestamp;
@ -2481,11 +2481,11 @@ hns3_recv_pkts_simple(void *rx_queue,
rxm->data_len = rxm->pkt_len;
rxm->port = rxq->port_id;
rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
rxm->ol_flags |= PKT_RX_RSS_HASH;
rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
rxm->hash.fdir.hi =
rte_le_to_cpu_16(rxd.rx.fd_id);
rxm->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
rxm->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
}
rxm->nb_segs = 1;
rxm->next = NULL;
@ -2500,7 +2500,7 @@ hns3_recv_pkts_simple(void *rx_queue,
rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
rxm->ol_flags |= PKT_RX_IEEE1588_PTP;
rxm->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
@ -2699,17 +2699,17 @@ hns3_recv_scattered_pkts(void *rx_queue,
first_seg->port = rxq->port_id;
first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
first_seg->ol_flags = PKT_RX_RSS_HASH;
first_seg->ol_flags = RTE_MBUF_F_RX_RSS_HASH;
if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
first_seg->hash.fdir.hi =
rte_le_to_cpu_16(rxd.rx.fd_id);
first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
first_seg->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
}
gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M,
HNS3_RXD_GRO_SIZE_S);
if (gro_size != 0) {
first_seg->ol_flags |= PKT_RX_LRO;
first_seg->ol_flags |= RTE_MBUF_F_RX_LRO;
first_seg->tso_segsz = gro_size;
}
@ -2724,7 +2724,7 @@ hns3_recv_scattered_pkts(void *rx_queue,
l234_info, ol_info);
if (first_seg->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
rxm->ol_flags |= PKT_RX_IEEE1588_PTP;
rxm->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
@ -3151,7 +3151,7 @@ hns3_restore_gro_conf(struct hns3_hw *hw)
static inline bool
hns3_pkt_is_tso(struct rte_mbuf *m)
{
return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG);
return (m->tso_segsz != 0 && m->ol_flags & RTE_MBUF_F_TX_TCP_SEG);
}
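Both conditions matter: the flag requests segmentation and tso_segsz supplies the MSS. A minimal sender-side sketch that satisfies hns3_pkt_is_tso(), with header lengths assumed to be set elsewhere:

m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
	       RTE_MBUF_F_TX_TCP_SEG;
m->tso_segsz = 1448;	/* illustrative MSS for a 1500-byte MTU */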
static void
@ -3184,7 +3184,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
uint32_t paylen;
hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len;
hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ?
hdr_len += (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
rxm->outer_l2_len + rxm->outer_l3_len : 0;
paylen = rxm->pkt_len - hdr_len;
desc->tx.paylen_fd_dop_ol4cs |= rte_cpu_to_le_32(paylen);
@ -3202,11 +3202,11 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
* To prevent the VLAN in the Tx descriptor from being overwritten by PVID,
* it should be inserted at a position close to the IP header when PVID is enabled.
*/
if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN |
PKT_TX_QINQ)) {
if (!txq->pvid_sw_shift_en &&
ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
desc->tx.ol_type_vlan_len_msec |=
rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
if (ol_flags & PKT_TX_QINQ)
if (ol_flags & RTE_MBUF_F_TX_QINQ)
desc->tx.outer_vlan_tag =
rte_cpu_to_le_16(rxm->vlan_tci_outer);
else
@ -3214,14 +3214,14 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
rte_cpu_to_le_16(rxm->vlan_tci);
}
if (ol_flags & PKT_TX_QINQ ||
((ol_flags & PKT_TX_VLAN) && txq->pvid_sw_shift_en)) {
if (ol_flags & RTE_MBUF_F_TX_QINQ ||
((ol_flags & RTE_MBUF_F_TX_VLAN) && txq->pvid_sw_shift_en)) {
desc->tx.type_cs_vlan_tso_len |=
rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
}
if (ol_flags & PKT_TX_IEEE1588_TMST)
if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
desc->tx.tp_fe_sc_vld_ra_ri |=
rte_cpu_to_le_16(BIT(HNS3_TXD_TSYN_B));
}
@ -3343,14 +3343,14 @@ hns3_parse_outer_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec)
uint64_t ol_flags = m->ol_flags;
/* (outer) IP header type */
if (ol_flags & PKT_TX_OUTER_IPV4) {
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM);
else
tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M,
HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM);
} else if (ol_flags & PKT_TX_OUTER_IPV6) {
} else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) {
tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
HNS3_OL3T_IPV6);
}
@ -3370,10 +3370,10 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
uint64_t ol_flags = m->ol_flags;
uint16_t inner_l2_len;
switch (ol_flags & PKT_TX_TUNNEL_MASK) {
case PKT_TX_TUNNEL_VXLAN_GPE:
case PKT_TX_TUNNEL_GENEVE:
case PKT_TX_TUNNEL_VXLAN:
switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
case RTE_MBUF_F_TX_TUNNEL_GENEVE:
case RTE_MBUF_F_TX_TUNNEL_VXLAN:
/* MAC in UDP tunnelling packet, including VxLAN and GENEVE */
tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP);
@ -3392,7 +3392,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec,
inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN;
break;
case PKT_TX_TUNNEL_GRE:
case RTE_MBUF_F_TX_TUNNEL_GRE:
tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);
/*
@ -3441,7 +3441,7 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
* calculations, the length of the L2 header includes the outer and
* inner, and will be filled during the parsing of tunnel packets.
*/
if (!(ol_flags & PKT_TX_TUNNEL_MASK)) {
if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
/*
* For non tunnel type the tunnel type id is 0, so no need to
* assign a value to it. Only the inner(normal) L2 header length
@ -3457,7 +3457,7 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
* calculate the header length.
*/
if (unlikely(!(ol_flags &
(PKT_TX_OUTER_IP_CKSUM | PKT_TX_OUTER_UDP_CKSUM)) &&
(RTE_MBUF_F_TX_OUTER_IP_CKSUM | RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
m->outer_l2_len == 0)) {
struct rte_net_hdr_lens hdr_len;
(void)rte_net_get_ptype(m, &hdr_len,
@ -3474,7 +3474,7 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m,
desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer);
desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner);
tmp_ol4cs = ol_flags & PKT_TX_OUTER_UDP_CKSUM ?
tmp_ol4cs = ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM ?
BIT(HNS3_TXD_OL4CS_B) : 0;
desc->tx.paylen_fd_dop_ol4cs = rte_cpu_to_le_32(tmp_ol4cs);
@ -3489,9 +3489,9 @@ hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
uint32_t tmp;
tmp = *type_cs_vlan_tso_len;
if (ol_flags & PKT_TX_IPV4)
if (ol_flags & RTE_MBUF_F_TX_IPV4)
l3_type = HNS3_L3T_IPV4;
else if (ol_flags & PKT_TX_IPV6)
else if (ol_flags & RTE_MBUF_F_TX_IPV6)
l3_type = HNS3_L3T_IPV6;
else
l3_type = HNS3_L3T_NONE;
@ -3503,7 +3503,7 @@ hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type);
/* Enable L3 checksum offloads */
if (ol_flags & PKT_TX_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
tmp |= BIT(HNS3_TXD_L3CS_B);
*type_cs_vlan_tso_len = tmp;
}
@ -3514,20 +3514,20 @@ hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len)
uint64_t ol_flags = m->ol_flags;
uint32_t tmp;
/* Enable L4 checksum offloads */
switch (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) {
case PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG:
case PKT_TX_TCP_CKSUM:
case PKT_TX_TCP_SEG:
switch (ol_flags & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG)) {
case RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_TCP_SEG:
case RTE_MBUF_F_TX_TCP_CKSUM:
case RTE_MBUF_F_TX_TCP_SEG:
tmp = *type_cs_vlan_tso_len;
tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
HNS3_L4T_TCP);
break;
case PKT_TX_UDP_CKSUM:
case RTE_MBUF_F_TX_UDP_CKSUM:
tmp = *type_cs_vlan_tso_len;
tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
HNS3_L4T_UDP);
break;
case PKT_TX_SCTP_CKSUM:
case RTE_MBUF_F_TX_SCTP_CKSUM:
tmp = *type_cs_vlan_tso_len;
tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
HNS3_L4T_SCTP);
@ -3584,7 +3584,7 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num,
/* ensure the sum of the first 8 frags is greater than mss + header */
hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len;
hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ?
hdr_len += (tx_pkts->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0;
if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len)
return true;
@ -3614,15 +3614,15 @@ hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
struct rte_ipv4_hdr *ipv4_hdr;
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
m->outer_l2_len);
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
ipv4_hdr->hdr_checksum = 0;
if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
struct rte_udp_hdr *udp_hdr;
/*
* If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
* header for TSO packets
*/
if (ol_flags & PKT_TX_TCP_SEG)
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
return true;
udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
m->outer_l2_len + m->outer_l3_len);
@ -3641,13 +3641,13 @@ hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags,
struct rte_ipv6_hdr *ipv6_hdr;
ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
m->outer_l2_len);
if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) {
if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
struct rte_udp_hdr *udp_hdr;
/*
* If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
* header for TSO packets
*/
if (ol_flags & PKT_TX_TCP_SEG)
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
return true;
udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
m->outer_l2_len + m->outer_l3_len);
@ -3666,10 +3666,10 @@ hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
uint32_t paylen, hdr_len, l4_proto;
struct rte_udp_hdr *udp_hdr;
if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)))
return;
if (ol_flags & PKT_TX_OUTER_IPV4) {
if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto))
return;
} else {
@ -3678,7 +3678,7 @@ hns3_outer_header_cksum_prepare(struct rte_mbuf *m)
}
/* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */
if (l4_proto == IPPROTO_UDP && (ol_flags & PKT_TX_TCP_SEG)) {
if (l4_proto == IPPROTO_UDP && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
hdr_len = m->l2_len + m->l3_len + m->l4_len;
hdr_len += m->outer_l2_len + m->outer_l3_len;
paylen = m->pkt_len - hdr_len;
@ -3704,7 +3704,7 @@ hns3_check_tso_pkt_valid(struct rte_mbuf *m)
return -EINVAL;
hdr_len = m->l2_len + m->l3_len + m->l4_len;
hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ?
hdr_len += (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
m->outer_l2_len + m->outer_l3_len : 0;
if (hdr_len > HNS3_MAX_TSO_HDR_SIZE)
return -EINVAL;
@ -3754,12 +3754,12 @@ hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
* implementation function named hns3_prep_pkts to inform users that
* these packets will be discarded.
*/
if (m->ol_flags & PKT_TX_QINQ)
if (m->ol_flags & RTE_MBUF_F_TX_QINQ)
return -EINVAL;
eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
if (m->ol_flags & PKT_TX_VLAN)
if (m->ol_flags & RTE_MBUF_F_TX_VLAN)
return -EINVAL;
/* Ensure the incoming packet is not a QinQ packet */
@ -3779,7 +3779,7 @@ hns3_udp_cksum_help(struct rte_mbuf *m)
uint16_t cksum = 0;
uint32_t l4_len;
if (ol_flags & PKT_TX_IPV4) {
if (ol_flags & RTE_MBUF_F_TX_IPV4) {
struct rte_ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m,
struct rte_ipv4_hdr *, m->l2_len);
l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - m->l3_len;
@ -3810,8 +3810,8 @@ hns3_validate_tunnel_cksum(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
uint16_t dst_port;
if (tx_queue->udp_cksum_mode == HNS3_SPECIAL_PORT_HW_CKSUM_MODE ||
ol_flags & PKT_TX_TUNNEL_MASK ||
(ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM)
ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK ||
(ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_UDP_CKSUM)
return true;
/*
* A UDP packet with the same dst_port as VXLAN\VXLAN_GPE\GENEVE will
@ -3828,7 +3828,7 @@ hns3_validate_tunnel_cksum(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
case RTE_VXLAN_GPE_DEFAULT_PORT:
case RTE_GENEVE_DEFAULT_PORT:
udp_hdr->dgram_cksum = hns3_udp_cksum_help(m);
m->ol_flags = ol_flags & ~PKT_TX_L4_MASK;
m->ol_flags = ol_flags & ~RTE_MBUF_F_TX_L4_MASK;
return false;
default:
return true;

View File

@ -471,7 +471,7 @@ struct hns3_tx_queue {
* - HNS3_SPECIAL_PORT_SW_CKSUM_MODE
* In this mode, HW can not do checksum for special UDP port like
* 4789, 4790, 6081 for non-tunnel UDP packets and UDP tunnel
* packets without the PKT_TX_TUNEL_MASK in the mbuf. So, PMD need
* packets without the RTE_MBUF_F_TX_TUNNEL_MASK in the mbuf. So, the PMD needs to
* do the checksum for these packets to avoid a checksum error.
*
* - HNS3_SPECIAL_PORT_HW_CKSUM_MODE
@ -545,12 +545,11 @@ struct hns3_queue_info {
unsigned int socket_id;
};
#define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_OUTER_UDP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM | \
PKT_TX_IP_CKSUM | \
PKT_TX_TCP_SEG | \
PKT_TX_L4_MASK)
#define HNS3_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_UDP_CKSUM | \
RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_TCP_SEG | \
RTE_MBUF_F_TX_L4_MASK)
enum hns3_cksum_status {
HNS3_CKSUM_NONE = 0,
@ -574,29 +573,29 @@ hns3_rx_set_cksum_flag(struct hns3_rx_queue *rxq,
BIT(HNS3_RXD_OL4E_B))
if (likely((l234_info & HNS3_RXD_CKSUM_ERR_MASK) == 0)) {
rxm->ol_flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
rxm->ol_flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return;
}
if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
rxm->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
rxq->dfx_stats.l3_csum_errors++;
} else {
rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
rxm->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
rxm->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
rxq->dfx_stats.l4_csum_errors++;
} else {
rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
rxm->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B)))
rxq->dfx_stats.ol3_csum_errors++;
if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
rxm->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
rxq->dfx_stats.ol4_csum_errors++;
}
}
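On the application side, note that each RX checksum status is a 2-bit field (the BAD and GOOD bits together encode NONE), so consumers should compare against the mask rather than test the BAD bit alone. A minimal sketch, assuming the renamed flags:

#include <stdbool.h>
#include <rte_mbuf.h>

static inline bool
rx_cksum_bad(const struct rte_mbuf *m)
{
	return (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
			RTE_MBUF_F_RX_IP_CKSUM_BAD ||
	       (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
			RTE_MBUF_F_RX_L4_CKSUM_BAD;
}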

View File

@ -105,7 +105,7 @@ hns3_desc_parse_field(struct hns3_rx_queue *rxq,
pkt = sw_ring[i].mbuf;
/* init rte_mbuf.rearm_data last 64-bit */
pkt->ol_flags = PKT_RX_RSS_HASH;
pkt->ol_flags = RTE_MBUF_F_RX_RSS_HASH;
l234_info = rxdp[i].rx.l234_info;
ol_info = rxdp[i].rx.ol_info;

View File

@ -43,7 +43,7 @@ hns3_desc_parse_field_sve(struct hns3_rx_queue *rxq,
for (i = 0; i < (int)bd_vld_num; i++) {
/* init rte_mbuf.rearm_data last 64-bit */
rx_pkts[i]->ol_flags = PKT_RX_RSS_HASH;
rx_pkts[i]->ol_flags = RTE_MBUF_F_RX_RSS_HASH;
ret = hns3_handle_bdinfo(rxq, rx_pkts[i], key->bd_base_info[i],
key->l234_info[i]);

View File

@ -44,42 +44,39 @@
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#ifdef RTE_LIBRTE_IEEE1588
#define I40E_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
#define I40E_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
#else
#define I40E_TX_IEEE1588_TMST 0
#endif
#define I40E_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
PKT_TX_OUTER_IP_CKSUM)
#define I40E_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_L4_MASK | \
RTE_MBUF_F_TX_TCP_SEG | \
RTE_MBUF_F_TX_OUTER_IP_CKSUM)
#define I40E_TX_OFFLOAD_MASK ( \
PKT_TX_OUTER_IPV4 | \
PKT_TX_OUTER_IPV6 | \
PKT_TX_IPV4 | \
PKT_TX_IPV6 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_OUTER_IP_CKSUM | \
PKT_TX_TCP_SEG | \
PKT_TX_QINQ | \
PKT_TX_VLAN | \
PKT_TX_TUNNEL_MASK | \
#define I40E_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV4 | \
RTE_MBUF_F_TX_OUTER_IPV6 | \
RTE_MBUF_F_TX_IPV4 | \
RTE_MBUF_F_TX_IPV6 | \
RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_L4_MASK | \
RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
RTE_MBUF_F_TX_TCP_SEG | \
RTE_MBUF_F_TX_QINQ | \
RTE_MBUF_F_TX_VLAN | \
RTE_MBUF_F_TX_TUNNEL_MASK | \
I40E_TX_IEEE1588_TMST)
#define I40E_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
(RTE_MBUF_F_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
#define I40E_TX_OFFLOAD_SIMPLE_SUP_MASK ( \
PKT_TX_IPV4 | \
PKT_TX_IPV6 | \
PKT_TX_OUTER_IPV4 | \
PKT_TX_OUTER_IPV6)
#define I40E_TX_OFFLOAD_SIMPLE_SUP_MASK (RTE_MBUF_F_TX_IPV4 | \
RTE_MBUF_F_TX_IPV6 | \
RTE_MBUF_F_TX_OUTER_IPV4 | \
RTE_MBUF_F_TX_OUTER_IPV6)
#define I40E_TX_OFFLOAD_SIMPLE_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_SIMPLE_SUP_MASK)
(RTE_MBUF_F_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_SIMPLE_SUP_MASK)
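The NOTSUP masks are the complement of the supported set within the full RTE_MBUF_F_TX_OFFLOAD_MASK. A hedged sketch of the usual prepare-time rejection, modelled on the i40e_prep_pkts() loop (m and i are assumed from the surrounding loop):

if (m->ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) {
	rte_errno = ENOTSUP;	/* an offload this path cannot honour */
	return i;		/* packets successfully prepared so far */
}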
static int
i40e_monitor_callback(const uint64_t value,
@ -119,7 +116,7 @@ i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
{
if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci =
rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
@ -130,8 +127,8 @@ i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
(1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
mb->vlan_tci_outer = mb->vlan_tci;
mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
@ -154,11 +151,11 @@ i40e_rxd_status_to_pkt_flags(uint64_t qword)
/* Check if RSS_HASH */
flags = (((qword >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
I40E_RX_DESC_FLTSTAT_RSS_HASH) ==
I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
I40E_RX_DESC_FLTSTAT_RSS_HASH) ? RTE_MBUF_F_RX_RSS_HASH : 0;
/* Check if FDIR Match */
flags |= (qword & (1 << I40E_RX_DESC_STATUS_FLM_SHIFT) ?
PKT_RX_FDIR : 0);
RTE_MBUF_F_RX_FDIR : 0);
return flags;
}
@ -171,22 +168,22 @@ i40e_rxd_error_to_pkt_flags(uint64_t qword)
#define I40E_RX_ERR_BITS 0x3f
if (likely((error_bits & I40E_RX_ERR_BITS) == 0)) {
flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return flags;
}
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
flags |= PKT_RX_IP_CKSUM_BAD;
flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
flags |= PKT_RX_IP_CKSUM_GOOD;
flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)))
flags |= PKT_RX_L4_CKSUM_BAD;
flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
flags |= PKT_RX_L4_CKSUM_GOOD;
flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))
flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
return flags;
}
@ -205,9 +202,9 @@ i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword)
if ((mb->packet_type & RTE_PTYPE_L2_MASK)
== RTE_PTYPE_L2_ETHER_TIMESYNC)
pkt_flags = PKT_RX_IEEE1588_PTP;
pkt_flags = RTE_MBUF_F_RX_IEEE1588_PTP;
if (tsyn & 0x04) {
pkt_flags |= PKT_RX_IEEE1588_TMST;
pkt_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
mb->timesync = tsyn & 0x03;
}
@ -233,21 +230,21 @@ i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
mb->hash.fdir.hi =
rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
flags |= PKT_RX_FDIR_ID;
flags |= RTE_MBUF_F_RX_FDIR_ID;
} else if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX) {
mb->hash.fdir.hi =
rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.flex_bytes_hi);
flags |= PKT_RX_FDIR_FLX;
flags |= RTE_MBUF_F_RX_FDIR_FLX;
}
if (flexbl == I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX) {
mb->hash.fdir.lo =
rte_le_to_cpu_32(rxdp->wb.qword3.lo_dword.flex_bytes_lo);
flags |= PKT_RX_FDIR_FLX;
flags |= RTE_MBUF_F_RX_FDIR_FLX;
}
#else
mb->hash.fdir.hi =
rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
flags |= PKT_RX_FDIR_ID;
flags |= RTE_MBUF_F_RX_FDIR_ID;
#endif
return flags;
}
@ -258,11 +255,11 @@ i40e_parse_tunneling_params(uint64_t ol_flags,
uint32_t *cd_tunneling)
{
/* EIPT: External (outer) IP header type */
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
else if (ol_flags & PKT_TX_OUTER_IPV4)
else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
else if (ol_flags & PKT_TX_OUTER_IPV6)
else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
/* EIPLEN: External (outer) IP header length, in DWords */
@ -270,15 +267,15 @@ i40e_parse_tunneling_params(uint64_t ol_flags,
I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
/* L4TUNT: L4 Tunneling Type */
switch (ol_flags & PKT_TX_TUNNEL_MASK) {
case PKT_TX_TUNNEL_IPIP:
switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
case RTE_MBUF_F_TX_TUNNEL_IPIP:
/* for non UDP / GRE tunneling, set to 00b */
break;
case PKT_TX_TUNNEL_VXLAN:
case PKT_TX_TUNNEL_GENEVE:
case RTE_MBUF_F_TX_TUNNEL_VXLAN:
case RTE_MBUF_F_TX_TUNNEL_GENEVE:
*cd_tunneling |= I40E_TXD_CTX_UDP_TUNNELING;
break;
case PKT_TX_TUNNEL_GRE:
case RTE_MBUF_F_TX_TUNNEL_GRE:
*cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING;
break;
default:
@ -306,7 +303,7 @@ i40e_txd_enable_checksum(uint64_t ol_flags,
union i40e_tx_offload tx_offload)
{
/* Set MACLEN */
if (ol_flags & PKT_TX_TUNNEL_MASK)
if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
*td_offset |= (tx_offload.outer_l2_len >> 1)
<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
else
@ -314,21 +311,21 @@ i40e_txd_enable_checksum(uint64_t ol_flags,
<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
/* Enable L3 checksum offloads */
if (ol_flags & PKT_TX_IP_CKSUM) {
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
*td_offset |= (tx_offload.l3_len >> 2)
<< I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
} else if (ol_flags & PKT_TX_IPV4) {
} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
*td_offset |= (tx_offload.l3_len >> 2)
<< I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
} else if (ol_flags & PKT_TX_IPV6) {
} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
*td_offset |= (tx_offload.l3_len >> 2)
<< I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
}
if (ol_flags & PKT_TX_TCP_SEG) {
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (tx_offload.l4_len >> 2)
<< I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
@ -336,18 +333,18 @@ i40e_txd_enable_checksum(uint64_t ol_flags,
}
/* Enable L4 checksum offloads */
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
case RTE_MBUF_F_TX_TCP_CKSUM:
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case PKT_TX_SCTP_CKSUM:
case RTE_MBUF_F_TX_SCTP_CKSUM:
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case PKT_TX_UDP_CKSUM:
case RTE_MBUF_F_TX_UDP_CKSUM:
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
@ -526,10 +523,10 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
ptype_tbl[(uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
mb->hash.rss = rte_le_to_cpu_32(\
rxdp[j].wb.qword0.hi_dword.rss);
if (pkt_flags & PKT_RX_FDIR)
if (pkt_flags & RTE_MBUF_F_RX_FDIR)
pkt_flags |= i40e_rxd_build_fdir(&rxdp[j], mb);
#ifdef RTE_LIBRTE_IEEE1588
@ -789,10 +786,10 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->packet_type =
ptype_tbl[(uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
if (pkt_flags & PKT_RX_FDIR)
if (pkt_flags & RTE_MBUF_F_RX_FDIR)
pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
#ifdef RTE_LIBRTE_IEEE1588
@ -957,10 +954,10 @@ i40e_recv_scattered_pkts(void *rx_queue,
first_seg->packet_type =
ptype_tbl[(uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
first_seg->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
if (pkt_flags & PKT_RX_FDIR)
if (pkt_flags & RTE_MBUF_F_RX_FDIR)
pkt_flags |= i40e_rxd_build_fdir(&rxd, first_seg);
#ifdef RTE_LIBRTE_IEEE1588
@ -1004,13 +1001,13 @@ i40e_recv_scattered_pkts(void *rx_queue,
static inline uint16_t
i40e_calc_context_desc(uint64_t flags)
{
static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TCP_SEG |
PKT_TX_QINQ |
PKT_TX_TUNNEL_MASK;
static uint64_t mask = RTE_MBUF_F_TX_OUTER_IP_CKSUM |
RTE_MBUF_F_TX_TCP_SEG |
RTE_MBUF_F_TX_QINQ |
RTE_MBUF_F_TX_TUNNEL_MASK;
#ifdef RTE_LIBRTE_IEEE1588
mask |= PKT_TX_IEEE1588_TMST;
mask |= RTE_MBUF_F_TX_IEEE1588_TMST;
#endif
return (flags & mask) ? 1 : 0;
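The mask gathers every feature that needs a context descriptor, and the function reports how many to reserve: at most one, shared by all matching features. An illustrative call with assumed flags:

uint64_t flags = RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_TUNNEL_VXLAN;
uint16_t nb_ctx = i40e_calc_context_desc(flags);	/* == 1, not 2 */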
@ -1029,7 +1026,7 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
}
hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
cd_cmd = I40E_TX_CTX_DESC_TSO;
@ -1122,7 +1119,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* the mbuf data size exceeds max data size that hw allows
* per tx desc.
*/
if (ol_flags & PKT_TX_TCP_SEG)
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
nb_used = (uint16_t)(i40e_calc_pkt_desc(tx_pkt) +
nb_ctx);
else
@ -1151,7 +1148,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* Descriptor based VLAN insertion */
if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
td_tag = tx_pkt->vlan_tci;
}
@ -1161,7 +1158,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Fill in tunneling parameters if necessary */
cd_tunneling_params = 0;
if (ol_flags & PKT_TX_TUNNEL_MASK)
if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
i40e_parse_tunneling_params(ol_flags, tx_offload,
&cd_tunneling_params);
/* Enable checksum offloading */
@ -1186,12 +1183,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* TSO enabled means no timestamp */
if (ol_flags & PKT_TX_TCP_SEG)
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
cd_type_cmd_tso_mss |=
i40e_set_tso_ctx(tx_pkt, tx_offload);
else {
#ifdef RTE_LIBRTE_IEEE1588
if (ol_flags & PKT_TX_IEEE1588_TMST)
if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
cd_type_cmd_tso_mss |=
((uint64_t)I40E_TX_CTX_DESC_TSYN <<
I40E_TXD_CTX_QW1_CMD_SHIFT);
@ -1200,7 +1197,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
ctx_txd->tunneling_params =
rte_cpu_to_le_32(cd_tunneling_params);
if (ol_flags & PKT_TX_QINQ) {
if (ol_flags & RTE_MBUF_F_TX_QINQ) {
cd_l2tag2 = tx_pkt->vlan_tci_outer;
cd_type_cmd_tso_mss |=
((uint64_t)I40E_TX_CTX_DESC_IL2TAG2 <<
@ -1239,7 +1236,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
slen = m_seg->data_len;
buf_dma_addr = rte_mbuf_data_iova(m_seg);
while ((ol_flags & PKT_TX_TCP_SEG) &&
while ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
unlikely(slen > I40E_MAX_DATA_PER_TXD)) {
txd->buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
@ -1580,7 +1577,7 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
ol_flags = m->ol_flags;
/* Check for m->nb_segs to not exceed the limits. */
if (!(ol_flags & PKT_TX_TCP_SEG)) {
if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
if (m->nb_segs > I40E_TX_MAX_MTU_SEG ||
m->pkt_len > I40E_FRAME_SIZE_MAX) {
rte_errno = EINVAL;

View File

@ -117,26 +117,26 @@ desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
/* map rss and vlan type to rss hash and vlan flag */
const vector unsigned char vlan_flags = (vector unsigned char){
0, 0, 0, 0,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0};
const vector unsigned char rss_flags = (vector unsigned char){
0, PKT_RX_FDIR, 0, 0,
0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
0, RTE_MBUF_F_RX_FDIR, 0, 0,
0, 0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
0, 0, 0, 0,
0, 0, 0, 0};
const vector unsigned char l3_l4e_flags = (vector unsigned char){
0,
PKT_RX_IP_CKSUM_BAD,
PKT_RX_L4_CKSUM_BAD,
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
| PKT_RX_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_BAD,
RTE_MBUF_F_RX_L4_CKSUM_BAD,
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD,
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD,
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD
| RTE_MBUF_F_RX_IP_CKSUM_BAD,
0, 0, 0, 0, 0, 0, 0, 0};
vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]);

View File

@ -79,7 +79,7 @@ desc_fdir_processing_32b(volatile union i40e_rx_desc *rxdp,
* - Position that bit correctly based on packet number
* - OR in the resulting bit to mbuf_flags
*/
RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
__m256i mbuf_flag_mask = _mm256_set_epi32(0, 0, 0, 1 << 13,
0, 0, 0, 1 << 13);
__m256i desc_flag_bit = _mm256_and_si256(mbuf_flag_mask, fdir_mask);
@ -209,8 +209,8 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
* destination
*/
const __m256i vlan_flags_shuf = _mm256_set_epi32(
0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0,
0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0);
/*
* data to be shuffled by result of flag mask, shifted down 11.
* If RSS/FDIR bits are set, shuffle moves appropriate flags in
@ -218,11 +218,11 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
*/
const __m256i rss_flags_shuf = _mm256_set_epi8(
0, 0, 0, 0, 0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
0, 0, PKT_RX_FDIR, 0, /* end up 128-bits */
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH, 0, 0,
0, 0, RTE_MBUF_F_RX_FDIR, 0, /* end up 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
0, 0, PKT_RX_FDIR, 0);
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH, 0, 0,
0, 0, RTE_MBUF_F_RX_FDIR, 0);
/*
* data to be shuffled by the result of the flags mask shifted by 22
@ -230,37 +230,37 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
*/
const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
/* second 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
const __m256i cksum_mask = _mm256_set1_epi32(
PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD);
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */
@ -443,7 +443,7 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
* order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
* Then OR FDIR flags to mbuf_flags on FDIR ID hit.
*/
RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
const __m256i pkt_fdir_bit = _mm256_set1_epi32(1 << 13);
const __m256i fdir_mask = _mm256_cmpeq_epi32(fdir, fdir_id);
__m256i fdir_bits = _mm256_and_si256(fdir_mask, pkt_fdir_bit);

View File

@ -205,7 +205,7 @@ desc_fdir_processing_32b(volatile union i40e_rx_desc *rxdp,
* - Position that bit correctly based on packet number
* - OR in the resulting bit to mbuf_flags
*/
RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
__m256i mbuf_flag_mask = _mm256_set_epi32(0, 0, 0, 1 << 13,
0, 0, 0, 1 << 13);
__m256i desc_flag_bit = _mm256_and_si256(mbuf_flag_mask, fdir_mask);
@ -320,8 +320,8 @@ _recv_raw_pkts_vec_avx512(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
* destination
*/
const __m256i vlan_flags_shuf = _mm256_set_epi32
(0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
(0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0,
0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0);
/* data to be shuffled by result of flag mask, shifted down 11.
* If RSS/FDIR bits are set, shuffle moves appropriate flags in
@ -329,11 +329,11 @@ _recv_raw_pkts_vec_avx512(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
*/
const __m256i rss_flags_shuf = _mm256_set_epi8
(0, 0, 0, 0, 0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
0, 0, PKT_RX_FDIR, 0, /* end up 128-bits */
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH, 0, 0,
0, 0, RTE_MBUF_F_RX_FDIR, 0, /* end up 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
0, 0, PKT_RX_FDIR, 0);
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH, 0, 0,
0, 0, RTE_MBUF_F_RX_FDIR, 0);
/* data to be shuffled by the result of the flags mask shifted by 22
* bits. This gives us the l3_l4 flags.
@ -341,33 +341,33 @@ _recv_raw_pkts_vec_avx512(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
const __m256i l3_l4_flags_shuf = _mm256_set_epi8
(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
PKT_RX_IP_CKSUM_BAD >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
/* second 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
PKT_RX_IP_CKSUM_BAD >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1);
const __m256i cksum_mask = _mm256_set1_epi32
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD);
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
uint16_t i, received;
@ -572,7 +572,7 @@ _recv_raw_pkts_vec_avx512(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
* order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
* Then OR FDIR flags to mbuf_flags on FDIR ID hit.
*/
RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
const __m256i pkt_fdir_bit = _mm256_set1_epi32(1 << 13);
const __m256i fdir_mask =
_mm256_cmpeq_epi32(fdir, fdir_id);

View File

@ -93,43 +93,43 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4],
0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804};
const uint32x4_t cksum_mask = {
PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD};
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD};
/* map rss and vlan type to rss hash and vlan flag */
const uint8x16_t vlan_flags = {
0, 0, 0, 0,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0};
const uint8x16_t rss_flags = {
0, PKT_RX_FDIR, 0, 0,
0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
0, RTE_MBUF_F_RX_FDIR, 0, 0,
0, 0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
0, 0, 0, 0,
0, 0, 0, 0};
const uint8x16_t l3_l4e_flags = {
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
PKT_RX_IP_CKSUM_BAD >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
0, 0, 0, 0, 0, 0, 0, 0};
vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),

View File

@ -143,7 +143,7 @@ descs_to_fdir_32b(volatile union i40e_rx_desc *rxdp, struct rte_mbuf **rx_pkt)
* correct location in the mbuf->ol_flags
*/
const uint32_t FDIR_ID_BIT_SHIFT = 13;
RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << FDIR_ID_BIT_SHIFT));
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << FDIR_ID_BIT_SHIFT));
v_fd_id_mask = _mm_srli_epi32(v_fd_id_mask, 31);
v_fd_id_mask = _mm_slli_epi32(v_fd_id_mask, FDIR_ID_BIT_SHIFT);
@ -203,9 +203,9 @@ descs_to_fdir_16b(__m128i fltstat, __m128i descs[4], struct rte_mbuf **rx_pkt)
__m128i v_desc0_mask = _mm_and_si128(v_desc_fdir_mask, v_desc0_shift);
descs[0] = _mm_blendv_epi8(descs[0], _mm_setzero_si128(), v_desc0_mask);
/* Shift to 1 or 0 bit per u32 lane, then to PKT_RX_FDIR_ID offset */
/* Shift to 1 or 0 bit per u32 lane, then to RTE_MBUF_F_RX_FDIR_ID offset */
const uint32_t FDIR_ID_BIT_SHIFT = 13;
RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << FDIR_ID_BIT_SHIFT));
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << FDIR_ID_BIT_SHIFT));
__m128i v_mask_one_bit = _mm_srli_epi32(v_fdir_id_mask, 31);
return _mm_slli_epi32(v_mask_one_bit, FDIR_ID_BIT_SHIFT);
}
@ -228,44 +228,44 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp,
0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
const __m128i cksum_mask = _mm_set_epi32(
PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD);
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
/* map rss and vlan type to rss hash and vlan flag */
const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
0, 0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
0, 0, 0, 0);
const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
0, 0, PKT_RX_FDIR, 0);
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH, 0, 0,
0, 0, RTE_MBUF_F_RX_FDIR, 0);
const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
/* Unpack "status" from quadword 1, bits 0:32 */
vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);

View File

@ -379,14 +379,14 @@ iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
#endif
if (desc->flow_id != 0xFFFFFFFF) {
mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
mb->ol_flags |= PKT_RX_RSS_HASH;
mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
#endif
@ -403,13 +403,13 @@ iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
mb->ol_flags |= PKT_RX_RSS_HASH;
mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
if (desc->flow_id != 0xFFFFFFFF) {
mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
@ -445,13 +445,13 @@ iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
mb->ol_flags |= PKT_RX_RSS_HASH;
mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
if (desc->flow_id != 0xFFFFFFFF) {
mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
@ -1044,7 +1044,7 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
{
if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
(1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci =
rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
} else {
@ -1072,7 +1072,7 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
#endif
if (vlan_tci) {
mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci = vlan_tci;
}
}
@ -1089,26 +1089,26 @@ iavf_rxd_to_pkt_flags(uint64_t qword)
/* Check if RSS_HASH */
flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? RTE_MBUF_F_RX_RSS_HASH : 0;
/* Check if FDIR Match */
flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
PKT_RX_FDIR : 0);
RTE_MBUF_F_RX_FDIR : 0);
if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return flags;
}
if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
flags |= PKT_RX_IP_CKSUM_BAD;
flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
flags |= PKT_RX_IP_CKSUM_GOOD;
flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
flags |= PKT_RX_L4_CKSUM_BAD;
flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
flags |= PKT_RX_L4_CKSUM_GOOD;
flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
/* TODO: Oversize error bit is not processed here */
@ -1129,12 +1129,12 @@ iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
mb->hash.fdir.hi =
rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
flags |= PKT_RX_FDIR_ID;
flags |= RTE_MBUF_F_RX_FDIR_ID;
}
#else
mb->hash.fdir.hi =
rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
flags |= PKT_RX_FDIR_ID;
flags |= RTE_MBUF_F_RX_FDIR_ID;
#endif
return flags;
}
@ -1158,22 +1158,22 @@ iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
return 0;
if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return flags;
}
if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
flags |= PKT_RX_IP_CKSUM_BAD;
flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
flags |= PKT_RX_IP_CKSUM_GOOD;
flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
flags |= PKT_RX_L4_CKSUM_BAD;
flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
flags |= PKT_RX_L4_CKSUM_GOOD;
flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
return flags;
}
@ -1292,11 +1292,11 @@ iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
ptype_tbl[(uint8_t)((qword1 &
IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
if (pkt_flags & PKT_RX_FDIR)
if (pkt_flags & RTE_MBUF_F_RX_FDIR)
pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
rxm->ol_flags |= pkt_flags;
@ -1693,11 +1693,11 @@ iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
ptype_tbl[(uint8_t)((qword1 &
IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
first_seg->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
if (pkt_flags & PKT_RX_FDIR)
if (pkt_flags & RTE_MBUF_F_RX_FDIR)
pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
first_seg->ol_flags |= pkt_flags;
@ -1862,11 +1862,11 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
IAVF_RXD_QW1_PTYPE_MASK) >>
IAVF_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
mb->hash.rss = rte_le_to_cpu_32(
rxdp[j].wb.qword0.hi_dword.rss);
if (pkt_flags & PKT_RX_FDIR)
if (pkt_flags & RTE_MBUF_F_RX_FDIR)
pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
mb->ol_flags |= pkt_flags;
@ -2072,9 +2072,9 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
static inline uint16_t
iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
{
if (flags & PKT_TX_TCP_SEG)
if (flags & RTE_MBUF_F_TX_TCP_SEG)
return 1;
if (flags & PKT_TX_VLAN &&
if (flags & RTE_MBUF_F_TX_VLAN &&
vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
return 1;
return 0;
@ -2091,21 +2091,21 @@ iavf_txd_enable_checksum(uint64_t ol_flags,
IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
/* Enable L3 checksum offloads */
if (ol_flags & PKT_TX_IP_CKSUM) {
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
*td_offset |= (tx_offload.l3_len >> 2) <<
IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
} else if (ol_flags & PKT_TX_IPV4) {
} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
*td_offset |= (tx_offload.l3_len >> 2) <<
IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
} else if (ol_flags & PKT_TX_IPV6) {
} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
*td_offset |= (tx_offload.l3_len >> 2) <<
IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
}
if (ol_flags & PKT_TX_TCP_SEG) {
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (tx_offload.l4_len >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
@ -2113,18 +2113,18 @@ iavf_txd_enable_checksum(uint64_t ol_flags,
}
/* Enable L4 checksum offloads */
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
case RTE_MBUF_F_TX_TCP_CKSUM:
*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case PKT_TX_SCTP_CKSUM:
case RTE_MBUF_F_TX_SCTP_CKSUM:
*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case PKT_TX_UDP_CKSUM:
case RTE_MBUF_F_TX_UDP_CKSUM:
*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
@ -2260,7 +2260,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* Descriptor based VLAN insertion */
if (ol_flags & PKT_TX_VLAN &&
if (ol_flags & RTE_MBUF_F_TX_VLAN &&
txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
td_tag = tx_pkt->vlan_tci;
@ -2297,12 +2297,12 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* TSO enabled */
if (ol_flags & PKT_TX_TCP_SEG)
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
cd_type_cmd_tso_mss |=
iavf_set_tso_ctx(tx_pkt, tx_offload);
if (ol_flags & PKT_TX_VLAN &&
txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
if (ol_flags & RTE_MBUF_F_TX_VLAN &&
txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
cd_l2tag2 = tx_pkt->vlan_tci;
@ -2415,7 +2415,7 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
ol_flags = m->ol_flags;
/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
if (!(ol_flags & PKT_TX_TCP_SEG)) {
if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
rte_errno = EINVAL;
return i;
@ -2446,7 +2446,7 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
}
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
ol_flags & (PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN)) {
ol_flags & (RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN)) {
ret = iavf_check_vlan_up2tc(txq, m);
if (ret != 0) {
rte_errno = -ret;


@ -52,23 +52,21 @@
#define IAVF_TSO_MAX_SEG UINT8_MAX
#define IAVF_TX_MAX_MTU_SEG 8
#define IAVF_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG)
#define IAVF_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_L4_MASK | \
RTE_MBUF_F_TX_TCP_SEG)
#define IAVF_TX_OFFLOAD_MASK ( \
PKT_TX_OUTER_IPV6 | \
PKT_TX_OUTER_IPV4 | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
PKT_TX_VLAN | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG)
#define IAVF_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 | \
RTE_MBUF_F_TX_OUTER_IPV4 | \
RTE_MBUF_F_TX_IPV6 | \
RTE_MBUF_F_TX_IPV4 | \
RTE_MBUF_F_TX_VLAN | \
RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_L4_MASK | \
RTE_MBUF_F_TX_TCP_SEG)
#define IAVF_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
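The NOTSUP mask is the complement of the supported set within the global Tx offload mask. A prepare-time check built on it would look roughly like the sketch below; the actual iavf_prep_pkts body sits outside the hunks shown, so the names here only follow the macros above:

#include <errno.h>
#include <rte_errno.h>
#include <rte_mbuf_core.h>

/* Sketch: reject offload requests the PMD cannot honour. */
static inline int
tx_offloads_supported(const struct rte_mbuf *m)
{
	if (m->ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
		rte_errno = ENOTSUP; /* caller stops at this packet */
		return 0;
	}
	return 1;
}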
/**
* Rx Flex Descriptors


@ -127,8 +127,8 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
* destination
*/
const __m256i vlan_flags_shuf =
_mm256_set_epi32(0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
_mm256_set_epi32(0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0,
0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0);
/**
* data to be shuffled by result of flag mask, shifted down 11.
* If RSS/FDIR bits are set, shuffle moves appropriate flags in
@ -136,11 +136,11 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
*/
const __m256i rss_flags_shuf =
_mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
0, 0, 0, 0, PKT_RX_FDIR, 0,/* end up 128-bits */
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH,
0, 0, 0, 0, RTE_MBUF_F_RX_FDIR, 0,/* end up 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
0, 0, 0, 0, PKT_RX_FDIR, 0);
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH,
0, 0, 0, 0, RTE_MBUF_F_RX_FDIR, 0);
/**
* data to be shuffled by the result of the flags mask shifted by 22
@ -148,33 +148,33 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
*/
const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
PKT_RX_IP_CKSUM_BAD >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
/* second 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
PKT_RX_IP_CKSUM_BAD >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1);
const __m256i cksum_mask =
_mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD);
_mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */
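For readers new to the vector paths: the shuffle tables above pack checksum flag words into bytes, pre-shifted right by one so the largest combination fits in eight bits; after the shuffle the Rx loop shifts the result back left by one and applies cksum_mask (that step falls outside the hunks shown). A scalar sketch of the same trick, using illustrative stand-in values rather than the real mbuf bits:

#include <stdint.h>

/* Stand-in flag values for illustration only. */
#define F_IP_CKSUM_BAD   (1u << 4)
#define F_IP_CKSUM_GOOD  (1u << 7)
#define F_L4_CKSUM_BAD   (1u << 3)
#define F_L4_CKSUM_GOOD  (1u << 8)

/* Sketch: byte-table lookup equivalent of l3_l4_flags_shuf.
 * err2: bit 0 = IP checksum error, bit 1 = L4 checksum error. */
static inline uint32_t
cksum_flags_from_err_bits(uint8_t err2)
{
	static const uint8_t tbl[4] = {
		(F_IP_CKSUM_GOOD | F_L4_CKSUM_GOOD) >> 1,
		(F_IP_CKSUM_BAD  | F_L4_CKSUM_GOOD) >> 1,
		(F_IP_CKSUM_GOOD | F_L4_CKSUM_BAD)  >> 1,
		(F_IP_CKSUM_BAD  | F_L4_CKSUM_BAD)  >> 1,
	};

	return (uint32_t)tbl[err2 & 0x3] << 1; /* undo the >> 1 */
}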
@ -502,10 +502,10 @@ static inline __m256i
flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
PKT_RX_FDIR_ID);
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
const __m256i pkt_fdir_bit = _mm256_set1_epi32(RTE_MBUF_F_RX_FDIR |
RTE_MBUF_F_RX_FDIR_ID);
/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
__m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
@ -629,36 +629,36 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
*/
const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
/* second 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
const __m256i cksum_mask =
_mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD);
_mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
/**
* data to be shuffled by result of flag mask, shifted down 12.
* If RSS(bit12)/VLAN(bit13) are set,
@ -667,27 +667,27 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
const __m256i rss_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_RSS_HASH, 0,
PKT_RX_RSS_HASH, 0,
RTE_MBUF_F_RX_RSS_HASH, 0,
RTE_MBUF_F_RX_RSS_HASH, 0,
/* end up 128-bits */
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_RSS_HASH, 0,
PKT_RX_RSS_HASH, 0);
RTE_MBUF_F_RX_RSS_HASH, 0,
RTE_MBUF_F_RX_RSS_HASH, 0);
const __m256i vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
0, 0,
/* end up 128-bits */
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
0, 0);
uint16_t i, received;
@ -1026,8 +1026,8 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0,
PKT_RX_VLAN |
PKT_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN |
RTE_MBUF_F_RX_VLAN_STRIPPED,
0);
vlan_flags =


@ -431,8 +431,8 @@ _iavf_recv_raw_pkts_vec_avx512(struct iavf_rx_queue *rxq,
* destination
*/
const __m256i vlan_flags_shuf =
_mm256_set_epi32(0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0,
0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0);
_mm256_set_epi32(0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0,
0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0);
#endif
#ifdef IAVF_RX_RSS_OFFLOAD
@ -443,11 +443,11 @@ _iavf_recv_raw_pkts_vec_avx512(struct iavf_rx_queue *rxq,
*/
const __m256i rss_flags_shuf =
_mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
0, 0, 0, 0, PKT_RX_FDIR, 0,/* end up 128-bits */
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH,
0, 0, 0, 0, RTE_MBUF_F_RX_FDIR, 0,/* end up 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH,
0, 0, 0, 0, PKT_RX_FDIR, 0);
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH,
0, 0, 0, 0, RTE_MBUF_F_RX_FDIR, 0);
#endif
#ifdef IAVF_RX_CSUM_OFFLOAD
@ -457,33 +457,33 @@ _iavf_recv_raw_pkts_vec_avx512(struct iavf_rx_queue *rxq,
*/
const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
PKT_RX_IP_CKSUM_BAD >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
/* second 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
PKT_RX_IP_CKSUM_BAD >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1);
const __m256i cksum_mask =
_mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD);
_mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
#endif
#if defined(IAVF_RX_CSUM_OFFLOAD) || defined(IAVF_RX_VLAN_OFFLOAD) || defined(IAVF_RX_RSS_OFFLOAD)
@ -688,10 +688,10 @@ static __rte_always_inline __m256i
flex_rxd_to_fdir_flags_vec_avx512(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
PKT_RX_FDIR_ID);
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
const __m256i pkt_fdir_bit = _mm256_set1_epi32(RTE_MBUF_F_RX_FDIR |
RTE_MBUF_F_RX_FDIR_ID);
/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
__m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
@ -978,36 +978,36 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
*/
const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
/* second 128-bits */
0, 0, 0, 0, 0, 0, 0, 0,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
const __m256i cksum_mask =
_mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD);
_mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
#endif
#if defined(IAVF_RX_VLAN_OFFLOAD) || defined(IAVF_RX_RSS_OFFLOAD)
/**
@ -1019,28 +1019,28 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_RSS_HASH, 0,
PKT_RX_RSS_HASH, 0,
RTE_MBUF_F_RX_RSS_HASH, 0,
RTE_MBUF_F_RX_RSS_HASH, 0,
/* end up 128-bits */
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_RSS_HASH, 0,
PKT_RX_RSS_HASH, 0);
RTE_MBUF_F_RX_RSS_HASH, 0,
RTE_MBUF_F_RX_RSS_HASH, 0);
const __m256i vlan_flags_shuf = _mm256_set_epi8
(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
0, 0,
/* end up 128-bits */
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
0, 0);
#endif
@ -1275,8 +1275,8 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0,
PKT_RX_VLAN |
PKT_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN |
RTE_MBUF_F_RX_VLAN_STRIPPED,
0);
vlan_flags =


@ -326,33 +326,33 @@ iavf_txd_enable_offload(__rte_unused struct rte_mbuf *tx_pkt,
IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
/* Enable L3 checksum offloads */
if (ol_flags & PKT_TX_IP_CKSUM) {
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
td_offset |= (tx_pkt->l3_len >> 2) <<
IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
} else if (ol_flags & PKT_TX_IPV4) {
} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
td_offset |= (tx_pkt->l3_len >> 2) <<
IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
} else if (ol_flags & PKT_TX_IPV6) {
} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
td_offset |= (tx_pkt->l3_len >> 2) <<
IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
}
/* Enable L4 checksum offloads */
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
case RTE_MBUF_F_TX_TCP_CKSUM:
td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case PKT_TX_SCTP_CKSUM:
case RTE_MBUF_F_TX_SCTP_CKSUM:
td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case PKT_TX_UDP_CKSUM:
case RTE_MBUF_F_TX_UDP_CKSUM:
td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
@ -365,7 +365,7 @@ iavf_txd_enable_offload(__rte_unused struct rte_mbuf *tx_pkt,
#endif
#ifdef IAVF_TX_VLAN_QINQ_OFFLOAD
if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
IAVF_TXD_QW1_L2TAG1_SHIFT);


@ -108,42 +108,42 @@ desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
const __m128i cksum_mask = _mm_set_epi32(
PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_OUTER_IP_CKSUM_BAD);
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
/* map rss and vlan type to rss hash and vlan flag */
const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
0, 0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
0, 0, 0, 0);
const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
0, 0, PKT_RX_FDIR, 0);
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH, 0, 0,
0, 0, RTE_MBUF_F_RX_FDIR, 0);
const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
PKT_RX_IP_CKSUM_BAD >> 1,
(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1);
vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
@ -193,10 +193,10 @@ static inline __m128i
flex_rxd_to_fdir_flags_vec(const __m128i fdir_id0_3)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
const __m128i pkt_fdir_bit = _mm_set1_epi32(PKT_RX_FDIR |
PKT_RX_FDIR_ID);
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
const __m128i pkt_fdir_bit = _mm_set1_epi32(RTE_MBUF_F_RX_FDIR |
RTE_MBUF_F_RX_FDIR_ID);
/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
const __m128i fdir_mis_mask = _mm_set1_epi32(FDID_MIS_MAGIC);
__m128i fdir_mask = _mm_cmpeq_epi32(fdir_id0_3,
@ -225,43 +225,43 @@ flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
const __m128i desc_mask = _mm_set_epi32(0x3070, 0x3070,
0x3070, 0x3070);
const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
PKT_RX_OUTER_IP_CKSUM_BAD);
const __m128i cksum_mask = _mm_set_epi32(RTE_MBUF_F_RX_IP_CKSUM_MASK |
RTE_MBUF_F_RX_L4_CKSUM_MASK |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_MASK |
RTE_MBUF_F_RX_L4_CKSUM_MASK |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_MASK |
RTE_MBUF_F_RX_L4_CKSUM_MASK |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_MASK |
RTE_MBUF_F_RX_L4_CKSUM_MASK |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
/* map the checksum, rss and vlan fields to the checksum, rss
* and vlan flag
*/
const __m128i cksum_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_RSS_HASH, 0);
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_RSS_HASH, 0);
/* merge 4 descriptors */
flags = _mm_unpackhi_epi32(descs[0], descs[1]);


@ -10,11 +10,10 @@
#include "ice_rxtx.h"
#include "ice_rxtx_vec_common.h"
#define ICE_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
PKT_TX_OUTER_IP_CKSUM)
#define ICE_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_L4_MASK | \
RTE_MBUF_F_TX_TCP_SEG | \
RTE_MBUF_F_TX_OUTER_IP_CKSUM)
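This mask lets the Tx path skip checksum setup entirely when no relevant flag is set. A gating sketch, assuming the surrounding driver declarations (ice_txd_enable_checksum, union ice_tx_offload) are in scope and keep the parameter order shown later in this file:

/* Sketch: only build checksum context when a relevant flag is set. */
static inline void
maybe_enable_checksum(uint64_t ol_flags, uint32_t *td_cmd,
		      uint32_t *td_offset, union ice_tx_offload tx_offload)
{
	if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
		ice_txd_enable_checksum(ol_flags, td_cmd, td_offset,
					tx_offload);
}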
/* Offset of mbuf dynamic field for protocol extraction data */
int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
@ -88,13 +87,13 @@ ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
mb->ol_flags |= PKT_RX_RSS_HASH;
mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (desc->flow_id != 0xFFFFFFFF) {
mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
#endif
@ -112,14 +111,14 @@ ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
#endif
if (desc->flow_id != 0xFFFFFFFF) {
mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
mb->ol_flags |= PKT_RX_RSS_HASH;
mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
#endif
@ -136,13 +135,13 @@ ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
mb->ol_flags |= PKT_RX_RSS_HASH;
mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (desc->flow_id != 0xFFFFFFFF) {
mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
@ -178,13 +177,13 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
stat_err = rte_le_to_cpu_16(desc->status_error0);
if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
mb->ol_flags |= PKT_RX_RSS_HASH;
mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (desc->flow_id != 0xFFFFFFFF) {
mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
@ -1490,27 +1489,27 @@ ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
return 0;
if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return flags;
}
if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
flags |= PKT_RX_IP_CKSUM_BAD;
flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else
flags |= PKT_RX_IP_CKSUM_GOOD;
flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
flags |= PKT_RX_L4_CKSUM_BAD;
flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
else
flags |= PKT_RX_L4_CKSUM_GOOD;
flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
else
flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
return flags;
}
@ -1520,7 +1519,7 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
{
if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
(1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
mb->vlan_tci =
rte_le_to_cpu_16(rxdp->wb.l2tag1);
PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
@ -1532,8 +1531,8 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
(1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
mb->vlan_tci_outer = mb->vlan_tci;
mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
@ -1627,7 +1626,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
rxq->time_high =
rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
mb->timesync = rxq->queue_id;
pkt_flags |= PKT_RX_IEEE1588_PTP;
pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
}
#endif
mb->ol_flags |= pkt_flags;
@ -1945,7 +1944,7 @@ ice_recv_scattered_pkts(void *rx_queue,
rxq->time_high =
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
first_seg->timesync = rxq->queue_id;
pkt_flags |= PKT_RX_IEEE1588_PTP;
pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
}
#endif
first_seg->ol_flags |= pkt_flags;
@ -2376,7 +2375,7 @@ ice_recv_pkts(void *rx_queue,
rxq->time_high =
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
rxm->timesync = rxq->queue_id;
pkt_flags |= PKT_RX_IEEE1588_PTP;
pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
}
#endif
rxm->ol_flags |= pkt_flags;
@ -2410,11 +2409,11 @@ ice_parse_tunneling_params(uint64_t ol_flags,
uint32_t *cd_tunneling)
{
/* EIPT: External (outer) IP header type */
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
else if (ol_flags & PKT_TX_OUTER_IPV4)
else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)
*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
else if (ol_flags & PKT_TX_OUTER_IPV6)
else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)
*cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
/* EIPLEN: External (outer) IP header length, in DWords */
@ -2422,16 +2421,16 @@ ice_parse_tunneling_params(uint64_t ol_flags,
ICE_TXD_CTX_QW0_EIPLEN_S;
/* L4TUNT: L4 Tunneling Type */
switch (ol_flags & PKT_TX_TUNNEL_MASK) {
case PKT_TX_TUNNEL_IPIP:
switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
case RTE_MBUF_F_TX_TUNNEL_IPIP:
/* for non UDP / GRE tunneling, set to 00b */
break;
case PKT_TX_TUNNEL_VXLAN:
case PKT_TX_TUNNEL_GTP:
case PKT_TX_TUNNEL_GENEVE:
case RTE_MBUF_F_TX_TUNNEL_VXLAN:
case RTE_MBUF_F_TX_TUNNEL_GTP:
case RTE_MBUF_F_TX_TUNNEL_GENEVE:
*cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
break;
case PKT_TX_TUNNEL_GRE:
case RTE_MBUF_F_TX_TUNNEL_GRE:
*cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
break;
default:
@ -2468,7 +2467,7 @@ ice_txd_enable_checksum(uint64_t ol_flags,
union ice_tx_offload tx_offload)
{
/* Set MACLEN */
if (ol_flags & PKT_TX_TUNNEL_MASK)
if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
*td_offset |= (tx_offload.outer_l2_len >> 1)
<< ICE_TX_DESC_LEN_MACLEN_S;
else
@ -2476,21 +2475,21 @@ ice_txd_enable_checksum(uint64_t ol_flags,
<< ICE_TX_DESC_LEN_MACLEN_S;
/* Enable L3 checksum offloads */
if (ol_flags & PKT_TX_IP_CKSUM) {
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
*td_offset |= (tx_offload.l3_len >> 2) <<
ICE_TX_DESC_LEN_IPLEN_S;
} else if (ol_flags & PKT_TX_IPV4) {
} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
*td_offset |= (tx_offload.l3_len >> 2) <<
ICE_TX_DESC_LEN_IPLEN_S;
} else if (ol_flags & PKT_TX_IPV6) {
} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
*td_offset |= (tx_offload.l3_len >> 2) <<
ICE_TX_DESC_LEN_IPLEN_S;
}
if (ol_flags & PKT_TX_TCP_SEG) {
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (tx_offload.l4_len >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
@ -2498,18 +2497,18 @@ ice_txd_enable_checksum(uint64_t ol_flags,
}
/* Enable L4 checksum offloads */
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
case RTE_MBUF_F_TX_TCP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
case PKT_TX_SCTP_CKSUM:
case RTE_MBUF_F_TX_SCTP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
case PKT_TX_UDP_CKSUM:
case RTE_MBUF_F_TX_UDP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
@ -2587,11 +2586,11 @@ ice_build_ctob(uint32_t td_cmd,
static inline uint16_t
ice_calc_context_desc(uint64_t flags)
{
static uint64_t mask = PKT_TX_TCP_SEG |
PKT_TX_QINQ |
PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TUNNEL_MASK |
PKT_TX_IEEE1588_TMST;
static uint64_t mask = RTE_MBUF_F_TX_TCP_SEG |
RTE_MBUF_F_TX_QINQ |
RTE_MBUF_F_TX_OUTER_IP_CKSUM |
RTE_MBUF_F_TX_TUNNEL_MASK |
RTE_MBUF_F_TX_IEEE1588_TMST;
return (flags & mask) ? 1 : 0;
}
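The return value feeds the descriptor budget in ice_xmit_pkts. The non-TSO branch (cut off in a later hunk) amounts to roughly the sketch below, assuming ice_calc_context_desc above is in scope; the TSO case uses ice_calc_pkt_desc instead of nb_segs:

#include <rte_mbuf_core.h>

/* Sketch: descriptor budget for a non-TSO packet. */
static inline uint16_t
tx_descs_needed(const struct rte_mbuf *tx_pkt)
{
	/* One data descriptor per segment, plus an optional context
	 * descriptor for TSO/QinQ/outer-cksum/tunnel/timestamp. */
	uint16_t nb_ctx = ice_calc_context_desc(tx_pkt->ol_flags);

	return (uint16_t)(tx_pkt->nb_segs + nb_ctx);
}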
@ -2609,7 +2608,7 @@ ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
}
hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
cd_cmd = ICE_TX_CTX_DESC_TSO;
@ -2696,7 +2695,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* the mbuf data size exceeds max data size that hw allows
* per tx desc.
*/
if (ol_flags & PKT_TX_TCP_SEG)
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
nb_ctx);
else
@ -2725,14 +2724,14 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* Descriptor based VLAN insertion */
if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
td_tag = tx_pkt->vlan_tci;
}
/* Fill in tunneling parameters if necessary */
cd_tunneling_params = 0;
if (ol_flags & PKT_TX_TUNNEL_MASK)
if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
ice_parse_tunneling_params(ol_flags, tx_offload,
&cd_tunneling_params);
@ -2756,10 +2755,10 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txe->mbuf = NULL;
}
if (ol_flags & PKT_TX_TCP_SEG)
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
cd_type_cmd_tso_mss |=
ice_set_tso_ctx(tx_pkt, tx_offload);
else if (ol_flags & PKT_TX_IEEE1588_TMST)
else if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
cd_type_cmd_tso_mss |=
((uint64_t)ICE_TX_CTX_DESC_TSYN <<
ICE_TXD_CTX_QW1_CMD_S);
@ -2768,7 +2767,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_cpu_to_le_32(cd_tunneling_params);
/* TX context descriptor based double VLAN insert */
if (ol_flags & PKT_TX_QINQ) {
if (ol_flags & RTE_MBUF_F_TX_QINQ) {
cd_l2tag2 = tx_pkt->vlan_tci_outer;
cd_type_cmd_tso_mss |=
((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
@ -2796,7 +2795,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
slen = m_seg->data_len;
buf_dma_addr = rte_mbuf_data_iova(m_seg);
while ((ol_flags & PKT_TX_TCP_SEG) &&
while ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->cmd_type_offset_bsz =
@ -3385,7 +3384,7 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
m = tx_pkts[i];
ol_flags = m->ol_flags;
if (ol_flags & PKT_TX_TCP_SEG &&
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
(m->tso_segsz < ICE_MIN_TSO_MSS ||
m->tso_segsz > ICE_MAX_TSO_MSS ||
m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {


@ -21,10 +21,10 @@ static __rte_always_inline __m256i
ice_flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
PKT_RX_FDIR_ID);
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
const __m256i pkt_fdir_bit = _mm256_set1_epi32(RTE_MBUF_F_RX_FDIR |
RTE_MBUF_F_RX_FDIR_ID);
/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
__m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
@ -143,82 +143,82 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
* bits. This gives us the l3_l4 flags.
*/
const __m256i l3_l4_flags_shuf =
_mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 |
PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
_mm256_set_epi8((RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
/**
* second 128-bits
* shift right 20 bits to use the low two bits to indicate
* outer checksum status
* shift right 1 bit to make sure it does not exceed 255
*/
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1);
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
const __m256i cksum_mask =
_mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_OUTER_L4_CKSUM_MASK);
_mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_MASK |
RTE_MBUF_F_RX_L4_CKSUM_MASK |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK);
/**
* data to be shuffled by result of flag mask, shifted down 12.
* If RSS(bit12)/VLAN(bit13) are set,
@ -227,16 +227,16 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_RSS_HASH, 0,
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_RSS_HASH, 0,
/* end up 128-bits */
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_RSS_HASH, 0);
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_RSS_HASH, 0);
RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */


@ -136,10 +136,10 @@ static inline __m256i
ice_flex_rxd_to_fdir_flags_vec_avx512(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
PKT_RX_FDIR_ID);
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
const __m256i pkt_fdir_bit = _mm256_set1_epi32(RTE_MBUF_F_RX_FDIR |
RTE_MBUF_F_RX_FDIR_ID);
/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
__m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
@ -243,82 +243,82 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
* bits. This gives us the l3_l4 flags.
*/
const __m256i l3_l4_flags_shuf =
_mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 |
PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
_mm256_set_epi8((RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
/**
* second 128-bits
* shift right 20 bits to use the low two bits to indicate
* outer checksum status
* shift right 1 bit to make sure it does not exceed 255
*/
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1);
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
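A quick standalone check of the packing trick described in the comment above, assuming the RX flag bit positions I recall from rte_mbuf_core.h (inner L4/IP and outer-IP statuses in bits 3-8, outer-L4 status in bits 21-22); treat the defines below as assumptions, not the library header. Shifting the outer-L4 bits down by 20 places them beside the inner bits, and the extra >> 1 is what makes the widest combination fit a uint8_t shuffle lane (the receive path restores it afterwards with a left shift):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed bit positions (mirroring rte_mbuf_core.h) */
#define F_RX_L4_CKSUM_BAD        (1ULL << 3)
#define F_RX_IP_CKSUM_BAD        (1ULL << 4)
#define F_RX_OUTER_IP_CKSUM_BAD  (1ULL << 5)
#define F_RX_IP_CKSUM_GOOD       (1ULL << 7)
#define F_RX_L4_CKSUM_GOOD       (1ULL << 8)
#define F_RX_OUTER_L4_CKSUM_BAD  (1ULL << 21)
#define F_RX_OUTER_L4_CKSUM_GOOD (1ULL << 22)

int main(void)
{
    /* widest value one shuffle-table lane must carry */
    uint64_t worst = (F_RX_OUTER_L4_CKSUM_GOOD >> 20) |
                     F_RX_OUTER_IP_CKSUM_BAD |
                     F_RX_L4_CKSUM_GOOD | F_RX_IP_CKSUM_GOOD;

    assert(worst > UINT8_MAX);          /* 0x1a4: too wide as-is */
    assert((worst >> 1) <= UINT8_MAX);  /* 0xd2: fits after >> 1 */
    printf("0x%llx -> 0x%llx\n", (unsigned long long)worst,
           (unsigned long long)(worst >> 1));
    return 0;
}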
const __m256i cksum_mask =
_mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_OUTER_L4_CKSUM_MASK);
_mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_MASK |
RTE_MBUF_F_RX_L4_CKSUM_MASK |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK);
/**
* data to be shuffled by result of flag mask, shifted down 12.
* If RSS(bit12)/VLAN(bit13) are set,
@ -327,16 +327,16 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_RSS_HASH, 0,
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_RSS_HASH, 0,
/* 2nd 128-bits */
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_RSS_HASH, 0);
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_RSS_HASH, 0);
uint16_t i, received;

View File

@ -369,33 +369,33 @@ ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
ICE_TX_DESC_LEN_MACLEN_S;
/* Enable L3 checksum offload */
if (ol_flags & PKT_TX_IP_CKSUM) {
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
td_offset |= (tx_pkt->l3_len >> 2) <<
ICE_TX_DESC_LEN_IPLEN_S;
} else if (ol_flags & PKT_TX_IPV4) {
} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
td_offset |= (tx_pkt->l3_len >> 2) <<
ICE_TX_DESC_LEN_IPLEN_S;
} else if (ol_flags & PKT_TX_IPV6) {
} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
td_offset |= (tx_pkt->l3_len >> 2) <<
ICE_TX_DESC_LEN_IPLEN_S;
}
/* Enable L4 checksum offloads */
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
case RTE_MBUF_F_TX_TCP_CKSUM:
td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
case PKT_TX_SCTP_CKSUM:
case RTE_MBUF_F_TX_SCTP_CKSUM:
td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
case PKT_TX_UDP_CKSUM:
case RTE_MBUF_F_TX_UDP_CKSUM:
td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
@ -407,7 +407,7 @@ ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
*txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;
/* Tx VLAN/QINQ insertion Offload */
if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
ICE_TXD_QW1_L2TAG1_S);
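For context, the descriptor bits set above are driven entirely by fields the application fills in before rte_eth_tx_burst(). A minimal sender-side sketch for an IPv4/TCP packet (the header lengths assume a plain Ethernet/IPv4/TCP layout; adjust to the real packet):

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Sketch: request IPv4 header and TCP checksum offload for m. */
static void
request_tx_cksum(struct rte_mbuf *m)
{
    m->l2_len = sizeof(struct rte_ether_hdr);
    m->l3_len = sizeof(struct rte_ipv4_hdr);
    m->ol_flags |= RTE_MBUF_F_TX_IPV4 |
                   RTE_MBUF_F_TX_IP_CKSUM |
                   RTE_MBUF_F_TX_TCP_CKSUM;
}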

View File

@ -14,10 +14,10 @@ static inline __m128i
ice_flex_rxd_to_fdir_flags_vec(const __m128i fdir_id0_3)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
const __m128i pkt_fdir_bit = _mm_set1_epi32(PKT_RX_FDIR |
PKT_RX_FDIR_ID);
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
const __m128i pkt_fdir_bit = _mm_set1_epi32(RTE_MBUF_F_RX_FDIR |
RTE_MBUF_F_RX_FDIR_ID);
/* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
const __m128i fdir_mis_mask = _mm_set1_epi32(FDID_MIS_MAGIC);
__m128i fdir_mask = _mm_cmpeq_epi32(fdir_id0_3,
@ -116,72 +116,72 @@ ice_rx_desc_to_olflags_v(struct ice_rx_queue *rxq, __m128i descs[4],
*/
const __m128i desc_mask = _mm_set_epi32(0x30f0, 0x30f0,
0x30f0, 0x30f0);
const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
PKT_RX_OUTER_L4_CKSUM_MASK |
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
PKT_RX_OUTER_L4_CKSUM_MASK |
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
PKT_RX_OUTER_L4_CKSUM_MASK |
PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
PKT_RX_OUTER_L4_CKSUM_MASK |
PKT_RX_OUTER_IP_CKSUM_BAD);
const __m128i cksum_mask = _mm_set_epi32(RTE_MBUF_F_RX_IP_CKSUM_MASK |
RTE_MBUF_F_RX_L4_CKSUM_MASK |
RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_MASK |
RTE_MBUF_F_RX_L4_CKSUM_MASK |
RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_MASK |
RTE_MBUF_F_RX_L4_CKSUM_MASK |
RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_MASK |
RTE_MBUF_F_RX_L4_CKSUM_MASK |
RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
/* map the checksum, rss and vlan fields to the checksum, rss
* and vlan flag
*/
const __m128i cksum_flags =
_mm_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 |
PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
_mm_set_epi8((RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 |
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
/**
* shift right 20 bits to use the low two bits to indicate
* outer checksum status
* shift right 1 bit to make sure it does not exceed 255
*/
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
PKT_RX_IP_CKSUM_GOOD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_BAD) >> 1,
(PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_IP_CKSUM_GOOD) >> 1);
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
PKT_RX_RSS_HASH, 0);
RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
RTE_MBUF_F_RX_RSS_HASH, 0);
/* merge 4 descriptors */
flags = _mm_unpackhi_epi32(descs[0], descs[1]);

View File

@ -74,17 +74,16 @@
#define IGC_TSO_MAX_MSS 9216
/* Bit Mask to indicate what bits required for building TX context */
#define IGC_TX_OFFLOAD_MASK ( \
PKT_TX_OUTER_IPV4 | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
PKT_TX_VLAN | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
PKT_TX_UDP_SEG)
#define IGC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV4 | \
RTE_MBUF_F_TX_IPV6 | \
RTE_MBUF_F_TX_IPV4 | \
RTE_MBUF_F_TX_VLAN | \
RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_L4_MASK | \
RTE_MBUF_F_TX_TCP_SEG | \
RTE_MBUF_F_TX_UDP_SEG)
#define IGC_TX_OFFLOAD_SEG (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)
#define IGC_TX_OFFLOAD_SEG (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)
#define IGC_ADVTXD_POPTS_TXSM 0x00000200 /* L4 Checksum offload request */
#define IGC_ADVTXD_POPTS_IXSM 0x00000100 /* IP Checksum offload request */
@ -92,7 +91,7 @@
/* L4 Packet TYPE of Reserved */
#define IGC_ADVTXD_TUCMD_L4T_RSV 0x00001800
#define IGC_TX_OFFLOAD_NOTSUP_MASK (PKT_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
#define IGC_TX_OFFLOAD_NOTSUP_MASK (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
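The XOR above is the usual PMD idiom: whatever the global RTE_MBUF_F_TX_OFFLOAD_MASK contains but the driver's own mask does not becomes the unsupported set. A hedged sketch of how a tx_pkt_prepare-style callback typically applies such a mask (the function name and shape are illustrative, not igc's code):

#include <errno.h>
#include <stdint.h>
#include <rte_mbuf.h>

/* Sketch: reject a packet whose requested offloads fall in the
 * driver's not-supported mask. */
static int
tx_offload_check(const struct rte_mbuf *m, uint64_t notsup_mask)
{
    if (m->ol_flags & notsup_mask)
        return -ENOTSUP;
    return 0;
}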
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
@ -215,16 +214,18 @@ struct igc_tx_queue {
static inline uint64_t
rx_desc_statuserr_to_pkt_flags(uint32_t statuserr)
{
static uint64_t l4_chksum_flags[] = {0, 0, PKT_RX_L4_CKSUM_GOOD,
PKT_RX_L4_CKSUM_BAD};
static uint64_t l4_chksum_flags[] = {0, 0,
RTE_MBUF_F_RX_L4_CKSUM_GOOD,
RTE_MBUF_F_RX_L4_CKSUM_BAD};
static uint64_t l3_chksum_flags[] = {0, 0, PKT_RX_IP_CKSUM_GOOD,
PKT_RX_IP_CKSUM_BAD};
static uint64_t l3_chksum_flags[] = {0, 0,
RTE_MBUF_F_RX_IP_CKSUM_GOOD,
RTE_MBUF_F_RX_IP_CKSUM_BAD};
uint64_t pkt_flags = 0;
uint32_t tmp;
if (statuserr & IGC_RXD_STAT_VP)
pkt_flags |= PKT_RX_VLAN_STRIPPED;
pkt_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
tmp = !!(statuserr & (IGC_RXD_STAT_L4CS | IGC_RXD_STAT_UDPCS));
tmp = (tmp << 1) | (uint32_t)!!(statuserr & IGC_RXD_EXT_ERR_L4E);
@ -332,10 +333,10 @@ rx_desc_get_pkt_info(struct igc_rx_queue *rxq, struct rte_mbuf *rxm,
rxm->vlan_tci = rte_le_to_cpu_16(rxd->wb.upper.vlan);
pkt_flags = (hlen_type_rss & IGC_RXD_RSS_TYPE_MASK) ?
PKT_RX_RSS_HASH : 0;
RTE_MBUF_F_RX_RSS_HASH : 0;
if (hlen_type_rss & IGC_RXD_VPKT)
pkt_flags |= PKT_RX_VLAN;
pkt_flags |= RTE_MBUF_F_RX_VLAN;
pkt_flags |= rx_desc_statuserr_to_pkt_flags(staterr);
@ -1449,7 +1450,7 @@ check_tso_para(uint64_t ol_req, union igc_tx_offload ol_para)
if (ol_para.tso_segsz > IGC_TSO_MAX_MSS || ol_para.l2_len +
ol_para.l3_len + ol_para.l4_len > IGC_TSO_MAX_HDRLEN) {
ol_req &= ~IGC_TX_OFFLOAD_SEG;
ol_req |= PKT_TX_TCP_CKSUM;
ol_req |= RTE_MBUF_F_TX_TCP_CKSUM;
}
return ol_req;
}
@ -1511,20 +1512,20 @@ igc_set_xmit_ctx(struct igc_tx_queue *txq,
/* Specify which HW CTX to upload. */
mss_l4len_idx = (ctx_curr << IGC_ADVTXD_IDX_SHIFT);
if (ol_flags & PKT_TX_VLAN)
if (ol_flags & RTE_MBUF_F_TX_VLAN)
tx_offload_mask.vlan_tci = 0xffff;
/* check if TCP segmentation required for this packet */
if (ol_flags & IGC_TX_OFFLOAD_SEG) {
/* implies IP cksum in IPv4 */
if (ol_flags & PKT_TX_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
type_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV4 |
IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
else
type_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV6 |
IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
if (ol_flags & PKT_TX_TCP_SEG)
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
else
type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP;
@ -1535,26 +1536,26 @@ igc_set_xmit_ctx(struct igc_tx_queue *txq,
mss_l4len_idx |= (uint32_t)tx_offload.l4_len <<
IGC_ADVTXD_L4LEN_SHIFT;
} else { /* no TSO, check if hardware checksum is needed */
if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK))
tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
if (ol_flags & PKT_TX_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
type_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV4;
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
case RTE_MBUF_F_TX_TCP_CKSUM:
type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP |
IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= (uint32_t)sizeof(struct rte_tcp_hdr)
<< IGC_ADVTXD_L4LEN_SHIFT;
break;
case PKT_TX_UDP_CKSUM:
case RTE_MBUF_F_TX_UDP_CKSUM:
type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP |
IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= (uint32_t)sizeof(struct rte_udp_hdr)
<< IGC_ADVTXD_L4LEN_SHIFT;
break;
case PKT_TX_SCTP_CKSUM:
case RTE_MBUF_F_TX_SCTP_CKSUM:
type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_SCTP |
IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= (uint32_t)sizeof(struct rte_sctp_hdr)
@ -1585,7 +1586,7 @@ tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
uint32_t cmdtype;
static uint32_t vlan_cmd[2] = {0, IGC_ADVTXD_DCMD_VLE};
static uint32_t tso_cmd[2] = {0, IGC_ADVTXD_DCMD_TSE};
cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN) != 0];
cmdtype = vlan_cmd[(ol_flags & RTE_MBUF_F_TX_VLAN) != 0];
cmdtype |= tso_cmd[(ol_flags & IGC_TX_OFFLOAD_SEG) != 0];
return cmdtype;
}
@ -1597,8 +1598,8 @@ tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
static const uint32_t l3_olinfo[2] = {0, IGC_ADVTXD_POPTS_IXSM};
uint32_t tmp;
tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
tmp = l4_olinfo[(ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM];
tmp |= l3_olinfo[(ol_flags & RTE_MBUF_F_TX_IP_CKSUM) != 0];
tmp |= l4_olinfo[(ol_flags & IGC_TX_OFFLOAD_SEG) != 0];
return tmp;
}
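The indexing style used here ([(ol_flags & mask) != 0]) is a small branchless idiom: the boolean collapses to 0 or 1 and selects one of two precomputed descriptor option words. A generic sketch with illustrative names:

#include <stdint.h>

/* Sketch: boolean-indexed two-entry option table. */
static uint32_t
flag_to_opt(uint64_t ol_flags, uint64_t mask, const uint32_t opt[2])
{
    return opt[(ol_flags & mask) != 0];
}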
@ -1755,7 +1756,7 @@ igc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* Timer 0 should be used for packet timestamping,
* sample the packet timestamp to reg 0
*/
if (ol_flags & PKT_TX_IEEE1588_TMST)
if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
cmd_type_len |= IGC_ADVTXD_MAC_TSTAMP;
if (tx_ol_req) {

View File

@ -257,7 +257,7 @@ ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
(l3_hdr + txm->l3_len);
if (txm->ol_flags & PKT_TX_IP_CKSUM) {
if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
ipv4_hdr->hdr_checksum = 0;
tcp_hdr->cksum = 0;
@ -278,7 +278,7 @@ ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
(l3_hdr + txm->l3_len);
if (txm->ol_flags & PKT_TX_IPV4) {
if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
ipv4_hdr->hdr_checksum = 0;
tcp_hdr->cksum = 0;
@ -355,14 +355,14 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
uint32_t offset = 0;
bool start, done;
bool encap;
bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN);
bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
uint16_t vlan_tci = txm->vlan_tci;
uint64_t ol_flags = txm->ol_flags;
encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
((ol_flags & PKT_TX_OUTER_IPV4) ||
(ol_flags & PKT_TX_OUTER_IPV6));
encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));
/* Preload inner-most TCP csum field with IP pseudo hdr
* calculated with IP length set to zero. HW will later
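The preload the (truncated) comment describes is standard TSO preparation: seed the inner TCP checksum with the pseudo-header sum computed as if the IP length were zero, so the hardware only folds in each segment's payload. A sketch using the public helper, which skips the length field when the TSO flag is passed:

#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>

/* Sketch: seed the inner TCP checksum for TSO. */
static void
seed_tso_cksum(struct rte_ipv4_hdr *ip, struct rte_tcp_hdr *tcp)
{
    tcp->cksum = rte_ipv4_phdr_cksum(ip, RTE_MBUF_F_TX_TCP_SEG);
}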
@ -477,15 +477,15 @@ ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
desc = &desc_base[q->head_idx];
info = IONIC_INFO_PTR(q, q->head_idx);
if ((ol_flags & PKT_TX_IP_CKSUM) &&
if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
(txq->flags & IONIC_QCQ_F_CSUM_L3)) {
opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
}
if (((ol_flags & PKT_TX_TCP_CKSUM) &&
if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
(txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
((ol_flags & PKT_TX_UDP_CKSUM) &&
((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
(txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
@ -494,11 +494,11 @@ ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
stats->no_csum++;
has_vlan = (ol_flags & PKT_TX_VLAN);
encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
((ol_flags & PKT_TX_OUTER_IPV4) ||
(ol_flags & PKT_TX_OUTER_IPV6));
has_vlan = (ol_flags & RTE_MBUF_F_TX_VLAN);
encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));
flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
@ -555,7 +555,7 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
rte_prefetch0(&q->info[next_q_head_idx]);
}
if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
if (tx_pkts[nb_tx]->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
err = ionic_tx_tso(txq, tx_pkts[nb_tx]);
else
err = ionic_tx(txq, tx_pkts[nb_tx]);
@ -585,16 +585,15 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
*
**********************************************************************/
#define IONIC_TX_OFFLOAD_MASK ( \
PKT_TX_IPV4 | \
PKT_TX_IPV6 | \
PKT_TX_VLAN | \
PKT_TX_IP_CKSUM | \
PKT_TX_TCP_SEG | \
PKT_TX_L4_MASK)
#define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 | \
RTE_MBUF_F_TX_IPV6 | \
RTE_MBUF_F_TX_VLAN | \
RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_TCP_SEG | \
RTE_MBUF_F_TX_L4_MASK)
#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@ -840,30 +839,30 @@ ionic_rx_clean(struct ionic_rx_qcq *rxq,
}
/* RSS */
pkt_flags |= PKT_RX_RSS_HASH;
pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
rxm->hash.rss = cq_desc->rss_hash;
/* Vlan Strip */
if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
rxm->vlan_tci = cq_desc->vlan_tci;
}
/* Checksum */
if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
pkt_flags |= PKT_RX_IP_CKSUM_BAD;
pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
(cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
else if ((cq_desc->csum_flags &
IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
(cq_desc->csum_flags &
IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
pkt_flags |= PKT_RX_L4_CKSUM_BAD;
pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
rxm->ol_flags = pkt_flags;

View File

@ -1958,10 +1958,10 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
rxq = dev->data->rx_queues[queue];
if (on) {
rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
} else {
rxq->vlan_flags = PKT_RX_VLAN;
rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
}

View File

@ -54,27 +54,26 @@
#include "ixgbe_rxtx.h"
#ifdef RTE_LIBRTE_IEEE1588
#define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
#define IXGBE_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
#else
#define IXGBE_TX_IEEE1588_TMST 0
#endif
/* Bit Mask to indicate what bits required for building TX context */
#define IXGBE_TX_OFFLOAD_MASK ( \
PKT_TX_OUTER_IPV6 | \
PKT_TX_OUTER_IPV4 | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
PKT_TX_VLAN | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
PKT_TX_MACSEC | \
PKT_TX_OUTER_IP_CKSUM | \
PKT_TX_SEC_OFFLOAD | \
#define IXGBE_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 | \
RTE_MBUF_F_TX_OUTER_IPV4 | \
RTE_MBUF_F_TX_IPV6 | \
RTE_MBUF_F_TX_IPV4 | \
RTE_MBUF_F_TX_VLAN | \
RTE_MBUF_F_TX_IP_CKSUM | \
RTE_MBUF_F_TX_L4_MASK | \
RTE_MBUF_F_TX_TCP_SEG | \
RTE_MBUF_F_TX_MACSEC | \
RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
RTE_MBUF_F_TX_SEC_OFFLOAD | \
IXGBE_TX_IEEE1588_TMST)
#define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
#if 1
#define RTE_PMD_USE_PREFETCH
@ -384,13 +383,13 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
/* Specify which HW CTX to upload. */
mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
if (ol_flags & PKT_TX_VLAN)
if (ol_flags & RTE_MBUF_F_TX_VLAN)
tx_offload_mask.vlan_tci |= ~0;
/* check if TCP segmentation required for this packet */
if (ol_flags & PKT_TX_TCP_SEG) {
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
/* implies IP cksum in IPv4 */
if (ol_flags & PKT_TX_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
IXGBE_ADVTXD_TUCMD_L4T_TCP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
@ -406,14 +405,14 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
} else { /* no TSO, check if hardware checksum is needed */
if (ol_flags & PKT_TX_IP_CKSUM) {
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
}
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_UDP_CKSUM:
switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
case RTE_MBUF_F_TX_UDP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct rte_udp_hdr)
@ -421,7 +420,7 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
break;
case PKT_TX_TCP_CKSUM:
case RTE_MBUF_F_TX_TCP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
@ -429,7 +428,7 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
break;
case PKT_TX_SCTP_CKSUM:
case RTE_MBUF_F_TX_SCTP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
@ -444,7 +443,7 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
}
}
if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
tx_offload_mask.outer_l2_len |= ~0;
tx_offload_mask.outer_l3_len |= ~0;
tx_offload_mask.l2_len |= ~0;
@ -454,7 +453,7 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
<< IXGBE_ADVTXD_TUNNEL_LEN;
}
#ifdef RTE_LIB_SECURITY
if (ol_flags & PKT_TX_SEC_OFFLOAD) {
if (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
union ixgbe_crypto_tx_desc_md *md =
(union ixgbe_crypto_tx_desc_md *)mdata;
seqnum_seed |=
@ -478,7 +477,7 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
vlan_macip_lens = tx_offload.l3_len;
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
vlan_macip_lens |= (tx_offload.outer_l2_len <<
IXGBE_ADVTXD_MACLEN_SHIFT);
else
@ -528,11 +527,11 @@ tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
{
uint32_t tmp = 0;
if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM)
tmp |= IXGBE_ADVTXD_POPTS_TXSM;
if (ol_flags & PKT_TX_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
tmp |= IXGBE_ADVTXD_POPTS_IXSM;
if (ol_flags & PKT_TX_TCP_SEG)
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
tmp |= IXGBE_ADVTXD_POPTS_TXSM;
return tmp;
}
@ -542,13 +541,13 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
{
uint32_t cmdtype = 0;
if (ol_flags & PKT_TX_VLAN)
if (ol_flags & RTE_MBUF_F_TX_VLAN)
cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
if (ol_flags & PKT_TX_TCP_SEG)
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
if (ol_flags & PKT_TX_MACSEC)
if (ol_flags & RTE_MBUF_F_TX_MACSEC)
cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
return cmdtype;
}
@ -677,7 +676,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
*/
ol_flags = tx_pkt->ol_flags;
#ifdef RTE_LIB_SECURITY
use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
use_ipsec = txq->using_ipsec && (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
#endif
/* If hardware offload required */
@ -825,14 +824,14 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
#ifdef RTE_LIBRTE_IEEE1588
if (ol_flags & PKT_TX_IEEE1588_TMST)
if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
#endif
olinfo_status = 0;
if (tx_ol_req) {
if (ol_flags & PKT_TX_TCP_SEG) {
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
/* when TSO is on, paylen in descriptor is
* not the packet len but the TCP payload len */
pkt_len -= (tx_offload.l2_len +
@ -1432,14 +1431,14 @@ static inline uint64_t
ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
{
static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, 0, 0, 0,
0, 0, 0, PKT_RX_FDIR,
0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
0, RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH,
RTE_MBUF_F_RX_RSS_HASH, 0, 0, 0,
0, 0, 0, RTE_MBUF_F_RX_FDIR,
};
#ifdef RTE_LIBRTE_IEEE1588
static uint64_t ip_pkt_etqf_map[8] = {
0, 0, 0, PKT_RX_IEEE1588_PTP,
0, 0, 0, RTE_MBUF_F_RX_IEEE1588_PTP,
0, 0, 0, 0,
};
@ -1467,7 +1466,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
#ifdef RTE_LIBRTE_IEEE1588
if (rx_status & IXGBE_RXD_STAT_TMST)
pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
pkt_flags = pkt_flags | RTE_MBUF_F_RX_IEEE1588_TMST;
#endif
return pkt_flags;
}
@ -1483,10 +1482,10 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status, uint16_t pkt_info,
* Bit 30: L4I, L4I integrity error
*/
static uint64_t error_to_pkt_flags_map[4] = {
PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD
};
pkt_flags = error_to_pkt_flags_map[(rx_status >>
IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
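The lookup above is a pattern worth naming: the device's two checksum-integrity error bits form a 2-bit index into a table of precombined IP/L4 flag pairs, avoiding per-bit branches. A generic sketch with illustrative parameters rather than the ixgbe macros:

#include <stdint.h>

/* Sketch: map two adjacent hardware error bits to mbuf flags. */
static uint64_t
cksum_bits_to_flags(uint32_t status, unsigned int shift,
                    const uint64_t map[4])
{
    return map[(status >> shift) & 0x3];
}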
@ -1498,18 +1497,18 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status, uint16_t pkt_info,
if ((rx_status & IXGBE_RXDADV_ERR_TCPE) &&
(pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
rx_udp_csum_zero_err)
pkt_flags &= ~PKT_RX_L4_CKSUM_BAD;
pkt_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_BAD;
if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
(rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
pkt_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
pkt_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
}
#ifdef RTE_LIB_SECURITY
if (rx_status & IXGBE_RXD_STAT_SECP) {
pkt_flags |= PKT_RX_SEC_OFFLOAD;
pkt_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
pkt_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
}
#endif
@ -1596,10 +1595,10 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
ixgbe_rxd_pkt_info_to_pkt_type
(pkt_info[j], rxq->pkt_type_mask);
if (likely(pkt_flags & PKT_RX_RSS_HASH))
if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
mb->hash.rss = rte_le_to_cpu_32(
rxdp[j].wb.lower.hi_dword.rss);
else if (pkt_flags & PKT_RX_FDIR) {
else if (pkt_flags & RTE_MBUF_F_RX_FDIR) {
mb->hash.fdir.hash = rte_le_to_cpu_16(
rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
IXGBE_ATR_HASH_MASK;
@ -1917,7 +1916,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->port = rxq->port_id;
pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN set in pkt_flags */
/* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
@ -1931,10 +1930,10 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
ixgbe_rxd_pkt_info_to_pkt_type(pkt_info,
rxq->pkt_type_mask);
if (likely(pkt_flags & PKT_RX_RSS_HASH))
if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
rxm->hash.rss = rte_le_to_cpu_32(
rxd.wb.lower.hi_dword.rss);
else if (pkt_flags & PKT_RX_FDIR) {
else if (pkt_flags & RTE_MBUF_F_RX_FDIR) {
rxm->hash.fdir.hash = rte_le_to_cpu_16(
rxd.wb.lower.hi_dword.csum_ip.csum) &
IXGBE_ATR_HASH_MASK;
@ -2010,7 +2009,7 @@ ixgbe_fill_cluster_head_buf(
head->port = rxq->port_id;
/* The vlan_tci field is only valid when PKT_RX_VLAN is
/* The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
* set in the pkt_flags field.
*/
head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
@ -2023,9 +2022,9 @@ ixgbe_fill_cluster_head_buf(
head->packet_type =
ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask);
if (likely(pkt_flags & PKT_RX_RSS_HASH))
if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
else if (pkt_flags & PKT_RX_FDIR) {
else if (pkt_flags & RTE_MBUF_F_RX_FDIR) {
head->hash.fdir.hash =
rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
& IXGBE_ATR_HASH_MASK;

View File

@ -105,10 +105,10 @@ desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
0x00, 0x00, 0x00, 0x00};
const uint8x16_t rss_flags = {
0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, 0, 0, 0,
0, 0, 0, PKT_RX_FDIR};
0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
0, RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH,
RTE_MBUF_F_RX_RSS_HASH, 0, 0, 0,
0, 0, 0, RTE_MBUF_F_RX_FDIR};
/* mask everything except vlan present and l4/ip csum error */
const uint8x16_t vlan_csum_msk = {
@ -123,23 +123,23 @@ desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
/* map vlan present (0x8), IPE (0x2), L4E (0x1) to ol_flags */
const uint8x16_t vlan_csum_map_lo = {
PKT_RX_IP_CKSUM_GOOD,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
PKT_RX_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_GOOD,
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
0, 0, 0, 0,
vlan_flags | PKT_RX_IP_CKSUM_GOOD,
vlan_flags | PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
vlan_flags | PKT_RX_IP_CKSUM_BAD,
vlan_flags | PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_GOOD,
vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_BAD,
vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
0, 0, 0, 0};
const uint8x16_t vlan_csum_map_hi = {
PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
0, 0, 0, 0,
PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
0, 0, 0, 0};
/* change mask from 0x200(IXGBE_RXDADV_PKTTYPE_UDP) to 0x2 */
@ -153,7 +153,7 @@ desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
0, 0, 0, 0};
const uint8x16_t udp_csum_bad_shuf = {
0xFF, ~(uint8_t)PKT_RX_L4_CKSUM_BAD, 0, 0,
0xFF, ~(uint8_t)RTE_MBUF_F_RX_L4_CKSUM_BAD, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0};
@ -194,7 +194,7 @@ desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
vtag_lo = vorrq_u8(ptype, vtag_lo);
/* convert the UDP header present 0x2 to 0x1 for aligning with each
* PKT_RX_L4_CKSUM_BAD value in low byte of 8 bits word ol_flag in
* RTE_MBUF_F_RX_L4_CKSUM_BAD value in low byte of 8 bits word ol_flag in
* vtag_lo (4x8). Then mask out the bad checksum value by shuffle and
* bit-mask.
*/
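A scalar restatement of what that shuffle accomplishes, assuming RTE_MBUF_F_RX_L4_CKSUM_BAD sits in the low byte (bit 3, as I recall from rte_mbuf_core.h): when the UDP-present bit has been folded down to 0x1, the spurious bad-L4 flag from the zero-UDP-checksum erratum is masked off; index 0 selects 0xFF and leaves other packets untouched:

#include <stdint.h>

#define F_RX_L4_CKSUM_BAD (1ULL << 3)  /* assumed bit position */

/* Sketch: clear the bad-L4 flag only for erratum-affected UDP. */
static uint8_t
fix_udp_csum_flag(uint8_t ol_flag_byte, int udp_hit)
{
    const uint8_t shuf[2] = { 0xFF, (uint8_t)~F_RX_L4_CKSUM_BAD };
    return ol_flag_byte & shuf[udp_hit];
}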
@ -337,7 +337,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
sw_ring = &rxq->sw_ring[rxq->rx_tail];
/* ensure these 2 flags are in the lower 8 bits */
RTE_BUILD_BUG_ON((PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
RTE_BUILD_BUG_ON((RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED) > UINT8_MAX);
vlan_flags = rxq->vlan_flags & UINT8_MAX;
/* A. load 4 packet in one loop

View File

@ -108,9 +108,9 @@ desc_to_olflags_v_ipsec(__m128i descs[4], struct rte_mbuf **rx_pkts)
const __m128i ipsec_proc_msk =
_mm_set1_epi32(IXGBE_RXDADV_IPSEC_STATUS_SECP);
const __m128i ipsec_err_flag =
_mm_set1_epi32(PKT_RX_SEC_OFFLOAD_FAILED |
PKT_RX_SEC_OFFLOAD);
const __m128i ipsec_proc_flag = _mm_set1_epi32(PKT_RX_SEC_OFFLOAD);
_mm_set1_epi32(RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED |
RTE_MBUF_F_RX_SEC_OFFLOAD);
const __m128i ipsec_proc_flag = _mm_set1_epi32(RTE_MBUF_F_RX_SEC_OFFLOAD);
rearm = _mm_set_epi32(*rearm3, *rearm2, *rearm1, *rearm0);
sterr = _mm_set_epi32(_mm_extract_epi32(descs[3], 2),
@ -148,10 +148,10 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
0x00FF, 0x00FF, 0x00FF, 0x00FF);
/* map rss type to rss hash flag */
const __m128i rss_flags = _mm_set_epi8(PKT_RX_FDIR, 0, 0, 0,
0, 0, 0, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
const __m128i rss_flags = _mm_set_epi8(RTE_MBUF_F_RX_FDIR, 0, 0, 0,
0, 0, 0, RTE_MBUF_F_RX_RSS_HASH,
RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH, 0,
RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, 0);
/* mask everything except vlan present and l4/ip csum error */
const __m128i vlan_csum_msk = _mm_set_epi16(
@ -165,23 +165,23 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
/* map vlan present (0x8), IPE (0x2), L4E (0x1) to ol_flags */
const __m128i vlan_csum_map_lo = _mm_set_epi8(
0, 0, 0, 0,
vlan_flags | PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
vlan_flags | PKT_RX_IP_CKSUM_BAD,
vlan_flags | PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
vlan_flags | PKT_RX_IP_CKSUM_GOOD,
vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_BAD,
vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_GOOD,
0, 0, 0, 0,
PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
PKT_RX_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
PKT_RX_IP_CKSUM_GOOD);
RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
RTE_MBUF_F_RX_IP_CKSUM_GOOD);
const __m128i vlan_csum_map_hi = _mm_set_epi8(
0, 0, 0, 0,
0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t),
0, RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t),
0, 0, 0, 0,
0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t));
0, RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t));
/* mask everything except UDP header present if specified */
const __m128i udp_hdr_p_msk = _mm_set_epi16
@ -190,7 +190,7 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
const __m128i udp_csum_bad_shuf = _mm_set_epi8
(0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, ~(uint8_t)PKT_RX_L4_CKSUM_BAD, 0xFF);
0, 0, 0, 0, 0, 0, ~(uint8_t)RTE_MBUF_F_RX_L4_CKSUM_BAD, 0xFF);
ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
@ -228,7 +228,7 @@ desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
vtag1 = _mm_or_si128(ptype0, vtag1);
/* convert the UDP header present 0x200 to 0x1 for aligning with each
* PKT_RX_L4_CKSUM_BAD value in low byte of 16 bits word ol_flag in
* RTE_MBUF_F_RX_L4_CKSUM_BAD value in low byte of 16 bits word ol_flag in
* vtag1 (4x16). Then mask out the bad checksum value by shuffle and
* bit-mask.
*/
@ -428,7 +428,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
sw_ring = &rxq->sw_ring[rxq->rx_tail];
/* ensure these 2 flags are in the lower 8 bits */
RTE_BUILD_BUG_ON((PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
RTE_BUILD_BUG_ON((RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED) > UINT8_MAX);
vlan_flags = rxq->vlan_flags & UINT8_MAX;
/* A. load 4 packet in one loop

View File

@ -437,7 +437,7 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
if (rh->r_dh.has_hash) {
uint64_t *hash_ptr;
nicbuf->ol_flags |= PKT_RX_RSS_HASH;
nicbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
hash_ptr = rte_pktmbuf_mtod(nicbuf,
uint64_t *);
lio_swap_8B_data(hash_ptr, 1);
@ -494,7 +494,7 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
uint64_t *hash_ptr;
nicbuf->ol_flags |=
PKT_RX_RSS_HASH;
RTE_MBUF_F_RX_RSS_HASH;
hash_ptr = rte_pktmbuf_mtod(
nicbuf, uint64_t *);
lio_swap_8B_data(hash_ptr, 1);
@ -547,10 +547,10 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
struct rte_mbuf *m = rx_pkts[data_pkts - 1];
if (rh->r_dh.csum_verified & LIO_IP_CSUM_VERIFIED)
m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
if (rh->r_dh.csum_verified & LIO_L4_CSUM_VERIFIED)
m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (droq->refill_count >= droq->refill_threshold) {
@ -1675,13 +1675,13 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
cmdsetup.s.iq_no = iq_no;
/* check checksum offload flags to form cmd */
if (m->ol_flags & PKT_TX_IP_CKSUM)
if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
cmdsetup.s.ip_csum = 1;
if (m->ol_flags & PKT_TX_OUTER_IP_CKSUM)
if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
cmdsetup.s.tnl_csum = 1;
else if ((m->ol_flags & PKT_TX_TCP_CKSUM) ||
(m->ol_flags & PKT_TX_UDP_CKSUM))
else if ((m->ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) ||
(m->ol_flags & RTE_MBUF_F_TX_UDP_CKSUM))
cmdsetup.s.transport_csum = 1;
if (m->nb_segs == 1) {

View File

@ -406,7 +406,7 @@ mlx4_tx_burst_tso_get_params(struct rte_mbuf *buf,
{
struct mlx4_sq *sq = &txq->msq;
const uint8_t tunneled = txq->priv->hw_csum_l2tun &&
(buf->ol_flags & PKT_TX_TUNNEL_MASK);
(buf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
tinfo->tso_header_size = buf->l2_len + buf->l3_len + buf->l4_len;
if (tunneled)
@ -915,7 +915,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint16_t flags16[2];
} srcrb;
uint32_t lkey;
bool tso = txq->priv->tso && (buf->ol_flags & PKT_TX_TCP_SEG);
bool tso = txq->priv->tso && (buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG);
/* Clean up old buffer. */
if (likely(elt->buf != NULL)) {
@ -991,15 +991,15 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Enable HW checksum offload if requested */
if (txq->csum &&
(buf->ol_flags &
(PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))) {
(RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM))) {
const uint64_t is_tunneled = (buf->ol_flags &
(PKT_TX_TUNNEL_GRE |
PKT_TX_TUNNEL_VXLAN));
(RTE_MBUF_F_TX_TUNNEL_GRE |
RTE_MBUF_F_TX_TUNNEL_VXLAN));
if (is_tunneled && txq->csum_l2tun) {
owner_opcode |= MLX4_WQE_CTRL_IIP_HDR_CSUM |
MLX4_WQE_CTRL_IL4_HDR_CSUM;
if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
if (buf->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
srcrb.flags |=
RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM);
} else {
@ -1112,18 +1112,18 @@ rxq_cq_to_ol_flags(uint32_t flags, int csum, int csum_l2tun)
ol_flags |=
mlx4_transpose(flags,
MLX4_CQE_STATUS_IP_HDR_CSUM_OK,
PKT_RX_IP_CKSUM_GOOD) |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) |
mlx4_transpose(flags,
MLX4_CQE_STATUS_TCP_UDP_CSUM_OK,
PKT_RX_L4_CKSUM_GOOD);
RTE_MBUF_F_RX_L4_CKSUM_GOOD);
if ((flags & MLX4_CQE_L2_TUNNEL) && csum_l2tun)
ol_flags |=
mlx4_transpose(flags,
MLX4_CQE_L2_TUNNEL_IPOK,
PKT_RX_IP_CKSUM_GOOD) |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) |
mlx4_transpose(flags,
MLX4_CQE_L2_TUNNEL_L4_CSUM,
PKT_RX_L4_CKSUM_GOOD);
RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return ol_flags;
}
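mlx4_transpose() above relocates one status bit from its CQE position to the corresponding mbuf-flag position. A generic sketch of the idea (the driver's actual macro is arithmetic rather than conditional, so take this as the semantic model only):

#include <stdint.h>

/* Sketch: emit flag `to` when bit `from` is set in `val`. */
static inline uint64_t
transpose_bit(uint64_t val, uint64_t from, uint64_t to)
{
    return (val & from) ? to : 0;
}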
@ -1274,7 +1274,7 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Update packet information. */
pkt->packet_type =
rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
pkt->ol_flags = PKT_RX_RSS_HASH;
pkt->ol_flags = RTE_MBUF_F_RX_RSS_HASH;
pkt->hash.rss = cqe->immed_rss_invalid;
if (rxq->crc_present)
len -= RTE_ETHER_CRC_LEN;

View File

@ -9391,7 +9391,7 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
{
uint64_t ol_flags = m->ol_flags;
const struct mlx5_flow_tbl_data_entry *tble;
const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
const uint64_t mask = RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
if (!is_tunnel_offload_active(dev)) {
info->flags = 0;

View File

@ -692,10 +692,10 @@ rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
ol_flags =
TRANSPOSE(flags,
MLX5_CQE_RX_L3_HDR_VALID,
PKT_RX_IP_CKSUM_GOOD) |
RTE_MBUF_F_RX_IP_CKSUM_GOOD) |
TRANSPOSE(flags,
MLX5_CQE_RX_L4_HDR_VALID,
PKT_RX_L4_CKSUM_GOOD);
RTE_MBUF_F_RX_L4_CKSUM_GOOD);
return ol_flags;
}
@ -731,7 +731,7 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
rss_hash_res = rte_be_to_cpu_32(mcqe->rx_hash_result);
if (rss_hash_res) {
pkt->hash.rss = rss_hash_res;
pkt->ol_flags |= PKT_RX_RSS_HASH;
pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
}
}
if (rxq->mark) {
@ -745,9 +745,9 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
mark = ((mcqe->byte_cnt_flow & 0xff) << 8) |
(mcqe->flow_tag_high << 16);
if (MLX5_FLOW_MARK_IS_VALID(mark)) {
pkt->ol_flags |= PKT_RX_FDIR;
pkt->ol_flags |= RTE_MBUF_F_RX_FDIR;
if (mark != RTE_BE32(MLX5_FLOW_MARK_DEFAULT)) {
pkt->ol_flags |= PKT_RX_FDIR_ID;
pkt->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
}
}
@ -775,7 +775,7 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
vlan_strip = mcqe->hdr_type &
RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
if (vlan_strip) {
pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
pkt->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
}
}
@ -863,7 +863,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
pkt = seg;
MLX5_ASSERT(len >= (rxq->crc_present << 2));
pkt->ol_flags &= EXT_ATTACHED_MBUF;
pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
if (rxq->crc_present)
len -= RTE_ETHER_CRC_LEN;
@ -872,7 +872,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
mlx5_lro_update_hdr
(rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
mcqe, rxq, len);
pkt->ol_flags |= PKT_RX_LRO;
pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
pkt->tso_segsz = len / cqe->lro_num_seg;
}
}
@ -1130,7 +1130,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (cqe->lro_num_seg > 1) {
mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *),
cqe, mcqe, rxq, len);
pkt->ol_flags |= PKT_RX_LRO;
pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
pkt->tso_segsz = len / cqe->lro_num_seg;
}
PKT_LEN(pkt) = len;

View File

@ -483,7 +483,7 @@ mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
shinfo = &buf->shinfos[strd_idx];
rte_mbuf_ext_refcnt_set(shinfo, 1);
/*
* EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
* RTE_MBUF_F_EXTERNAL will be set to pkt->ol_flags when
* attaching the stride to mbuf and more offload flags
* will be added below by calling rxq_cq_to_mbuf().
* Other fields will be overwritten.
@ -492,7 +492,7 @@ mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
buf_len, shinfo);
/* Set mbuf head-room. */
SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
MLX5_ASSERT(pkt->ol_flags == RTE_MBUF_F_EXTERNAL);
MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
DATA_LEN(pkt) = len;
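The RTE_MBUF_F_EXTERNAL flag the earlier comment mentions is set by the generic attach helper rather than by the driver itself, which is why the assertion above can test for exact equality. A sketch of the attach pattern with the public API:

#include <rte_mbuf.h>

/* Sketch: attach an external buffer; the helper sets
 * RTE_MBUF_F_EXTERNAL in m->ol_flags as a side effect. */
static void
attach_stride(struct rte_mbuf *m, void *buf, rte_iova_t iova,
              uint16_t len, struct rte_mbuf_ext_shared_info *shinfo)
{
    rte_mbuf_ext_refcnt_set(shinfo, 1);
    rte_pktmbuf_attach_extbuf(m, buf, iova, len, shinfo);
}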

View File

@ -181,7 +181,7 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
mbuf_init->nb_segs = 1;
mbuf_init->port = rxq->port_id;
if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
mbuf_init->ol_flags = RTE_MBUF_F_EXTERNAL;
/*
* prevent compiler reordering:
* rearm_data covers previous fields.

View File

@ -254,10 +254,10 @@ mlx5_set_cksum_table(void)
/*
* The index should have:
* bit[0] = PKT_TX_TCP_SEG
* bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
* bit[4] = PKT_TX_IP_CKSUM
* bit[8] = PKT_TX_OUTER_IP_CKSUM
* bit[0] = RTE_MBUF_F_TX_TCP_SEG
* bit[2:3] = RTE_MBUF_F_TX_UDP_CKSUM, RTE_MBUF_F_TX_TCP_CKSUM
* bit[4] = RTE_MBUF_F_TX_IP_CKSUM
* bit[8] = RTE_MBUF_F_TX_OUTER_IP_CKSUM
* bit[9] = tunnel
*/
for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
@ -292,10 +292,10 @@ mlx5_set_swp_types_table(void)
/*
* The index should have:
* bit[0:1] = PKT_TX_L4_MASK
* bit[4] = PKT_TX_IPV6
* bit[8] = PKT_TX_OUTER_IPV6
* bit[9] = PKT_TX_OUTER_UDP
* bit[0:1] = RTE_MBUF_F_TX_L4_MASK
* bit[4] = RTE_MBUF_F_TX_IPV6
* bit[8] = RTE_MBUF_F_TX_OUTER_IPV6
* bit[9] = RTE_MBUF_F_TX_OUTER_UDP
*/
for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
v = 0;
@ -305,7 +305,7 @@ mlx5_set_swp_types_table(void)
v |= MLX5_ETH_WQE_L4_OUTER_UDP;
if (i & (1 << 4))
v |= MLX5_ETH_WQE_L3_INNER_IPV6;
if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
if ((i & 3) == (RTE_MBUF_F_TX_UDP_CKSUM >> 52))
v |= MLX5_ETH_WQE_L4_INNER_UDP;
mlx5_swp_types_table[i] = v;
}
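A worked check of the (i & 3) == (RTE_MBUF_F_TX_UDP_CKSUM >> 52) test above, assuming the two-bit L4 request field sits at bits 52-53 with UDP encoded as 3 (matching rte_mbuf_core.h as I recall; treat the defines as assumptions):

#include <assert.h>
#include <stdint.h>

#define F_TX_TCP_CKSUM  (1ULL << 52)  /* assumed encodings */
#define F_TX_SCTP_CKSUM (2ULL << 52)
#define F_TX_UDP_CKSUM  (3ULL << 52)

int main(void)
{
    /* index bits [0:1] mirror ol_flags bits [52:53], so
     * (i & 3) == 3 selects exactly the inner-UDP request */
    assert((F_TX_UDP_CKSUM >> 52) == 3);
    assert((F_TX_TCP_CKSUM >> 52) == 1);
    return 0;
}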

Some files were not shown because too many files have changed in this diff.