mbuf: remove packet type from offload flags

The extended unified packet type is now part of the standard ABI.
As the mbuf struct is changed, the mbuf library version is incremented.

Signed-off-by: Thomas Monjalon <thomas.monjalon@6wind.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Thomas Monjalon 2015-08-31 16:51:21 +02:00
parent d8c4ae2755
commit ab351fe1c9
25 changed files with 6 additions and 897 deletions
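
Context for readers following the migration: the L3 and tunnel indications move out of ol_flags (PKT_RX_IPV4_HDR, PKT_RX_IPV6_HDR and the PKT_RX_TUNNEL_* flags) and into the 32-bit packet_type field. A minimal sketch of the new-style check, using only the RTE_ETH_IS_* helpers that ship with the unified packet type; the function name l3_family is hypothetical:

    #include <rte_mbuf.h>

    /* Classify the L3 family from the unified packet type instead of
     * the removed PKT_RX_IPV4_HDR/PKT_RX_IPV6_HDR offload flags. */
    static int
    l3_family(const struct rte_mbuf *m)
    {
            if (RTE_ETH_IS_IPV4_HDR(m->packet_type))
                    return 4;
            if (RTE_ETH_IS_IPV6_HDR(m->packet_type))
                    return 6;
            return 0; /* non-IP, or not classified by the PMD */
    }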

@@ -459,33 +459,21 @@ app_main_loop_rx_metadata(void) {
signature = RTE_MBUF_METADATA_UINT32_PTR(m, 0);
key = RTE_MBUF_METADATA_UINT8_PTR(m, 32);
#ifdef RTE_NEXT_ABI
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
#else
if (m->ol_flags & PKT_RX_IPV4_HDR) {
#endif
ip_hdr = (struct ipv4_hdr *)
&m_data[sizeof(struct ether_hdr)];
ip_dst = ip_hdr->dst_addr;
k32 = (uint32_t *) key;
k32[0] = ip_dst & 0xFFFFFF00;
#ifdef RTE_NEXT_ABI
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
#else
} else {
#endif
ipv6_hdr = (struct ipv6_hdr *)
&m_data[sizeof(struct ether_hdr)];
ipv6_dst = ipv6_hdr->dst_addr;
memcpy(key, ipv6_dst, 16);
#ifdef RTE_NEXT_ABI
} else
continue;
#else
}
#endif
*signature = test_hash(key, 0, 0);
}

@@ -202,14 +202,9 @@ parse_ethernet(struct ether_hdr *eth_hdr, struct testpmd_offload_info *info)
/* Parse a vxlan header */
static void
#ifdef RTE_NEXT_ABI
parse_vxlan(struct udp_hdr *udp_hdr,
struct testpmd_offload_info *info,
uint32_t pkt_type)
#else
parse_vxlan(struct udp_hdr *udp_hdr, struct testpmd_offload_info *info,
uint64_t mbuf_olflags)
#endif
{
struct ether_hdr *eth_hdr;
@@ -217,12 +212,7 @@ parse_vxlan(struct udp_hdr *udp_hdr, struct testpmd_offload_info *info,
* (rfc7348) or that the rx offload flag is set (i40e only
* currently) */
if (udp_hdr->dst_port != _htons(4789) &&
#ifdef RTE_NEXT_ABI
RTE_ETH_IS_TUNNEL_PKT(pkt_type) == 0)
#else
(mbuf_olflags & (PKT_RX_TUNNEL_IPV4_HDR |
PKT_RX_TUNNEL_IPV6_HDR)) == 0)
#endif
return;
info->is_tunnel = 1;
@@ -559,11 +549,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
struct udp_hdr *udp_hdr;
udp_hdr = (struct udp_hdr *)((char *)l3_hdr +
info.l3_len);
#ifdef RTE_NEXT_ABI
parse_vxlan(udp_hdr, &info, m->packet_type);
#else
parse_vxlan(udp_hdr, &info, m->ol_flags);
#endif
} else if (info.l4_proto == IPPROTO_GRE) {
struct simple_gre_hdr *gre_hdr;
gre_hdr = (struct simple_gre_hdr *)
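
The VXLAN test above survives the flag removal because the tunnel indication is folded into packet_type. The same logic in isolation, as a sketch; looks_like_vxlan and its arguments are illustrative:

    #include <rte_byteorder.h>
    #include <rte_mbuf.h>

    #define VXLAN_PORT 4789 /* IANA default, RFC 7348 */

    /* Treat a packet as VXLAN when the UDP destination port matches the
     * default, or when the PMD already marked it as a tunnel in the
     * unified packet type (replaces PKT_RX_TUNNEL_IPV4_HDR/IPV6_HDR). */
    static int
    looks_like_vxlan(const struct rte_mbuf *m, uint16_t udp_dst_be)
    {
            if (udp_dst_be == rte_cpu_to_be_16(VXLAN_PORT))
                    return 1;
            return RTE_ETH_IS_TUNNEL_PKT(m->packet_type) != 0;
    }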

@@ -91,11 +91,7 @@ pkt_burst_receive(struct fwd_stream *fs)
uint64_t ol_flags;
uint16_t nb_rx;
uint16_t i, packet_type;
#ifdef RTE_NEXT_ABI
uint16_t is_encapsulation;
#else
uint64_t is_encapsulation;
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
@@ -138,13 +134,7 @@ pkt_burst_receive(struct fwd_stream *fs)
eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type);
ol_flags = mb->ol_flags;
packet_type = mb->packet_type;
#ifdef RTE_NEXT_ABI
is_encapsulation = RTE_ETH_IS_TUNNEL_PKT(packet_type);
#else
is_encapsulation = ol_flags & (PKT_RX_TUNNEL_IPV4_HDR |
PKT_RX_TUNNEL_IPV6_HDR);
#endif
print_ether_addr(" src=", &eth_hdr->s_addr);
print_ether_addr(" - dst=", &eth_hdr->d_addr);
@@ -171,7 +161,6 @@ pkt_burst_receive(struct fwd_stream *fs)
if (ol_flags & PKT_RX_QINQ_PKT)
printf(" - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x",
mb->vlan_tci, mb->vlan_tci_outer);
#ifdef RTE_NEXT_ABI
if (mb->packet_type) {
uint32_t ptype;
@@ -341,7 +330,6 @@ pkt_burst_receive(struct fwd_stream *fs)
printf("\n");
} else
printf("Unknown packet type\n");
#endif /* RTE_NEXT_ABI */
if (is_encapsulation) {
struct ipv4_hdr *ipv4_hdr;
struct ipv6_hdr *ipv6_hdr;
@@ -355,11 +343,7 @@ pkt_burst_receive(struct fwd_stream *fs)
l2_len = sizeof(struct ether_hdr);
/* Do not support ipv4 option field */
#ifdef RTE_NEXT_ABI
if (RTE_ETH_IS_IPV4_HDR(packet_type)) {
#else
if (ol_flags & PKT_RX_TUNNEL_IPV4_HDR) {
#endif
l3_len = sizeof(struct ipv4_hdr);
ipv4_hdr = rte_pktmbuf_mtod_offset(mb,
struct ipv4_hdr *,
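
With the packet type always available, testpmd prints it unconditionally. Decoding the 32-bit field is plain masking; a sketch, where dump_ptype is a hypothetical helper:

    #include <stdio.h>
    #include <rte_mbuf.h>

    /* Split the unified packet type into its outer fields. */
    static void
    dump_ptype(uint32_t ptype)
    {
            printf("l2=0x%x l3=0x%x l4=0x%x tunnel=0x%x\n",
                   (unsigned)(ptype & RTE_PTYPE_L2_MASK),
                   (unsigned)(ptype & RTE_PTYPE_L3_MASK),
                   (unsigned)(ptype & RTE_PTYPE_L4_MASK),
                   (unsigned)(ptype & RTE_PTYPE_TUNNEL_MASK));
    }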

@@ -273,21 +273,9 @@ generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
if (ipv4) {
pkt->vlan_tci = ETHER_TYPE_IPv4;
pkt->l3_len = sizeof(struct ipv4_hdr);
#ifndef RTE_NEXT_ABI
if (vlan_enabled)
pkt->ol_flags = PKT_RX_IPV4_HDR | PKT_RX_VLAN_PKT;
else
pkt->ol_flags = PKT_RX_IPV4_HDR;
#endif
} else {
pkt->vlan_tci = ETHER_TYPE_IPv6;
pkt->l3_len = sizeof(struct ipv6_hdr);
#ifndef RTE_NEXT_ABI
if (vlan_enabled)
pkt->ol_flags = PKT_RX_IPV6_HDR | PKT_RX_VLAN_PKT;
else
pkt->ol_flags = PKT_RX_IPV6_HDR;
#endif
}
pkts_burst[nb_pkt] = pkt;
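
The generator no longer fakes RX flags on crafted mbufs. A test that still wants receivers to see a classified L3 type can set packet_type directly; a sketch of that idea (tag_l3 is an assumption, not part of the test code):

    #include <rte_mbuf.h>

    /* Advertise the L3 class of a synthetic packet via the field that
     * is now part of the standard ABI. */
    static void
    tag_l3(struct rte_mbuf *pkt, int ipv4)
    {
            pkt->packet_type = ipv4 ? RTE_PTYPE_L3_IPV4 : RTE_PTYPE_L3_IPV6;
    }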

@@ -24,11 +24,6 @@ Deprecation Notices
* The field mem_location of the rte_lpm structure is deprecated and should be
removed as well as the macros RTE_LPM_HEAP and RTE_LPM_MEMZONE.
* Significant ABI changes are planned for struct rte_mbuf, struct rte_kni_mbuf,
and several ``PKT_RX_`` flags will be removed, to support unified packet type
from release 2.1. Those changes may be enabled in the upcoming release 2.1
with CONFIG_RTE_NEXT_ABI.
* librte_malloc library has been integrated into librte_eal. The 2.1 release
creates a dummy/empty malloc library to fulfill binaries with dynamic linking
dependencies on librte_malloc.so. Such dummy library will not be created from

@@ -23,6 +23,9 @@ ABI Changes
* The EAL and ethdev structures rte_intr_handle and rte_eth_conf were changed
to support Rx interrupt. It was already done in 2.1 for CONFIG_RTE_NEXT_ABI.
* The mbuf structure was changed to support unified packet type.
It was already done in 2.1 for CONFIG_RTE_NEXT_ABI.
Shared Library Versions
-----------------------
@@ -45,7 +48,7 @@ The libraries prepended with a plus sign were incremented in this version.
librte_kvargs.so.1
librte_lpm.so.1
librte_malloc.so.1
librte_mbuf.so.1
+ librte_mbuf.so.2
librte_mempool.so.1
librte_meter.so.1
librte_pipeline.so.1

@@ -1299,22 +1299,14 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
mbuf->port = pkt->iff;
if (pkt->l2info & htonl(F_RXF_IP)) {
#ifdef RTE_NEXT_ABI
mbuf->packet_type = RTE_PTYPE_L3_IPV4;
#else
mbuf->ol_flags |= PKT_RX_IPV4_HDR;
#endif
if (unlikely(!csum_ok))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
if ((pkt->l2info & htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
} else if (pkt->l2info & htonl(F_RXF_IP6)) {
#ifdef RTE_NEXT_ABI
mbuf->packet_type = RTE_PTYPE_L3_IPV6;
#else
mbuf->ol_flags |= PKT_RX_IPV6_HDR;
#endif
}
mbuf->port = pkt->iff;
@@ -1419,11 +1411,7 @@ static int process_responses(struct sge_rspq *q, int budget,
unmap_rx_buf(&rxq->fl);
if (cpl->l2info & htonl(F_RXF_IP)) {
#ifdef RTE_NEXT_ABI
pkt->packet_type = RTE_PTYPE_L3_IPV4;
#else
pkt->ol_flags |= PKT_RX_IPV4_HDR;
#endif
if (unlikely(!csum_ok))
pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;
@@ -1431,11 +1419,7 @@ static int process_responses(struct sge_rspq *q, int budget,
htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
} else if (cpl->l2info & htonl(F_RXF_IP6)) {
#ifdef RTE_NEXT_ABI
pkt->packet_type = RTE_PTYPE_L3_IPV6;
#else
pkt->ol_flags |= PKT_RX_IPV6_HDR;
#endif
}
if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
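
This is the PMD-side pattern the commit settles on across drivers: packet_type carries the classification, while ol_flags keeps genuine offload results such as checksum status. A condensed sketch; fill_rx_meta and its arguments stand in for the real descriptor parsing:

    #include <rte_mbuf.h>

    /* Classification goes to packet_type; only real offload outcomes
     * stay in ol_flags. */
    static void
    fill_rx_meta(struct rte_mbuf *m, int is_ipv4, int csum_ok)
    {
            m->packet_type = is_ipv4 ? RTE_PTYPE_L3_IPV4 : RTE_PTYPE_L3_IPV6;
            if (!csum_ok)
                    m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
    }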

@@ -590,7 +590,6 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* RX functions
*
**********************************************************************/
#ifdef RTE_NEXT_ABI
#define IGB_PACKET_TYPE_IPV4 0X01
#define IGB_PACKET_TYPE_IPV4_TCP 0X11
#define IGB_PACKET_TYPE_IPV4_UDP 0X21
@@ -684,35 +683,6 @@ rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
return pkt_flags;
}
#else /* RTE_NEXT_ABI */
static inline uint64_t
rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
{
uint64_t pkt_flags;
static uint64_t ip_pkt_types_map[16] = {
0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
PKT_RX_IPV6_HDR, 0, 0, 0,
PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
};
#if defined(RTE_LIBRTE_IEEE1588)
static uint32_t ip_pkt_etqf_map[8] = {
0, 0, 0, PKT_RX_IEEE1588_PTP,
0, 0, 0, 0,
};
pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
#else
pkt_flags = (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
#endif
return pkt_flags | (((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH);
}
#endif /* RTE_NEXT_ABI */
static inline uint64_t
rx_desc_status_to_pkt_flags(uint32_t rx_status)
@@ -886,10 +856,8 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
rxm->ol_flags = pkt_flags;
#ifdef RTE_NEXT_ABI
rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
lo_dword.hs_rss.pkt_info);
#endif
/*
* Store the mbuf address into the next entry of the array
@@ -1124,10 +1092,8 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
first_seg->ol_flags = pkt_flags;
#ifdef RTE_NEXT_ABI
first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
lower.lo_dword.hs_rss.pkt_info);
#endif
/* Prefetch data of first segment, if configured to do so. */
rte_packet_prefetch((char *)first_seg->buf_addr +
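
Like the other drivers touched here, igb now translates raw descriptor bits into RTE_PTYPE_* values through a lookup table instead of mapping them onto the removed PKT_RX_* flags. A toy version of the idiom; the indices below are illustrative, not the real e1000 encoding:

    #include <rte_mbuf.h>

    /* Map two hypothetical descriptor bits to unified packet types. */
    static uint32_t
    rxd_to_ptype(uint8_t hw_bits)
    {
            static const uint32_t table[4] = {
                    RTE_PTYPE_UNKNOWN,
                    RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
                    RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
                    RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
            };

            return table[hw_bits & 0x3];
    }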

@@ -423,11 +423,7 @@ static int enic_rq_indicate_buf(struct vnic_rq *rq,
rx_pkt->pkt_len = bytes_written;
if (ipv4) {
#ifdef RTE_NEXT_ABI
rx_pkt->packet_type = RTE_PTYPE_L3_IPV4;
#else
rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
#endif
if (!csum_not_calc) {
if (unlikely(!ipv4_csum_ok))
rx_pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;
@@ -436,11 +432,7 @@ static int enic_rq_indicate_buf(struct vnic_rq *rq,
rx_pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
} else if (ipv6)
#ifdef RTE_NEXT_ABI
rx_pkt->packet_type = RTE_PTYPE_L3_IPV6;
#else
rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
#endif
} else {
/* Header split */
if (sop && !eop) {
@@ -453,11 +445,7 @@ static int enic_rq_indicate_buf(struct vnic_rq *rq,
*rx_pkt_bucket = rx_pkt;
rx_pkt->pkt_len = bytes_written;
if (ipv4) {
#ifdef RTE_NEXT_ABI
rx_pkt->packet_type = RTE_PTYPE_L3_IPV4;
#else
rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
#endif
if (!csum_not_calc) {
if (unlikely(!ipv4_csum_ok))
rx_pkt->ol_flags |=
@@ -469,22 +457,14 @@ static int enic_rq_indicate_buf(struct vnic_rq *rq,
PKT_RX_L4_CKSUM_BAD;
}
} else if (ipv6)
#ifdef RTE_NEXT_ABI
rx_pkt->packet_type = RTE_PTYPE_L3_IPV6;
#else
rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
#endif
} else {
/* Payload */
hdr_rx_pkt = *rx_pkt_bucket;
hdr_rx_pkt->pkt_len += bytes_written;
if (ipv4) {
#ifdef RTE_NEXT_ABI
hdr_rx_pkt->packet_type =
RTE_PTYPE_L3_IPV4;
#else
hdr_rx_pkt->ol_flags |= PKT_RX_IPV4_HDR;
#endif
if (!csum_not_calc) {
if (unlikely(!ipv4_csum_ok))
hdr_rx_pkt->ol_flags |=
@@ -496,13 +476,8 @@ static int enic_rq_indicate_buf(struct vnic_rq *rq,
PKT_RX_L4_CKSUM_BAD;
}
} else if (ipv6)
#ifdef RTE_NEXT_ABI
hdr_rx_pkt->packet_type =
RTE_PTYPE_L3_IPV6;
#else
hdr_rx_pkt->ol_flags |= PKT_RX_IPV6_HDR;
#endif
}
}
}

@@ -68,7 +68,6 @@ static inline void dump_rxd(union fm10k_rx_desc *rxd)
static inline void
rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
{
#ifdef RTE_NEXT_ABI
static const uint32_t
ptype_table[FM10K_RXD_PKTTYPE_MASK >> FM10K_RXD_PKTTYPE_SHIFT]
__rte_cache_aligned = {
@@ -91,14 +90,6 @@ rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
m->packet_type = ptype_table[(d->w.pkt_info & FM10K_RXD_PKTTYPE_MASK)
>> FM10K_RXD_PKTTYPE_SHIFT];
#else /* RTE_NEXT_ABI */
uint16_t ptype;
static const uint16_t pt_lut[] = { 0,
PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT,
PKT_RX_IPV6_HDR, PKT_RX_IPV6_HDR_EXT,
0, 0, 0
};
#endif /* RTE_NEXT_ABI */
if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
m->ol_flags |= PKT_RX_RSS_HASH;
@@ -121,12 +112,6 @@ rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
if (unlikely(d->d.staterr & FM10K_RXD_STATUS_RXE))
m->ol_flags |= PKT_RX_RECIP_ERR;
#ifndef RTE_NEXT_ABI
ptype = (d->d.data & FM10K_RXD_PKTTYPE_MASK_L3) >>
FM10K_RXD_PKTTYPE_SHIFT;
m->ol_flags |= pt_lut[(uint8_t)ptype];
#endif
}
uint16_t

@@ -188,11 +188,9 @@ i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword)
| I40E_RXD_QW1_STATUS_TSYNINDX_MASK))
>> I40E_RX_DESC_STATUS_TSYNINDX_SHIFT;
#ifdef RTE_NEXT_ABI
if ((mb->packet_type & RTE_PTYPE_L2_MASK)
== RTE_PTYPE_L2_ETHER_TIMESYNC)
pkt_flags = PKT_RX_IEEE1588_PTP;
#endif
if (tsyn & 0x04) {
pkt_flags |= PKT_RX_IEEE1588_TMST;
mb->timesync = tsyn & 0x03;
@@ -202,7 +200,6 @@ i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword)
}
#endif
#ifdef RTE_NEXT_ABI
/* For each value it means, datasheet of hardware can tell more details */
static inline uint32_t
i40e_rxd_pkt_type_mapping(uint8_t ptype)
@@ -735,275 +732,6 @@ i40e_rxd_pkt_type_mapping(uint8_t ptype)
return ptype_table[ptype];
}
#else /* RTE_NEXT_ABI */
/* Translate pkt types to pkt flags */
static inline uint64_t
i40e_rxd_ptype_to_pkt_flags(uint64_t qword)
{
uint8_t ptype = (uint8_t)((qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT);
static const uint64_t ip_ptype_map[I40E_MAX_PKT_TYPE] = {
0, /* PTYPE 0 */
0, /* PTYPE 1 */
PKT_RX_IEEE1588_PTP, /* PTYPE 2 */
0, /* PTYPE 3 */
0, /* PTYPE 4 */
0, /* PTYPE 5 */
0, /* PTYPE 6 */
0, /* PTYPE 7 */
0, /* PTYPE 8 */
0, /* PTYPE 9 */
0, /* PTYPE 10 */
0, /* PTYPE 11 */
0, /* PTYPE 12 */
0, /* PTYPE 13 */
0, /* PTYPE 14 */
0, /* PTYPE 15 */
0, /* PTYPE 16 */
0, /* PTYPE 17 */
0, /* PTYPE 18 */
0, /* PTYPE 19 */
0, /* PTYPE 20 */
0, /* PTYPE 21 */
PKT_RX_IPV4_HDR, /* PTYPE 22 */
PKT_RX_IPV4_HDR, /* PTYPE 23 */
PKT_RX_IPV4_HDR, /* PTYPE 24 */
0, /* PTYPE 25 */
PKT_RX_IPV4_HDR, /* PTYPE 26 */
PKT_RX_IPV4_HDR, /* PTYPE 27 */
PKT_RX_IPV4_HDR, /* PTYPE 28 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 29 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 30 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 31 */
0, /* PTYPE 32 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 33 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 34 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 35 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 36 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 37 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 38 */
0, /* PTYPE 39 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 40 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 41 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 42 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 43 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 44 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 45 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 46 */
0, /* PTYPE 47 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 48 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 49 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 50 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 51 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 52 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 53 */
0, /* PTYPE 54 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 55 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 56 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 57 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 58 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 59 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 60 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 61 */
0, /* PTYPE 62 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 63 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 64 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 65 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 66 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 67 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 68 */
0, /* PTYPE 69 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 70 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 71 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 72 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 73 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 74 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 75 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 76 */
0, /* PTYPE 77 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 78 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 79 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 80 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 81 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 82 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 83 */
0, /* PTYPE 84 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 85 */
PKT_RX_TUNNEL_IPV4_HDR, /* PTYPE 86 */
PKT_RX_IPV4_HDR_EXT, /* PTYPE 87 */
PKT_RX_IPV6_HDR, /* PTYPE 88 */
PKT_RX_IPV6_HDR, /* PTYPE 89 */
PKT_RX_IPV6_HDR, /* PTYPE 90 */
0, /* PTYPE 91 */
PKT_RX_IPV6_HDR, /* PTYPE 92 */
PKT_RX_IPV6_HDR, /* PTYPE 93 */
PKT_RX_IPV6_HDR, /* PTYPE 94 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 95 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 96 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 97 */
0, /* PTYPE 98 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 99 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 100 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 101 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 102 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 103 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 104 */
0, /* PTYPE 105 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 106 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 107 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 108 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 109 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 110 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 111 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 112 */
0, /* PTYPE 113 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 114 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 115 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 116 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 117 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 118 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 119 */
0, /* PTYPE 120 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 121 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 122 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 123 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 124 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 125 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 126 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 127 */
0, /* PTYPE 128 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 129 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 130 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 131 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 132 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 133 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 134 */
0, /* PTYPE 135 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 136 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 137 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 138 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 139 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 140 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 141 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 142 */
0, /* PTYPE 143 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 144 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 145 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 146 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 147 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 148 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 149 */
0, /* PTYPE 150 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 151 */
PKT_RX_TUNNEL_IPV6_HDR, /* PTYPE 152 */
PKT_RX_IPV6_HDR_EXT, /* PTYPE 153 */
0, /* PTYPE 154 */
0, /* PTYPE 155 */
0, /* PTYPE 156 */
0, /* PTYPE 157 */
0, /* PTYPE 158 */
0, /* PTYPE 159 */
0, /* PTYPE 160 */
0, /* PTYPE 161 */
0, /* PTYPE 162 */
0, /* PTYPE 163 */
0, /* PTYPE 164 */
0, /* PTYPE 165 */
0, /* PTYPE 166 */
0, /* PTYPE 167 */
0, /* PTYPE 168 */
0, /* PTYPE 169 */
0, /* PTYPE 170 */
0, /* PTYPE 171 */
0, /* PTYPE 172 */
0, /* PTYPE 173 */
0, /* PTYPE 174 */
0, /* PTYPE 175 */
0, /* PTYPE 176 */
0, /* PTYPE 177 */
0, /* PTYPE 178 */
0, /* PTYPE 179 */
0, /* PTYPE 180 */
0, /* PTYPE 181 */
0, /* PTYPE 182 */
0, /* PTYPE 183 */
0, /* PTYPE 184 */
0, /* PTYPE 185 */
0, /* PTYPE 186 */
0, /* PTYPE 187 */
0, /* PTYPE 188 */
0, /* PTYPE 189 */
0, /* PTYPE 190 */
0, /* PTYPE 191 */
0, /* PTYPE 192 */
0, /* PTYPE 193 */
0, /* PTYPE 194 */
0, /* PTYPE 195 */
0, /* PTYPE 196 */
0, /* PTYPE 197 */
0, /* PTYPE 198 */
0, /* PTYPE 199 */
0, /* PTYPE 200 */
0, /* PTYPE 201 */
0, /* PTYPE 202 */
0, /* PTYPE 203 */
0, /* PTYPE 204 */
0, /* PTYPE 205 */
0, /* PTYPE 206 */
0, /* PTYPE 207 */
0, /* PTYPE 208 */
0, /* PTYPE 209 */
0, /* PTYPE 210 */
0, /* PTYPE 211 */
0, /* PTYPE 212 */
0, /* PTYPE 213 */
0, /* PTYPE 214 */
0, /* PTYPE 215 */
0, /* PTYPE 216 */
0, /* PTYPE 217 */
0, /* PTYPE 218 */
0, /* PTYPE 219 */
0, /* PTYPE 220 */
0, /* PTYPE 221 */
0, /* PTYPE 222 */
0, /* PTYPE 223 */
0, /* PTYPE 224 */
0, /* PTYPE 225 */
0, /* PTYPE 226 */
0, /* PTYPE 227 */
0, /* PTYPE 228 */
0, /* PTYPE 229 */
0, /* PTYPE 230 */
0, /* PTYPE 231 */
0, /* PTYPE 232 */
0, /* PTYPE 233 */
0, /* PTYPE 234 */
0, /* PTYPE 235 */
0, /* PTYPE 236 */
0, /* PTYPE 237 */
0, /* PTYPE 238 */
0, /* PTYPE 239 */
0, /* PTYPE 240 */
0, /* PTYPE 241 */
0, /* PTYPE 242 */
0, /* PTYPE 243 */
0, /* PTYPE 244 */
0, /* PTYPE 245 */
0, /* PTYPE 246 */
0, /* PTYPE 247 */
0, /* PTYPE 248 */
0, /* PTYPE 249 */
0, /* PTYPE 250 */
0, /* PTYPE 251 */
0, /* PTYPE 252 */
0, /* PTYPE 253 */
0, /* PTYPE 254 */
0, /* PTYPE 255 */
};
return ip_ptype_map[ptype];
}
#endif /* RTE_NEXT_ABI */
#define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03
#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01
@@ -1292,18 +1020,10 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
i40e_rxd_to_vlan_tci(mb, &rxdp[j]);
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
#ifdef RTE_NEXT_ABI
mb->packet_type =
i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT));
#else
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
mb->packet_type = (uint16_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT);
#endif /* RTE_NEXT_ABI */
if (pkt_flags & PKT_RX_RSS_HASH)
mb->hash.rss = rte_le_to_cpu_32(\
rxdp[j].wb.qword0.hi_dword.rss);
@@ -1549,15 +1269,9 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
i40e_rxd_to_vlan_tci(rxm, &rxd);
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
#ifdef RTE_NEXT_ABI
rxm->packet_type =
i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
#else
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
rxm->packet_type = (uint16_t)((qword1 & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT);
#endif /* RTE_NEXT_ABI */
if (pkt_flags & PKT_RX_RSS_HASH)
rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
@@ -1717,16 +1431,9 @@ i40e_recv_scattered_pkts(void *rx_queue,
i40e_rxd_to_vlan_tci(first_seg, &rxd);
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
#ifdef RTE_NEXT_ABI
first_seg->packet_type =
i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
#else
pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
first_seg->packet_type = (uint16_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT);
#endif /* RTE_NEXT_ABI */
if (pkt_flags & PKT_RX_RSS_HASH)
rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
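
A behavioral detail of the i40e change: IEEE 1588 detection now keys off the L2 class of the packet type rather than a dedicated flag. In isolation, as a sketch (is_ptp_frame is a hypothetical name):

    #include <rte_mbuf.h>

    /* PTP frames are identified by their L2 class under the new ABI. */
    static int
    is_ptp_frame(const struct rte_mbuf *m)
    {
            return (m->packet_type & RTE_PTYPE_L2_MASK) ==
                   RTE_PTYPE_L2_ETHER_TIMESYNC;
    }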

@@ -864,7 +864,6 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* RX functions
*
**********************************************************************/
#ifdef RTE_NEXT_ABI
#define IXGBE_PACKET_TYPE_IPV4 0X01
#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
@@ -967,43 +966,6 @@ ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
return ip_rss_types_map[pkt_info & 0XF];
#endif
}
#else /* RTE_NEXT_ABI */
static inline uint64_t
rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
{
uint64_t pkt_flags;
static const uint64_t ip_pkt_types_map[16] = {
0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
PKT_RX_IPV6_HDR, 0, 0, 0,
PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
};
static const uint64_t ip_rss_types_map[16] = {
0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, 0, 0, 0,
0, 0, 0, PKT_RX_FDIR,
};
#ifdef RTE_LIBRTE_IEEE1588
static uint64_t ip_pkt_etqf_map[8] = {
0, 0, 0, PKT_RX_IEEE1588_PTP,
0, 0, 0, 0,
};
pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
#else
pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
#endif
return pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF];
}
#endif /* RTE_NEXT_ABI */
static inline uint64_t
rx_desc_status_to_pkt_flags(uint32_t rx_status)
@@ -1058,13 +1020,9 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
struct rte_mbuf *mb;
uint16_t pkt_len;
uint64_t pkt_flags;
#ifdef RTE_NEXT_ABI
int nb_dd;
uint32_t s[LOOK_AHEAD];
uint16_t pkt_info[LOOK_AHEAD];
#else
int s[LOOK_AHEAD], nb_dd;
#endif /* RTE_NEXT_ABI */
int i, j, nb_rx = 0;
uint32_t status;
@@ -1088,11 +1046,9 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
for (j = LOOK_AHEAD-1; j >= 0; --j)
s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
#ifdef RTE_NEXT_ABI
for (j = LOOK_AHEAD - 1; j >= 0; --j)
pkt_info[j] = rxdp[j].wb.lower.lo_dword.
hs_rss.pkt_info;
#endif /* RTE_NEXT_ABI */
/* Compute how many status bits were set */
nb_dd = 0;
@@ -1111,7 +1067,6 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
/* convert descriptor fields to rte mbuf flags */
#ifdef RTE_NEXT_ABI
pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
pkt_flags |=
@@ -1119,15 +1074,6 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
mb->ol_flags = pkt_flags;
mb->packet_type =
ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
#else /* RTE_NEXT_ABI */
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(
rte_le_to_cpu_32(
rxdp[j].wb.lower.lo_dword.data));
/* reuse status field from scan list */
pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
mb->ol_flags = pkt_flags;
#endif /* RTE_NEXT_ABI */
if (likely(pkt_flags & PKT_RX_RSS_HASH))
mb->hash.rss = rte_le_to_cpu_32(
@@ -1328,11 +1274,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
union ixgbe_adv_rx_desc rxd;
uint64_t dma_addr;
uint32_t staterr;
#ifdef RTE_NEXT_ABI
uint32_t pkt_info;
#else
uint32_t hlen_type_rss;
#endif
uint16_t pkt_len;
uint16_t rx_id;
uint16_t nb_rx;
@@ -1450,7 +1392,6 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->data_len = pkt_len;
rxm->port = rxq->port_id;
#ifdef RTE_NEXT_ABI
pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss.
pkt_info);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
@@ -1462,16 +1403,6 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
rxm->ol_flags = pkt_flags;
rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
#else /* RTE_NEXT_ABI */
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
rxm->ol_flags = pkt_flags;
#endif /* RTE_NEXT_ABI */
if (likely(pkt_flags & PKT_RX_RSS_HASH))
rxm->hash.rss = rte_le_to_cpu_32(
@@ -1547,7 +1478,6 @@ ixgbe_fill_cluster_head_buf(
uint8_t port_id,
uint32_t staterr)
{
#ifdef RTE_NEXT_ABI
uint16_t pkt_info;
uint64_t pkt_flags;
@@ -1563,23 +1493,6 @@ ixgbe_fill_cluster_head_buf(
pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
head->ol_flags = pkt_flags;
head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
#else /* RTE_NEXT_ABI */
uint32_t hlen_type_rss;
uint64_t pkt_flags;
head->port = port_id;
/*
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
head->ol_flags = pkt_flags;
#endif /* RTE_NEXT_ABI */
if (likely(pkt_flags & PKT_RX_RSS_HASH))
head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);

@@ -140,19 +140,11 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
*/
#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
#ifndef RTE_NEXT_ABI
#define OLFLAGS_MASK ((uint16_t)(PKT_RX_VLAN_PKT | PKT_RX_IPV4_HDR |\
PKT_RX_IPV4_HDR_EXT | PKT_RX_IPV6_HDR |\
PKT_RX_IPV6_HDR_EXT))
#define PTYPE_SHIFT (1)
#endif /* RTE_NEXT_ABI */
#define VTAG_SHIFT (3)
static inline void
desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
#ifdef RTE_NEXT_ABI
__m128i ptype0, ptype1, vtag0, vtag1;
union {
uint16_t e[4];
@@ -190,50 +182,6 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
vtag1 = _mm_or_si128(ptype0, vtag1);
vol.dword = _mm_cvtsi128_si64(vtag1);
#else
__m128i ptype0, ptype1, vtag0, vtag1;
union {
uint16_t e[4];
uint64_t dword;
} vol;
/* pkt type + vlan olflags mask */
const __m128i pkttype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
OLFLAGS_MASK, OLFLAGS_MASK, OLFLAGS_MASK, OLFLAGS_MASK);
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
0x000F, 0x000F, 0x000F, 0x000F);
/* rss type to PKT_RX_RSS_HASH translation */
const __m128i rss_flags = _mm_set_epi8(PKT_RX_FDIR, 0, 0, 0,
0, 0, 0, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);
ptype1 = _mm_unpacklo_epi32(ptype0, ptype1);
vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
ptype0 = _mm_and_si128(ptype1, rsstype_msk);
ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);
ptype1 = _mm_slli_epi16(ptype1, PTYPE_SHIFT);
vtag1 = _mm_srli_epi16(vtag1, VTAG_SHIFT);
ptype1 = _mm_or_si128(ptype1, vtag1);
ptype1 = _mm_and_si128(ptype1, pkttype_msk);
ptype0 = _mm_or_si128(ptype0, ptype1);
vol.dword = _mm_cvtsi128_si64(ptype0);
#endif /* RTE_NEXT_ABI */
rx_pkts[0]->ol_flags = vol.e[0];
rx_pkts[1]->ol_flags = vol.e[1];
@@ -264,7 +212,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
int pos;
uint64_t var;
__m128i shuf_msk;
#ifdef RTE_NEXT_ABI
__m128i crc_adjust = _mm_set_epi16(
0, 0, 0, /* ignore non-length fields */
-rxq->crc_len, /* sub crc on data_len */
@@ -275,16 +222,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
__m128i dd_check, eop_check;
__m128i desc_mask = _mm_set_epi32(0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFF07F0);
#else
__m128i crc_adjust = _mm_set_epi16(
0, 0, 0, 0, /* ignore non-length fields */
0, /* ignore high-16bits of pkt_len */
-rxq->crc_len, /* sub crc on pkt_len */
-rxq->crc_len, /* sub crc on data_len */
0 /* ignore pkt_type field */
);
__m128i dd_check, eop_check;
#endif /* RTE_NEXT_ABI */
if (unlikely(nb_pkts < RTE_IXGBE_VPMD_RX_BURST))
return 0;
@@ -313,7 +250,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
/* mask to shuffle from desc. to mbuf */
#ifdef RTE_NEXT_ABI
shuf_msk = _mm_set_epi8(
7, 6, 5, 4, /* octet 4~7, 32bits rss */
15, 14, /* octet 14~15, low 16 bits vlan_macip */
@@ -324,23 +260,11 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1, /* octet 1, 8 bits pkt_type field */
0 /* octet 0, 4 bits offset 4 pkt_type field */
);
#else
shuf_msk = _mm_set_epi8(
7, 6, 5, 4, /* octet 4~7, 32bits rss */
0xFF, 0xFF, /* skip high 16 bits vlan_macip, zero out */
15, 14, /* octet 14~15, low 16 bits vlan_macip */
0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
13, 12, /* octet 12~13, low 16 bits pkt_len */
13, 12, /* octet 12~13, 16 bits data_len */
0xFF, 0xFF /* skip pkt_type field */
);
#endif /* RTE_NEXT_ABI */
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache */
sw_ring = &rxq->sw_ring[rxq->rx_tail];
#ifdef RTE_NEXT_ABI
/* A. load 4 packet in one loop
* [A*. mask out 4 unused dirty field in desc]
* B. copy 4 mbuf point from swring to rx_pkts
@@ -348,20 +272,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
* [C*. extract the end-of-packet bit, if requested]
* D. fill info. from desc to mbuf
*/
#else
/* A. load 4 packet in one loop
* B. copy 4 mbuf point from swring to rx_pkts
* C. calc the number of DD bits among the 4 packets
* [C*. extract the end-of-packet bit, if requested]
* D. fill info. from desc to mbuf
*/
#endif /* RTE_NEXT_ABI */
for (pos = 0, nb_pkts_recd = 0; pos < RTE_IXGBE_VPMD_RX_BURST;
pos += RTE_IXGBE_DESCS_PER_LOOP,
rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
#ifdef RTE_NEXT_ABI
__m128i descs0[RTE_IXGBE_DESCS_PER_LOOP];
#endif /* RTE_NEXT_ABI */
__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
@@ -377,7 +291,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* B.1 load 1 mbuf point */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
#ifdef RTE_NEXT_ABI
/* Read desc statuses backwards to avoid race condition */
/* A.1 load 4 pkts desc */
descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
@@ -403,25 +316,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* A* mask out 0~3 bits RSS type */
descs[1] = _mm_and_si128(descs0[1], desc_mask);
descs[0] = _mm_and_si128(descs0[0], desc_mask);
#else
/* Read desc statuses backwards to avoid race condition */
/* A.1 load 4 pkts desc */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
/* B.1 load 1 mbuf point */
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
/* B.1 load 2 mbuf point */
descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2);
#endif /* RTE_NEXT_ABI */
/* avoid compiler reorder optimization */
rte_compiler_barrier();
@@ -435,13 +329,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* C.1 4=>2 filter staterr info only */
sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
#ifdef RTE_NEXT_ABI
/* set ol_flags with vlan packet type */
desc_to_olflags_v(descs0, &rx_pkts[pos]);
#else
/* set ol_flags with packet type and vlan tag */
desc_to_olflags_v(descs, &rx_pkts[pos]);
#endif /* RTE_NEXT_ABI */
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);

@@ -1264,16 +1264,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* offsets but automatically recognizes the packet
* type. For inner L3/L4 checksums, only VXLAN (UDP)
* tunnels are currently supported. */
#ifdef RTE_NEXT_ABI
if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
#else
/* FIXME: since PKT_TX_UDP_TUNNEL_PKT has been removed,
* the outer packet type is unknown. All we know is
* that the L2 header is of unusual length (not
* ETHER_HDR_LEN with or without 802.1Q header). */
if ((buf->l2_len != ETHER_HDR_LEN) &&
(buf->l2_len != (ETHER_HDR_LEN + 4)))
#endif
send_flags |= IBV_EXP_QP_BURST_TUNNEL;
}
if (likely(segs == 1)) {
@@ -2488,7 +2479,6 @@ rxq_cleanup(struct rxq *rxq)
memset(rxq, 0, sizeof(*rxq));
}
#ifdef RTE_NEXT_ABI
/**
* Translate RX completion flags to packet type.
*
@@ -2521,7 +2511,6 @@ rxq_cq_to_pkt_type(uint32_t flags)
IBV_EXP_CQ_RX_IPV6_PACKET, RTE_PTYPE_L3_IPV6);
return pkt_type;
}
#endif /* RTE_NEXT_ABI */
/**
* Translate RX completion flags to offload flags.
@@ -2539,11 +2528,6 @@ rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
{
uint32_t ol_flags = 0;
#ifndef RTE_NEXT_ABI
ol_flags =
TRANSPOSE(flags, IBV_EXP_CQ_RX_IPV4_PACKET, PKT_RX_IPV4_HDR) |
TRANSPOSE(flags, IBV_EXP_CQ_RX_IPV6_PACKET, PKT_RX_IPV6_HDR);
#endif
if (rxq->csum)
ol_flags |=
TRANSPOSE(~flags,
@@ -2559,14 +2543,6 @@ rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
*/
if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
ol_flags |=
#ifndef RTE_NEXT_ABI
TRANSPOSE(flags,
IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
PKT_RX_TUNNEL_IPV4_HDR) |
TRANSPOSE(flags,
IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
PKT_RX_TUNNEL_IPV6_HDR) |
#endif
TRANSPOSE(~flags,
IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
PKT_RX_IP_CKSUM_BAD) |
@@ -2758,10 +2734,7 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
NB_SEGS(pkt_buf) = j;
PORT(pkt_buf) = rxq->port_id;
PKT_LEN(pkt_buf) = pkt_buf_len;
#ifdef RTE_NEXT_ABI
pkt_buf->packet_type = rxq_cq_to_pkt_type(flags);
#endif
pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
/* Return packet. */
*(pkts++) = pkt_buf;
@@ -2921,9 +2894,7 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
NEXT(seg) = NULL;
PKT_LEN(seg) = len;
DATA_LEN(seg) = len;
#ifdef RTE_NEXT_ABI
seg->packet_type = rxq_cq_to_pkt_type(flags);
#endif
seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
/* Return packet. */

@@ -520,17 +520,9 @@ vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);
if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
#ifdef RTE_NEXT_ABI
rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT;
#else
rxm->ol_flags |= PKT_RX_IPV4_HDR_EXT;
#endif
else
#ifdef RTE_NEXT_ABI
rxm->packet_type = RTE_PTYPE_L3_IPV4;
#else
rxm->ol_flags |= PKT_RX_IPV4_HDR;
#endif
if (!rcd->cnc) {
if (!rcd->ipc)
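
vmxnet3 tells RTE_PTYPE_L3_IPV4_EXT apart from RTE_PTYPE_L3_IPV4 purely by the IHL field; the same arithmetic in isolation (ipv4_has_options is a hypothetical helper):

    #include <rte_ip.h>

    /* IHL counts 32-bit words; anything above the fixed 20-byte header
     * means IPv4 options are present. */
    static int
    ipv4_has_options(const struct ipv4_hdr *ip)
    {
            return ((ip->version_ihl & 0xf) << 2) >
                   (int)sizeof(struct ipv4_hdr);
    }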

@@ -283,11 +283,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
len = qconf->tx_mbufs[port_out].len;
/* if this is an IPv4 packet */
#ifdef RTE_NEXT_ABI
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
#else
if (m->ol_flags & PKT_RX_IPV4_HDR) {
#endif
struct ipv4_hdr *ip_hdr;
uint32_t ip_dst;
/* Read the lookup key (i.e. ip_dst) from the input packet */
@@ -321,14 +317,8 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
if (unlikely (len2 < 0))
return;
}
#ifdef RTE_NEXT_ABI
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
/* if this is an IPv6 packet */
#else
}
/* if this is an IPv6 packet */
else if (m->ol_flags & PKT_RX_IPV6_HDR) {
#endif
struct ipv6_hdr *ip_hdr;
ipv6 = 1;

@@ -356,11 +356,7 @@ reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
dst_port = portid;
/* if packet is IPv4 */
#ifdef RTE_NEXT_ABI
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
#else
if (m->ol_flags & (PKT_RX_IPV4_HDR)) {
#endif
struct ipv4_hdr *ip_hdr;
uint32_t ip_dst;
@@ -400,14 +396,8 @@ reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
}
eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);
#ifdef RTE_NEXT_ABI
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
/* if packet is IPv6 */
#else
}
/* if packet is IPv6 */
else if (m->ol_flags & (PKT_RX_IPV6_HDR | PKT_RX_IPV6_HDR_EXT)) {
#endif
struct ipv6_extension_fragment *frag_hdr;
struct ipv6_hdr *ip_hdr;

@@ -645,13 +645,7 @@ prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
struct ipv4_hdr *ipv4_hdr;
struct rte_mbuf *pkt = pkts_in[index];
#ifdef RTE_NEXT_ABI
if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
#else
int type = pkt->ol_flags & (PKT_RX_IPV4_HDR | PKT_RX_IPV6_HDR);
if (type == PKT_RX_IPV4_HDR) {
#endif
ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
sizeof(struct ether_hdr));
@@ -670,11 +664,7 @@ prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
/* Not a valid IPv4 packet */
rte_pktmbuf_free(pkt);
}
#ifdef RTE_NEXT_ABI
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
#else
} else if (type == PKT_RX_IPV6_HDR) {
#endif
/* Fill acl structure */
acl->data_ipv6[acl->num_ipv6] = MBUF_IPV6_2PROTO(pkt);
acl->m_ipv6[(acl->num_ipv6)++] = pkt;
@@ -692,22 +682,12 @@ prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
{
struct rte_mbuf *pkt = pkts_in[index];
#ifdef RTE_NEXT_ABI
if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
#else
int type = pkt->ol_flags & (PKT_RX_IPV4_HDR | PKT_RX_IPV6_HDR);
if (type == PKT_RX_IPV4_HDR) {
#endif
/* Fill acl structure */
acl->data_ipv4[acl->num_ipv4] = MBUF_IPV4_2PROTO(pkt);
acl->m_ipv4[(acl->num_ipv4)++] = pkt;
#ifdef RTE_NEXT_ABI
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
#else
} else if (type == PKT_RX_IPV6_HDR) {
#endif
/* Fill acl structure */
acl->data_ipv6[acl->num_ipv6] = MBUF_IPV6_2PROTO(pkt);
acl->m_ipv6[(acl->num_ipv6)++] = pkt;
@@ -755,17 +735,10 @@ send_one_packet(struct rte_mbuf *m, uint32_t res)
/* in the ACL list, drop it */
#ifdef L3FWDACL_DEBUG
if ((res & ACL_DENY_SIGNATURE) != 0) {
#ifdef RTE_NEXT_ABI
if (RTE_ETH_IS_IPV4_HDR(m->packet_type))
dump_acl4_rule(m, res);
else if (RTE_ETH_IS_IPV6_HDR(m->packet_type))
dump_acl6_rule(m, res);
#else
if (m->ol_flags & PKT_RX_IPV4_HDR)
dump_acl4_rule(m, res);
else
dump_acl6_rule(m, res);
#endif /* RTE_NEXT_ABI */
}
#endif
rte_pktmbuf_free(m);

@@ -650,11 +650,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
#ifdef RTE_NEXT_ABI
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
#else
if (m->ol_flags & PKT_RX_IPV4_HDR) {
#endif
/* Handle IPv4 headers.*/
ipv4_hdr =
rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
@@ -689,12 +685,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);
send_single_packet(m, dst_port);
#ifdef RTE_NEXT_ABI
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
#else
}
else {
#endif
/* Handle IPv6 headers.*/
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
struct ipv6_hdr *ipv6_hdr;

@@ -1073,11 +1073,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qcon
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
#ifdef RTE_NEXT_ABI
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
#else
if (m->ol_flags & PKT_RX_IPV4_HDR) {
#endif
/* Handle IPv4 headers.*/
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
sizeof(struct ether_hdr));
@@ -1108,11 +1104,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qcon
ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);
send_single_packet(m, dst_port);
#ifdef RTE_NEXT_ABI
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
#else
} else {
#endif
/* Handle IPv6 headers.*/
struct ipv6_hdr *ipv6_hdr;
@@ -1131,13 +1123,9 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qcon
ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);
send_single_packet(m, dst_port);
#ifdef RTE_NEXT_ABI
} else
/* Free the mbuf that contains non-IPV4/IPV6 packet */
rte_pktmbuf_free(m);
#else
}
#endif
}
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
@@ -1163,19 +1151,11 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, struct lcore_conf *qcon
* to BAD_PORT value.
*/
static inline __attribute__((always_inline)) void
#ifdef RTE_NEXT_ABI
rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
#else
rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t flags)
#endif
{
uint8_t ihl;
#ifdef RTE_NEXT_ABI
if (RTE_ETH_IS_IPV4_HDR(ptype)) {
#else
if ((flags & PKT_RX_IPV4_HDR) != 0) {
#endif
ihl = ipv4_hdr->version_ihl - IPV4_MIN_VER_IHL;
ipv4_hdr->time_to_live--;
@@ -1206,19 +1186,11 @@ get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
struct ipv6_hdr *ipv6_hdr;
struct ether_hdr *eth_hdr;
#ifdef RTE_NEXT_ABI
if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
#else
if (pkt->ol_flags & PKT_RX_IPV4_HDR) {
#endif
if (rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
&next_hop) != 0)
next_hop = portid;
#ifdef RTE_NEXT_ABI
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
#else
} else if (pkt->ol_flags & PKT_RX_IPV6_HDR) {
#endif
eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
if (rte_lpm6_lookup(qconf->ipv6_lookup_struct,
@@ -1252,17 +1224,12 @@ process_packet(struct lcore_conf *qconf, struct rte_mbuf *pkt,
ve = val_eth[dp];
dst_port[0] = dp;
#ifdef RTE_NEXT_ABI
rfc1812_process(ipv4_hdr, dst_port, pkt->packet_type);
#else
rfc1812_process(ipv4_hdr, dst_port, pkt->ol_flags);
#endif
te = _mm_blend_epi16(te, ve, MASK_ETH);
_mm_store_si128((__m128i *)eth_hdr, te);
}
#ifdef RTE_NEXT_ABI
/*
* Read packet_type and destination IPV4 addresses from 4 mbufs.
*/
@@ -1297,57 +1264,18 @@ processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
dip[0] = _mm_set_epi32(x3, x2, x1, x0);
}
#else /* RTE_NEXT_ABI */
/*
* Read ol_flags and destination IPV4 addresses from 4 mbufs.
*/
static inline void
processx4_step1(struct rte_mbuf *pkt[FWDSTEP], __m128i *dip, uint32_t *flag)
{
struct ipv4_hdr *ipv4_hdr;
struct ether_hdr *eth_hdr;
uint32_t x0, x1, x2, x3;
eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
x0 = ipv4_hdr->dst_addr;
flag[0] = pkt[0]->ol_flags & PKT_RX_IPV4_HDR;
eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
x1 = ipv4_hdr->dst_addr;
flag[0] &= pkt[1]->ol_flags;
eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
x2 = ipv4_hdr->dst_addr;
flag[0] &= pkt[2]->ol_flags;
eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
x3 = ipv4_hdr->dst_addr;
flag[0] &= pkt[3]->ol_flags;
dip[0] = _mm_set_epi32(x3, x2, x1, x0);
}
#endif /* RTE_NEXT_ABI */
/*
* Lookup into LPM for destination port.
* If lookup fails, use incoming port (portid) as destination port.
*/
static inline void
#ifdef RTE_NEXT_ABI
processx4_step2(const struct lcore_conf *qconf,
__m128i dip,
uint32_t ipv4_flag,
uint8_t portid,
struct rte_mbuf *pkt[FWDSTEP],
uint16_t dprt[FWDSTEP])
#else
processx4_step2(const struct lcore_conf *qconf, __m128i dip, uint32_t flag,
uint8_t portid, struct rte_mbuf *pkt[FWDSTEP], uint16_t dprt[FWDSTEP])
#endif /* RTE_NEXT_ABI */
{
rte_xmm_t dst;
const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
@@ -1357,11 +1285,7 @@ processx4_step2(const struct lcore_conf *qconf, __m128i dip, uint32_t flag,
dip = _mm_shuffle_epi8(dip, bswap_mask);
/* if all 4 packets are IPV4. */
#ifdef RTE_NEXT_ABI
if (likely(ipv4_flag)) {
#else
if (likely(flag != 0)) {
#endif
rte_lpm_lookupx4(qconf->ipv4_lookup_struct, dip, dprt, portid);
} else {
dst.x = dip;
@@ -1411,7 +1335,6 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
_mm_store_si128(p[2], te[2]);
_mm_store_si128(p[3], te[3]);
#ifdef RTE_NEXT_ABI
rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
&dst_port[0], pkt[0]->packet_type);
rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
@@ -1420,16 +1343,6 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
&dst_port[2], pkt[2]->packet_type);
rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
&dst_port[3], pkt[3]->packet_type);
#else /* RTE_NEXT_ABI */
rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
&dst_port[0], pkt[0]->ol_flags);
rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
&dst_port[1], pkt[1]->ol_flags);
rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
&dst_port[2], pkt[2]->ol_flags);
rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
&dst_port[3], pkt[3]->ol_flags);
#endif /* RTE_NEXT_ABI */
}
/*
@@ -1616,11 +1529,7 @@ main_loop(__attribute__((unused)) void *dummy)
uint16_t *lp;
uint16_t dst_port[MAX_PKT_BURST];
__m128i dip[MAX_PKT_BURST / FWDSTEP];
#ifdef RTE_NEXT_ABI
uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
#else
uint32_t flag[MAX_PKT_BURST / FWDSTEP];
#endif
uint16_t pnum[MAX_PKT_BURST + 1];
#endif
@@ -1690,7 +1599,6 @@ main_loop(__attribute__((unused)) void *dummy)
*/
int32_t n = RTE_ALIGN_FLOOR(nb_rx, 8);
for (j = 0; j < n; j += 8) {
#ifdef RTE_NEXT_ABI
uint32_t pkt_type =
pkts_burst[j]->packet_type &
pkts_burst[j+1]->packet_type &
@@ -1705,20 +1613,6 @@ main_loop(__attribute__((unused)) void *dummy)
&pkts_burst[j], portid, qconf);
} else if (pkt_type &
RTE_PTYPE_L3_IPV6) {
#else /* RTE_NEXT_ABI */
uint32_t ol_flag = pkts_burst[j]->ol_flags
& pkts_burst[j+1]->ol_flags
& pkts_burst[j+2]->ol_flags
& pkts_burst[j+3]->ol_flags
& pkts_burst[j+4]->ol_flags
& pkts_burst[j+5]->ol_flags
& pkts_burst[j+6]->ol_flags
& pkts_burst[j+7]->ol_flags;
if (ol_flag & PKT_RX_IPV4_HDR ) {
simple_ipv4_fwd_8pkts(&pkts_burst[j],
portid, qconf);
} else if (ol_flag & PKT_RX_IPV6_HDR) {
#endif /* RTE_NEXT_ABI */
simple_ipv6_fwd_8pkts(&pkts_burst[j],
portid, qconf);
} else {
@@ -1751,21 +1645,13 @@ main_loop(__attribute__((unused)) void *dummy)
for (j = 0; j != k; j += FWDSTEP) {
processx4_step1(&pkts_burst[j],
&dip[j / FWDSTEP],
#ifdef RTE_NEXT_ABI
&ipv4_flag[j / FWDSTEP]);
#else
&flag[j / FWDSTEP]);
#endif
}
k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
for (j = 0; j != k; j += FWDSTEP) {
processx4_step2(qconf, dip[j / FWDSTEP],
#ifdef RTE_NEXT_ABI
ipv4_flag[j / FWDSTEP], portid,
#else
flag[j / FWDSTEP], portid,
#endif
&pkts_burst[j], &dst_port[j]);
}
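
The batched l3fwd path above works because packet types AND together cleanly: RTE_ETH_IS_IPV4_HDR() checks a bit shared by all IPv4 L3 classes, so it still holds after the conjunction only if every packet in the group carries it. A reduced sketch of that group test (all_ipv4 is illustrative):

    #include <rte_mbuf.h>

    #define FWDSTEP 4

    /* One branch decides whether all four packets may take the IPv4
     * fast path. */
    static int
    all_ipv4(struct rte_mbuf *pkt[FWDSTEP])
    {
            uint32_t ptype = pkt[0]->packet_type & pkt[1]->packet_type &
                             pkt[2]->packet_type & pkt[3]->packet_type;

            return RTE_ETH_IS_IPV4_HDR(ptype) != 0;
    }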

@@ -180,12 +180,7 @@ decapsulation(struct rte_mbuf *pkt)
* (rfc7348) or that the rx offload flag is set (i40e only
* currently)*/
if (udp_hdr->dst_port != rte_cpu_to_be_16(DEFAULT_VXLAN_PORT) &&
#ifdef RTE_NEXT_ABI
(pkt->packet_type & RTE_PTYPE_TUNNEL_MASK) == 0)
#else
(pkt->ol_flags & (PKT_RX_TUNNEL_IPV4_HDR |
PKT_RX_TUNNEL_IPV6_HDR)) == 0)
#endif
return -1;
outer_header_len = info.outer_l2_len + info.outer_l3_len
+ sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr);

@@ -117,15 +117,9 @@ struct rte_kni_mbuf {
uint16_t data_off; /**< Start address of data in segment buffer. */
char pad1[4];
uint64_t ol_flags; /**< Offload features. */
#ifdef RTE_NEXT_ABI
char pad2[4];
uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
uint16_t data_len; /**< Amount of data in segment buffer. */
#else
char pad2[2];
uint16_t data_len; /**< Amount of data in segment buffer. */
uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
#endif
/* fields on second cache line */
char pad3[8] __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));

@@ -38,7 +38,7 @@ CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
EXPORT_MAP := rte_mbuf_version.map
LIBABIVER := 1
LIBABIVER := 2
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_MBUF) := rte_mbuf.c

@@ -258,18 +258,8 @@ const char *rte_get_rx_ol_flag_name(uint64_t mask)
/* case PKT_RX_HBUF_OVERFLOW: return "PKT_RX_HBUF_OVERFLOW"; */
/* case PKT_RX_RECIP_ERR: return "PKT_RX_RECIP_ERR"; */
/* case PKT_RX_MAC_ERR: return "PKT_RX_MAC_ERR"; */
#ifndef RTE_NEXT_ABI
case PKT_RX_IPV4_HDR: return "PKT_RX_IPV4_HDR";
case PKT_RX_IPV4_HDR_EXT: return "PKT_RX_IPV4_HDR_EXT";
case PKT_RX_IPV6_HDR: return "PKT_RX_IPV6_HDR";
case PKT_RX_IPV6_HDR_EXT: return "PKT_RX_IPV6_HDR_EXT";
#endif /* RTE_NEXT_ABI */
case PKT_RX_IEEE1588_PTP: return "PKT_RX_IEEE1588_PTP";
case PKT_RX_IEEE1588_TMST: return "PKT_RX_IEEE1588_TMST";
#ifndef RTE_NEXT_ABI
case PKT_RX_TUNNEL_IPV4_HDR: return "PKT_RX_TUNNEL_IPV4_HDR";
case PKT_RX_TUNNEL_IPV6_HDR: return "PKT_RX_TUNNEL_IPV6_HDR";
#endif /* RTE_NEXT_ABI */
default: return NULL;
}
}
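
The name table keeps returning NULL for bits it no longer knows, so generic flag dumps keep working after the removal. A sketch of such a dump (print_rx_flags is a hypothetical helper):

    #include <stdio.h>
    #include <rte_mbuf.h>

    /* Walk every bit of ol_flags and print the names the library still
     * recognizes; unknown bits yield NULL and are skipped. */
    static void
    print_rx_flags(const struct rte_mbuf *m)
    {
            unsigned int i;

            for (i = 0; i < 64; i++) {
                    const char *name =
                            rte_get_rx_ol_flag_name(m->ol_flags & (1ULL << i));

                    if (name != NULL)
                            printf("%s\n", name);
            }
    }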

@@ -93,18 +93,8 @@ extern "C" {
#define PKT_RX_HBUF_OVERFLOW (0ULL << 0) /**< Header buffer overflow. */
#define PKT_RX_RECIP_ERR (0ULL << 0) /**< Hardware processing error. */
#define PKT_RX_MAC_ERR (0ULL << 0) /**< MAC error. */
#ifndef RTE_NEXT_ABI
#define PKT_RX_IPV4_HDR (1ULL << 5) /**< RX packet with IPv4 header. */
#define PKT_RX_IPV4_HDR_EXT (1ULL << 6) /**< RX packet with extended IPv4 header. */
#define PKT_RX_IPV6_HDR (1ULL << 7) /**< RX packet with IPv6 header. */
#define PKT_RX_IPV6_HDR_EXT (1ULL << 8) /**< RX packet with extended IPv6 header. */
#endif /* RTE_NEXT_ABI */
#define PKT_RX_IEEE1588_PTP (1ULL << 9) /**< RX IEEE1588 L2 Ethernet PT Packet. */
#define PKT_RX_IEEE1588_TMST (1ULL << 10) /**< RX IEEE1588 L2/L4 timestamped packet.*/
#ifndef RTE_NEXT_ABI
#define PKT_RX_TUNNEL_IPV4_HDR (1ULL << 11) /**< RX tunnel packet with IPv4 header.*/
#define PKT_RX_TUNNEL_IPV6_HDR (1ULL << 12) /**< RX tunnel packet with IPv6 header. */
#endif /* RTE_NEXT_ABI */
#define PKT_RX_FDIR_ID (1ULL << 13) /**< FD id reported if FDIR match. */
#define PKT_RX_FDIR_FLX (1ULL << 14) /**< Flexible bytes reported if FDIR match. */
#define PKT_RX_QINQ_PKT (1ULL << 15) /**< RX packet with double VLAN stripped. */
@@ -209,7 +199,6 @@ extern "C" {
/* Use final bit of flags to indicate a control mbuf */
#define CTRL_MBUF_FLAG (1ULL << 63) /**< Mbuf contains control data */
#ifdef RTE_NEXT_ABI
/*
* 32 bits are divided into several fields to mark packet types. Note that
* each field is indexical.
@@ -696,7 +685,6 @@ extern "C" {
RTE_PTYPE_INNER_L2_MASK | \
RTE_PTYPE_INNER_L3_MASK | \
RTE_PTYPE_INNER_L4_MASK))
#endif /* RTE_NEXT_ABI */
/** Alignment constraint of mbuf private area. */
#define RTE_MBUF_PRIV_ALIGN 8
@@ -775,7 +763,6 @@ struct rte_mbuf {
/* remaining bytes are set on RX when pulling packet from descriptor */
MARKER rx_descriptor_fields1;
#ifdef RTE_NEXT_ABI
/*
* The packet type, which is the combination of outer/inner L2, L3, L4
* and tunnel types.
@@ -796,19 +783,7 @@ struct rte_mbuf {
uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
uint16_t data_len; /**< Amount of data in segment buffer. */
uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order) */
#else /* RTE_NEXT_ABI */
/**
* The packet type, which is used to indicate ordinary packet and also
* tunneled packet format, i.e. each number is represented a type of
* packet.
*/
uint16_t packet_type;
uint16_t data_len; /**< Amount of data in segment buffer. */
uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order) */
uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */
#endif /* RTE_NEXT_ABI */
union {
uint32_t rss; /**< RSS hash result if RSS enabled */
struct {
@@ -829,9 +804,8 @@ } hash; /**< hash information */
} hash; /**< hash information */
uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
#ifdef RTE_NEXT_ABI
uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */
#endif /* RTE_NEXT_ABI */
/* second cache line - fields only used in slow path or on TX */
MARKER cacheline1 __rte_cache_aligned;
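
The same 32-bit field also carries inner (post-tunnel) classes, so tunnel-aware code can branch on the encapsulated L3 without reparsing headers. A last sketch, assuming the hypothetical helper name inner_is_ipv4:

    #include <rte_mbuf.h>

    /* True when the inner L3 class is plain IPv4 (no options). */
    static int
    inner_is_ipv4(const struct rte_mbuf *m)
    {
            return (m->packet_type & RTE_PTYPE_INNER_L3_MASK) ==
                   RTE_PTYPE_INNER_L3_IPV4;
    }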