net: add rte prefix to IP structure

Add 'rte_' prefix to structures:
- rename struct ipv4_hdr as struct rte_ipv4_hdr.
- rename struct ipv6_hdr as struct rte_ipv6_hdr.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Reviewed-by: Stephen Hemminger <stephen@networkplumber.org>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
parent ecaeed4f1d
commit a7c528e5d7
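A minimal sketch (not part of this commit) of what application code looks like after the rename; the helper name and surrounding logic are hypothetical, only the rte_ipv4_hdr type, rte_ether_hdr and the mbuf accessor come from DPDK:

#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Hypothetical helper: read the destination address of an IPv4 packet.
 * Before this commit the cast used "struct ipv4_hdr"; afterwards the
 * prefixed name is used. */
static inline uint32_t
example_ipv4_dst(struct rte_mbuf *m)
{
	struct rte_ipv4_hdr *ip_hdr;

	ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
			sizeof(struct rte_ether_hdr));
	return rte_be_to_cpu_32(ip_hdr->dst_addr);
}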
@@ -40,7 +40,7 @@ struct rte_acl_field_def ipv4_field_formats[NUM_FIELDS_IPV4] = {
.field_index = PROTO_FIELD_IPV4,
.input_index = PROTO_FIELD_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
-offsetof(struct ipv4_hdr, next_proto_id),
+offsetof(struct rte_ipv4_hdr, next_proto_id),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
@@ -48,7 +48,7 @@ struct rte_acl_field_def ipv4_field_formats[NUM_FIELDS_IPV4] = {
.field_index = SRC_FIELD_IPV4,
.input_index = SRC_FIELD_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
-offsetof(struct ipv4_hdr, src_addr),
+offsetof(struct rte_ipv4_hdr, src_addr),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
@@ -56,7 +56,7 @@ struct rte_acl_field_def ipv4_field_formats[NUM_FIELDS_IPV4] = {
.field_index = DST_FIELD_IPV4,
.input_index = DST_FIELD_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
-offsetof(struct ipv4_hdr, dst_addr),
+offsetof(struct rte_ipv4_hdr, dst_addr),
},
{
.type = RTE_ACL_FIELD_TYPE_RANGE,
@@ -64,7 +64,7 @@ struct rte_acl_field_def ipv4_field_formats[NUM_FIELDS_IPV4] = {
.field_index = SRCP_FIELD_IPV4,
.input_index = SRCP_FIELD_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
-sizeof(struct ipv4_hdr),
+sizeof(struct rte_ipv4_hdr),
},
{
.type = RTE_ACL_FIELD_TYPE_RANGE,
@@ -72,7 +72,7 @@ struct rte_acl_field_def ipv4_field_formats[NUM_FIELDS_IPV4] = {
.field_index = DSTP_FIELD_IPV4,
.input_index = SRCP_FIELD_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
-sizeof(struct ipv4_hdr) + sizeof(uint16_t),
+sizeof(struct rte_ipv4_hdr) + sizeof(uint16_t),
},
};
@@ -426,8 +426,8 @@ app_main_loop_rx_metadata(void) {
for (j = 0; j < n_mbufs; j++) {
struct rte_mbuf *m;
uint8_t *m_data, *key;
-struct ipv4_hdr *ip_hdr;
-struct ipv6_hdr *ipv6_hdr;
+struct rte_ipv4_hdr *ip_hdr;
+struct rte_ipv6_hdr *ipv6_hdr;
uint32_t ip_dst;
uint8_t *ipv6_dst;
uint32_t *signature, *k32;
@@ -440,14 +440,14 @@ app_main_loop_rx_metadata(void) {
APP_METADATA_OFFSET(32));

if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
-ip_hdr = (struct ipv4_hdr *)
+ip_hdr = (struct rte_ipv4_hdr *)
&m_data[sizeof(struct rte_ether_hdr)];
ip_dst = ip_hdr->dst_addr;

k32 = (uint32_t *) key;
k32[0] = ip_dst & 0xFFFFFF00;
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
-ipv6_hdr = (struct ipv6_hdr *)
+ipv6_hdr = (struct rte_ipv6_hdr *)
&m_data[sizeof(struct rte_ether_hdr)];
ipv6_dst = ipv6_hdr->dst_addr;
@@ -100,7 +100,7 @@ get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)

/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
static void
-parse_ipv4(struct ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
+parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
{
struct tcp_hdr *tcp_hdr;

@@ -119,11 +119,11 @@ parse_ipv4(struct ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)

/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */
static void
-parse_ipv6(struct ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
+parse_ipv6(struct rte_ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
{
struct tcp_hdr *tcp_hdr;

-info->l3_len = sizeof(struct ipv6_hdr);
+info->l3_len = sizeof(struct rte_ipv6_hdr);
info->l4_proto = ipv6_hdr->proto;

/* only fill l4_len for TCP, it's useful for TSO */
@@ -144,8 +144,8 @@ parse_ipv6(struct ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
static void
parse_ethernet(struct rte_ether_hdr *eth_hdr, struct testpmd_offload_info *info)
{
-struct ipv4_hdr *ipv4_hdr;
-struct ipv6_hdr *ipv6_hdr;
+struct rte_ipv4_hdr *ipv4_hdr;
+struct rte_ipv6_hdr *ipv6_hdr;

info->l2_len = sizeof(struct rte_ether_hdr);
info->ethertype = eth_hdr->ether_type;
@@ -160,11 +160,13 @@ parse_ethernet(struct rte_ether_hdr *eth_hdr, struct testpmd_offload_info *info)

switch (info->ethertype) {
case _htons(RTE_ETHER_TYPE_IPv4):
-ipv4_hdr = (struct ipv4_hdr *) ((char *)eth_hdr + info->l2_len);
+ipv4_hdr = (struct rte_ipv4_hdr *)
+((char *)eth_hdr + info->l2_len);
parse_ipv4(ipv4_hdr, info);
break;
case _htons(RTE_ETHER_TYPE_IPv6):
-ipv6_hdr = (struct ipv6_hdr *) ((char *)eth_hdr + info->l2_len);
+ipv6_hdr = (struct rte_ipv6_hdr *)
+((char *)eth_hdr + info->l2_len);
parse_ipv6(ipv6_hdr, info);
break;
default:
@@ -210,8 +212,8 @@ parse_vxlan_gpe(struct udp_hdr *udp_hdr,
struct testpmd_offload_info *info)
{
struct rte_ether_hdr *eth_hdr;
-struct ipv4_hdr *ipv4_hdr;
-struct ipv6_hdr *ipv6_hdr;
+struct rte_ipv4_hdr *ipv4_hdr;
+struct rte_ipv6_hdr *ipv6_hdr;
struct rte_vxlan_gpe_hdr *vxlan_gpe_hdr;
uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr);

@@ -230,7 +232,7 @@ parse_vxlan_gpe(struct udp_hdr *udp_hdr,
info->outer_l3_len = info->l3_len;
info->outer_l4_proto = info->l4_proto;

-ipv4_hdr = (struct ipv4_hdr *)((char *)vxlan_gpe_hdr +
+ipv4_hdr = (struct rte_ipv4_hdr *)((char *)vxlan_gpe_hdr +
vxlan_gpe_len);

parse_ipv4(ipv4_hdr, info);
@@ -244,7 +246,7 @@ parse_vxlan_gpe(struct udp_hdr *udp_hdr,
info->outer_l3_len = info->l3_len;
info->outer_l4_proto = info->l4_proto;

-ipv6_hdr = (struct ipv6_hdr *)((char *)vxlan_gpe_hdr +
+ipv6_hdr = (struct rte_ipv6_hdr *)((char *)vxlan_gpe_hdr +
vxlan_gpe_len);

info->ethertype = _htons(RTE_ETHER_TYPE_IPv6);
@@ -273,8 +275,8 @@ static void
parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
{
struct rte_ether_hdr *eth_hdr;
-struct ipv4_hdr *ipv4_hdr;
-struct ipv6_hdr *ipv6_hdr;
+struct rte_ipv4_hdr *ipv4_hdr;
+struct rte_ipv6_hdr *ipv6_hdr;
uint8_t gre_len = 0;

gre_len += sizeof(struct simple_gre_hdr);
@@ -293,7 +295,7 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
info->outer_l3_len = info->l3_len;
info->outer_l4_proto = info->l4_proto;

-ipv4_hdr = (struct ipv4_hdr *)((char *)gre_hdr + gre_len);
+ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gre_hdr + gre_len);

parse_ipv4(ipv4_hdr, info);
info->ethertype = _htons(RTE_ETHER_TYPE_IPv4);
@@ -306,7 +308,7 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
info->outer_l3_len = info->l3_len;
info->outer_l4_proto = info->l4_proto;

-ipv6_hdr = (struct ipv6_hdr *)((char *)gre_hdr + gre_len);
+ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gre_hdr + gre_len);

info->ethertype = _htons(RTE_ETHER_TYPE_IPv6);
parse_ipv6(ipv6_hdr, info);
@@ -333,8 +335,8 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
static void
parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
{
-struct ipv4_hdr *ipv4_hdr = encap_ip;
-struct ipv6_hdr *ipv6_hdr = encap_ip;
+struct rte_ipv4_hdr *ipv4_hdr = encap_ip;
+struct rte_ipv6_hdr *ipv6_hdr = encap_ip;
uint8_t ip_version;

ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;
@@ -363,7 +365,7 @@ static uint64_t
process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
uint64_t tx_offloads)
{
-struct ipv4_hdr *ipv4_hdr = l3_hdr;
+struct rte_ipv4_hdr *ipv4_hdr = l3_hdr;
struct udp_hdr *udp_hdr;
struct tcp_hdr *tcp_hdr;
struct sctp_hdr *sctp_hdr;
@@ -454,8 +456,8 @@ static uint64_t
process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
uint64_t tx_offloads, int tso_enabled)
{
-struct ipv4_hdr *ipv4_hdr = outer_l3_hdr;
-struct ipv6_hdr *ipv6_hdr = outer_l3_hdr;
+struct rte_ipv4_hdr *ipv4_hdr = outer_l3_hdr;
+struct rte_ipv6_hdr *ipv6_hdr = outer_l3_hdr;
struct udp_hdr *udp_hdr;
uint64_t ol_flags = 0;
@@ -120,7 +120,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
struct rte_mempool *mbp;
struct rte_mbuf *pkt;
struct rte_ether_hdr *eth_hdr;
-struct ipv4_hdr *ip_hdr;
+struct rte_ipv4_hdr *ip_hdr;
struct udp_hdr *udp_hdr;
uint16_t vlan_tci, vlan_tci_outer;
uint64_t ol_flags = 0;
@@ -176,7 +176,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);

/* Initialize IP header. */
-ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
memset(ip_hdr, 0, sizeof(*ip_hdr));
ip_hdr->version_ihl = IP_VHL_DEF;
ip_hdr->type_of_service = 0;
@@ -206,7 +206,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
pkt->vlan_tci = vlan_tci;
pkt->vlan_tci_outer = vlan_tci_outer;
pkt->l2_len = sizeof(struct rte_ether_hdr);
-pkt->l3_len = sizeof(struct ipv4_hdr);
+pkt->l3_len = sizeof(struct rte_ipv4_hdr);
pkts_burst[nb_pkt] = pkt;

next_flow = (next_flow + 1) % cfg_n_flows;
@@ -243,7 +243,7 @@ ipv4_addr_dump(const char *what, uint32_t be_ipv4_addr)
}

static uint16_t
-ipv4_hdr_cksum(struct ipv4_hdr *ip_h)
+ipv4_hdr_cksum(struct rte_ipv4_hdr *ip_h)
{
uint16_t *v16_h;
uint32_t ip_cksum;
@@ -278,7 +278,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
struct rte_ether_hdr *eth_h;
struct rte_vlan_hdr *vlan_h;
struct rte_arp_hdr *arp_h;
-struct ipv4_hdr *ip_h;
+struct rte_ipv4_hdr *ip_h;
struct rte_icmp_hdr *icmp_h;
struct rte_ether_addr eth_addr;
uint32_t retry;
@@ -418,7 +418,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
rte_pktmbuf_free(pkt);
continue;
}
-ip_h = (struct ipv4_hdr *) ((char *)eth_h + l2_len);
+ip_h = (struct rte_ipv4_hdr *) ((char *)eth_h + l2_len);
if (verbose_level > 0) {
ipv4_addr_dump(" IPV4: src=", ip_h->src_addr);
ipv4_addr_dump(" dst=", ip_h->dst_addr);
@@ -431,7 +431,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
* Check if packet is a ICMP echo request.
*/
icmp_h = (struct rte_icmp_hdr *) ((char *)ip_h +
-sizeof(struct ipv4_hdr));
+sizeof(struct rte_ipv4_hdr));
if (! ((ip_h->next_proto_id == IPPROTO_ICMP) &&
(icmp_h->icmp_type == RTE_IP_ICMP_ECHO_REQUEST) &&
(icmp_h->icmp_code == 0))) {
@@ -99,7 +99,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
mb->ol_flags |= ol_flags;
mb->l2_len = sizeof(struct rte_ether_hdr);
-mb->l3_len = sizeof(struct ipv4_hdr);
+mb->l3_len = sizeof(struct rte_ipv4_hdr);
mb->vlan_tci = txp->tx_vlan_id;
mb->vlan_tci_outer = txp->tx_vlan_id_outer;
}
@@ -40,7 +40,7 @@ mbuf_field_set(struct rte_mbuf *mb, uint64_t ol_flags)
mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
mb->ol_flags |= ol_flags;
mb->l2_len = sizeof(struct rte_ether_hdr);
-mb->l3_len = sizeof(struct ipv4_hdr);
+mb->l3_len = sizeof(struct rte_ipv4_hdr);
}

#endif /* _MACSWAP_COMMON_H_ */
@@ -53,7 +53,7 @@ uint32_t tx_ip_dst_addr = (192U << 24) | (18 << 16) | (0 << 8) | 2;
#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)

-static struct ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
+static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */

@@ -95,7 +95,7 @@ copy_buf_to_pkt(void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
}

static void
-setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
+setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr,
struct udp_hdr *udp_hdr,
uint16_t pkt_data_len)
{
@@ -115,7 +115,7 @@ setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
/*
* Initialize IP header.
*/
-pkt_len = (uint16_t) (pkt_len + sizeof(struct ipv4_hdr));
+pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr));
ip_hdr->version_ihl = IP_VHL_DEF;
ip_hdr->type_of_service = 0;
ip_hdr->fragment_offset = 0;
@@ -177,7 +177,7 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
pkt->vlan_tci = vlan_tci;
pkt->vlan_tci_outer = vlan_tci_outer;
pkt->l2_len = sizeof(struct rte_ether_hdr);
-pkt->l3_len = sizeof(struct ipv4_hdr);
+pkt->l3_len = sizeof(struct rte_ipv4_hdr);

pkt_len = pkt->data_len;
pkt_seg = pkt;
@@ -195,11 +195,11 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
sizeof(struct rte_ether_hdr));
if (txonly_multi_flow) {
-struct ipv4_hdr *ip_hdr;
+struct rte_ipv4_hdr *ip_hdr;
uint32_t addr;

ip_hdr = rte_pktmbuf_mtod_offset(pkt,
-struct ipv4_hdr *,
+struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
/*
* Generate multiple flows by varying IP src addr. This
@@ -213,7 +213,7 @@ pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp,
}
copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
sizeof(struct rte_ether_hdr) +
-sizeof(struct ipv4_hdr));
+sizeof(struct rte_ipv4_hdr));
/*
* Complete first mbuf of packet and append it to the
* burst of packets to be transmitted.
@@ -349,7 +349,7 @@ tx_only_begin(__attribute__((unused)) portid_t pi)

pkt_data_len = (uint16_t) (tx_pkt_length - (
sizeof(struct rte_ether_hdr) +
-sizeof(struct ipv4_hdr) +
+sizeof(struct rte_ipv4_hdr) +
sizeof(struct udp_hdr)));
setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
}
@@ -103,8 +103,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
if (sw_packet_type & RTE_PTYPE_INNER_L4_MASK)
printf(" - inner_l4_len=%d", hdr_lens.inner_l4_len);
if (is_encapsulation) {
-struct ipv4_hdr *ipv4_hdr;
-struct ipv6_hdr *ipv6_hdr;
+struct rte_ipv4_hdr *ipv4_hdr;
+struct rte_ipv6_hdr *ipv6_hdr;
struct udp_hdr *udp_hdr;
uint8_t l2_len;
uint8_t l3_len;
@@ -116,15 +116,15 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],

/* Do not support ipv4 option field */
if (RTE_ETH_IS_IPV4_HDR(packet_type)) {
-l3_len = sizeof(struct ipv4_hdr);
+l3_len = sizeof(struct rte_ipv4_hdr);
ipv4_hdr = rte_pktmbuf_mtod_offset(mb,
-struct ipv4_hdr *,
+struct rte_ipv4_hdr *,
l2_len);
l4_proto = ipv4_hdr->next_proto_id;
} else {
-l3_len = sizeof(struct ipv6_hdr);
+l3_len = sizeof(struct rte_ipv6_hdr);
ipv6_hdr = rte_pktmbuf_mtod_offset(mb,
-struct ipv6_hdr *,
+struct rte_ipv6_hdr *,
l2_len);
l4_proto = ipv6_hdr->proto;
}
@@ -140,7 +140,7 @@ initialize_sctp_header(struct sctp_hdr *sctp_hdr, uint16_t src_port,
}

uint16_t
-initialize_ipv6_header(struct ipv6_hdr *ip_hdr, uint8_t *src_addr,
+initialize_ipv6_header(struct rte_ipv6_hdr *ip_hdr, uint8_t *src_addr,
uint8_t *dst_addr, uint16_t pkt_data_len)
{
ip_hdr->vtc_flow = 0;
@@ -151,11 +151,11 @@ initialize_ipv6_header(struct ipv6_hdr *ip_hdr, uint8_t *src_addr,
rte_memcpy(ip_hdr->src_addr, src_addr, sizeof(ip_hdr->src_addr));
rte_memcpy(ip_hdr->dst_addr, dst_addr, sizeof(ip_hdr->dst_addr));

-return (uint16_t) (pkt_data_len + sizeof(struct ipv6_hdr));
+return (uint16_t) (pkt_data_len + sizeof(struct rte_ipv6_hdr));
}

uint16_t
-initialize_ipv4_header(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
+initialize_ipv4_header(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
uint32_t dst_addr, uint16_t pkt_data_len)
{
uint16_t pkt_len;
@@ -165,7 +165,7 @@ initialize_ipv4_header(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
/*
* Initialize IP header.
*/
-pkt_len = (uint16_t) (pkt_data_len + sizeof(struct ipv4_hdr));
+pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_ipv4_hdr));

ip_hdr->version_ihl = IP_VHL_DEF;
ip_hdr->type_of_service = 0;
@@ -203,7 +203,7 @@ initialize_ipv4_header(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
}

uint16_t
-initialize_ipv4_header_proto(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
+initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
uint32_t dst_addr, uint16_t pkt_data_len, uint8_t proto)
{
uint16_t pkt_len;
@@ -213,7 +213,7 @@ initialize_ipv4_header_proto(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
/*
* Initialize IP header.
*/
-pkt_len = (uint16_t) (pkt_data_len + sizeof(struct ipv4_hdr));
+pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_ipv4_hdr));

ip_hdr->version_ihl = IP_VHL_DEF;
ip_hdr->type_of_service = 0;
@@ -304,13 +304,15 @@ generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
copy_buf_to_pkt(eth_hdr, eth_hdr_size, pkt, 0);

if (ipv4) {
-copy_buf_to_pkt(ip_hdr, sizeof(struct ipv4_hdr), pkt, eth_hdr_size);
-copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt, eth_hdr_size +
-sizeof(struct ipv4_hdr));
+copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv4_hdr),
+pkt, eth_hdr_size);
+copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt,
+eth_hdr_size + sizeof(struct rte_ipv4_hdr));
} else {
-copy_buf_to_pkt(ip_hdr, sizeof(struct ipv6_hdr), pkt, eth_hdr_size);
-copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt, eth_hdr_size +
-sizeof(struct ipv6_hdr));
+copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv6_hdr),
+pkt, eth_hdr_size);
+copy_buf_to_pkt(udp_hdr, sizeof(*udp_hdr), pkt,
+eth_hdr_size + sizeof(struct rte_ipv6_hdr));
}

/*
@@ -323,10 +325,10 @@ generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,

if (ipv4) {
pkt->vlan_tci = RTE_ETHER_TYPE_IPv4;
-pkt->l3_len = sizeof(struct ipv4_hdr);
+pkt->l3_len = sizeof(struct rte_ipv4_hdr);
} else {
pkt->vlan_tci = RTE_ETHER_TYPE_IPv6;
-pkt->l3_len = sizeof(struct ipv6_hdr);
+pkt->l3_len = sizeof(struct rte_ipv6_hdr);
}

pkts_burst[nb_pkt] = pkt;
@@ -383,45 +385,51 @@ generate_packet_burst_proto(struct rte_mempool *mp,
copy_buf_to_pkt(eth_hdr, eth_hdr_size, pkt, 0);

if (ipv4) {
-copy_buf_to_pkt(ip_hdr, sizeof(struct ipv4_hdr), pkt,
-eth_hdr_size);
+copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv4_hdr),
+pkt, eth_hdr_size);
switch (proto) {
case IPPROTO_UDP:
copy_buf_to_pkt(proto_hdr,
sizeof(struct udp_hdr), pkt,
-eth_hdr_size + sizeof(struct ipv4_hdr));
+eth_hdr_size +
+sizeof(struct rte_ipv4_hdr));
break;
case IPPROTO_TCP:
copy_buf_to_pkt(proto_hdr,
sizeof(struct tcp_hdr), pkt,
-eth_hdr_size + sizeof(struct ipv4_hdr));
+eth_hdr_size +
+sizeof(struct rte_ipv4_hdr));
break;
case IPPROTO_SCTP:
copy_buf_to_pkt(proto_hdr,
sizeof(struct sctp_hdr), pkt,
-eth_hdr_size + sizeof(struct ipv4_hdr));
+eth_hdr_size +
+sizeof(struct rte_ipv4_hdr));
break;
default:
break;
}
} else {
-copy_buf_to_pkt(ip_hdr, sizeof(struct ipv6_hdr), pkt,
-eth_hdr_size);
+copy_buf_to_pkt(ip_hdr, sizeof(struct rte_ipv6_hdr),
+pkt, eth_hdr_size);
switch (proto) {
case IPPROTO_UDP:
copy_buf_to_pkt(proto_hdr,
sizeof(struct udp_hdr), pkt,
-eth_hdr_size + sizeof(struct ipv6_hdr));
+eth_hdr_size +
+sizeof(struct rte_ipv6_hdr));
break;
case IPPROTO_TCP:
copy_buf_to_pkt(proto_hdr,
sizeof(struct tcp_hdr), pkt,
-eth_hdr_size + sizeof(struct ipv6_hdr));
+eth_hdr_size +
+sizeof(struct rte_ipv6_hdr));
break;
case IPPROTO_SCTP:
copy_buf_to_pkt(proto_hdr,
sizeof(struct sctp_hdr), pkt,
-eth_hdr_size + sizeof(struct ipv6_hdr));
+eth_hdr_size +
+sizeof(struct rte_ipv6_hdr));
break;
default:
break;
@@ -438,10 +446,10 @@ generate_packet_burst_proto(struct rte_mempool *mp,

if (ipv4) {
pkt->vlan_tci = RTE_ETHER_TYPE_IPv4;
-pkt->l3_len = sizeof(struct ipv4_hdr);
+pkt->l3_len = sizeof(struct rte_ipv4_hdr);
} else {
pkt->vlan_tci = RTE_ETHER_TYPE_IPv6;
-pkt->l3_len = sizeof(struct ipv6_hdr);
+pkt->l3_len = sizeof(struct rte_ipv6_hdr);
}

pkts_burst[nb_pkt] = pkt;
@@ -47,15 +47,15 @@ initialize_sctp_header(struct sctp_hdr *sctp_hdr, uint16_t src_port,
uint16_t dst_port, uint16_t pkt_data_len);

uint16_t
-initialize_ipv6_header(struct ipv6_hdr *ip_hdr, uint8_t *src_addr,
+initialize_ipv6_header(struct rte_ipv6_hdr *ip_hdr, uint8_t *src_addr,
uint8_t *dst_addr, uint16_t pkt_data_len);

uint16_t
-initialize_ipv4_header(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
+initialize_ipv4_header(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
uint32_t dst_addr, uint16_t pkt_data_len);

uint16_t
-initialize_ipv4_header_proto(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
+initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
uint32_t dst_addr, uint16_t pkt_data_len, uint8_t proto);

int
@@ -40,7 +40,7 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
.field_index = PROTO_FIELD_IPV4,
.input_index = PROTO_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
-offsetof(struct ipv4_hdr, next_proto_id),
+offsetof(struct rte_ipv4_hdr, next_proto_id),
},
/* next input field (IPv4 source address) - 4 consecutive bytes. */
{
@@ -50,7 +50,7 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
.field_index = SRC_FIELD_IPV4,
.input_index = SRC_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
-offsetof(struct ipv4_hdr, src_addr),
+offsetof(struct rte_ipv4_hdr, src_addr),
},
/* next input field (IPv4 destination address) - 4 consecutive bytes. */
{
@@ -60,7 +60,7 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
.field_index = DST_FIELD_IPV4,
.input_index = DST_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
-offsetof(struct ipv4_hdr, dst_addr),
+offsetof(struct rte_ipv4_hdr, dst_addr),
},
/*
* Next 2 fields (src & dst ports) form 4 consecutive bytes.
@@ -73,7 +73,7 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
.field_index = SRCP_FIELD_IPV4,
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
-sizeof(struct ipv4_hdr) +
+sizeof(struct rte_ipv4_hdr) +
offsetof(struct tcp_hdr, src_port),
},
{
@@ -83,7 +83,7 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
.field_index = DSTP_FIELD_IPV4,
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
-sizeof(struct ipv4_hdr) +
+sizeof(struct rte_ipv4_hdr) +
offsetof(struct tcp_hdr, dst_port),
},
};
@@ -490,7 +490,7 @@ init_ipv4_udp_traffic(struct rte_mempool *mp,
struct rte_mbuf **pkts_burst, uint32_t burst_size)
{
struct rte_ether_hdr pkt_eth_hdr;
-struct ipv4_hdr pkt_ipv4_hdr;
+struct rte_ipv4_hdr pkt_ipv4_hdr;
struct udp_hdr pkt_udp_hdr;
uint32_t src_addr = IPV4_ADDR(2, 2, 2, 3);
uint32_t dst_addr = IPV4_ADDR(2, 2, 2, 7);
@@ -527,7 +527,7 @@ init_ipv4_tcp_traffic(struct rte_mempool *mp,
struct rte_mbuf **pkts_burst, uint32_t burst_size)
{
struct rte_ether_hdr pkt_eth_hdr;
-struct ipv4_hdr pkt_ipv4_hdr;
+struct rte_ipv4_hdr pkt_ipv4_hdr;
struct tcp_hdr pkt_tcp_hdr;
uint32_t src_addr = IPV4_ADDR(1, 2, 3, 4);
uint32_t dst_addr = IPV4_ADDR(5, 6, 7, 8);
@@ -564,7 +564,7 @@ init_ipv4_sctp_traffic(struct rte_mempool *mp,
struct rte_mbuf **pkts_burst, uint32_t burst_size)
{
struct rte_ether_hdr pkt_eth_hdr;
-struct ipv4_hdr pkt_ipv4_hdr;
+struct rte_ipv4_hdr pkt_ipv4_hdr;
struct sctp_hdr pkt_sctp_hdr;
uint32_t src_addr = IPV4_ADDR(11, 12, 13, 14);
uint32_t dst_addr = IPV4_ADDR(15, 16, 17, 18);
@@ -531,7 +531,7 @@ const char null_encrypted_data[] =
"Network Security People Have A Strange Sense Of Humor unlike Other "
"People who have a normal sense of humour";

-struct ipv4_hdr ipv4_outer = {
+struct rte_ipv4_hdr ipv4_outer = {
.version_ihl = IPVERSION << 4 |
sizeof(ipv4_outer) / IPV4_IHL_MULTIPLIER,
.time_to_live = IPDEFTTL,
@@ -569,7 +569,8 @@ setup_test_string_tunneled(struct rte_mempool *mpool, const char *string,
size_t len, uint32_t spi, uint32_t seq)
{
struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
-uint32_t hdrlen = sizeof(struct ipv4_hdr) + sizeof(struct rte_esp_hdr);
+uint32_t hdrlen = sizeof(struct rte_ipv4_hdr) +
+sizeof(struct rte_esp_hdr);
uint32_t taillen = sizeof(struct esp_tail);
uint32_t t_len = len + hdrlen + taillen;
uint32_t padlen;
@@ -78,14 +78,14 @@ struct link_bonding_unittest_params {

/* Packet Headers */
struct rte_ether_hdr *pkt_eth_hdr;
-struct ipv4_hdr *pkt_ipv4_hdr;
-struct ipv6_hdr *pkt_ipv6_hdr;
+struct rte_ipv4_hdr *pkt_ipv4_hdr;
+struct rte_ipv6_hdr *pkt_ipv6_hdr;
struct udp_hdr *pkt_udp_hdr;

};

-static struct ipv4_hdr pkt_ipv4_hdr;
-static struct ipv6_hdr pkt_ipv6_hdr;
+static struct rte_ipv4_hdr pkt_ipv4_hdr;
+static struct rte_ipv6_hdr pkt_ipv6_hdr;
static struct udp_hdr pkt_udp_hdr;

static struct link_bonding_unittest_params default_params = {
@@ -735,8 +735,8 @@ generate_packets(struct rte_ether_addr *src_mac,
struct rte_ether_hdr pkt_eth_hdr;
struct udp_hdr pkt_udp_hdr;
union {
-struct ipv4_hdr v4;
-struct ipv6_hdr v6;
+struct rte_ipv4_hdr v4;
+struct rte_ipv6_hdr v6;
} pkt_ip_hdr;

int retval;
@@ -183,7 +183,7 @@ init_traffic(struct rte_mempool *mp,
struct rte_mbuf **pkts_burst, uint32_t burst_size)
{
struct rte_ether_hdr pkt_eth_hdr;
-struct ipv4_hdr pkt_ipv4_hdr;
+struct rte_ipv4_hdr pkt_ipv4_hdr;
struct udp_hdr pkt_udp_hdr;
uint32_t pktlen;
static uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
@@ -80,7 +80,7 @@ prepare_pkt(struct rte_sched_port *port, struct rte_mbuf *mbuf)
{
struct rte_ether_hdr *eth_hdr;
struct rte_vlan_hdr *vlan1, *vlan2;
-struct ipv4_hdr *ip_hdr;
+struct rte_ipv4_hdr *ip_hdr;

/* Simulate a classifier */
eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
@@ -90,7 +90,7 @@ prepare_pkt(struct rte_sched_port *port, struct rte_mbuf *mbuf)
eth_hdr = (struct rte_ether_hdr *)(
(uintptr_t)&eth_hdr->ether_type +
2 * sizeof(struct rte_vlan_hdr));
-ip_hdr = (struct ipv4_hdr *)(
+ip_hdr = (struct rte_ipv4_hdr *)(
(uintptr_t)eth_hdr + sizeof(eth_hdr->ether_type));

vlan1->vlan_tci = rte_cpu_to_be_16(SUBPORT);
@@ -110,7 +110,7 @@ test_thash(void)
union rte_thash_tuple tuple;
uint32_t rss_l3, rss_l3l4;
uint8_t rss_key_be[RTE_DIM(default_rss_key)];
-struct ipv6_hdr ipv6_hdr;
+struct rte_ipv6_hdr ipv6_hdr;

/* Convert RSS key*/
rte_convert_rss_key((uint32_t *)&default_rss_key,
@@ -154,7 +154,7 @@ To define classification for the IPv6 2-tuple: <protocol, IPv6 source address> o

.. code-block:: c

-struct struct ipv6_hdr {
+struct struct rte_ipv6_hdr {
uint32_t vtc_flow; /* IP version, traffic class & flow label. */
uint16_t payload_len; /* IP packet length - includes sizeof(ip_header). */
uint8_t proto; /* Protocol, next header. */
@@ -173,7 +173,7 @@ The following array of field definitions can be used:
.size = sizeof (uint8_t),
.field_index = 0,
.input_index = 0,
-.offset = offsetof (struct ipv6_hdr, proto),
+.offset = offsetof (struct rte_ipv6_hdr, proto),
},

{
@@ -181,7 +181,7 @@ The following array of field definitions can be used:
.size = sizeof (uint32_t),
.field_index = 1,
.input_index = 1,
-.offset = offsetof (struct ipv6_hdr, src_addr[0]),
+.offset = offsetof (struct rte_ipv6_hdr, src_addr[0]),
},

{
@@ -189,7 +189,7 @@ The following array of field definitions can be used:
.size = sizeof (uint32_t),
.field_index = 2,
.input_index = 2,
-.offset = offsetof (struct ipv6_hdr, src_addr[4]),
+.offset = offsetof (struct rte_ipv6_hdr, src_addr[4]),
},

{
@@ -197,7 +197,7 @@ The following array of field definitions can be used:
.size = sizeof (uint32_t),
.field_index = 3,
.input_index = 3,
-.offset = offsetof (struct ipv6_hdr, src_addr[8]),
+.offset = offsetof (struct rte_ipv6_hdr, src_addr[8]),
},

{
@@ -205,7 +205,7 @@ The following array of field definitions can be used:
.size = sizeof (uint32_t),
.field_index = 4,
.input_index = 4,
-.offset = offsetof (struct ipv6_hdr, src_addr[12]),
+.offset = offsetof (struct rte_ipv6_hdr, src_addr[12]),
},
};
@@ -92,7 +92,7 @@ initialisation of the ``Flow Classify`` application..
.field_index = PROTO_FIELD_IPV4,
.input_index = PROTO_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
-offsetof(struct ipv4_hdr, next_proto_id),
+offsetof(struct rte_ipv4_hdr, next_proto_id),
},
/* next input field (IPv4 source address) - 4 consecutive bytes. */
{
@@ -102,7 +102,7 @@ initialisation of the ``Flow Classify`` application..
.field_index = SRC_FIELD_IPV4,
.input_index = SRC_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
-offsetof(struct ipv4_hdr, src_addr),
+offsetof(struct rte_ipv4_hdr, src_addr),
},
/* next input field (IPv4 destination address) - 4 consecutive bytes. */
{
@@ -112,7 +112,7 @@ initialisation of the ``Flow Classify`` application..
.field_index = DST_FIELD_IPV4,
.input_index = DST_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
-offsetof(struct ipv4_hdr, dst_addr),
+offsetof(struct rte_ipv4_hdr, dst_addr),
},
/*
* Next 2 fields (src & dst ports) form 4 consecutive bytes.
@@ -125,7 +125,7 @@ initialisation of the ``Flow Classify`` application..
.field_index = SRCP_FIELD_IPV4,
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
-sizeof(struct ipv4_hdr) +
+sizeof(struct rte_ipv4_hdr) +
offsetof(struct tcp_hdr, src_port),
},
{
@@ -135,7 +135,7 @@ initialisation of the ``Flow Classify`` application..
.field_index = DSTP_FIELD_IPV4,
.input_index = SRCP_DESTP_INPUT_IPV4,
.offset = sizeof(struct rte_ether_hdr) +
-sizeof(struct ipv4_hdr) +
+sizeof(struct rte_ipv4_hdr) +
offsetof(struct tcp_hdr, dst_port),
},
};
@@ -146,7 +146,7 @@ Firstly, the Ethernet* header is removed from the packet and the IPv4 address is

/* Remove the Ethernet header from the input packet */

-iphdr = (struct ipv4_hdr *)rte_pktmbuf_adj(m, sizeof(struct rte_ether_hdr));
+iphdr = (struct rte_ipv4_hdr *)rte_pktmbuf_adj(m, sizeof(struct rte_ether_hdr));
RTE_ASSERT(iphdr != NULL);
dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
@@ -245,7 +245,7 @@ The get_ipv4_dst_port() function is shown below:
int ret = 0;
union ipv4_5tuple_host key;

-ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);
+ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct rte_ipv4_hdr, time_to_live);

__m128i data = _mm_loadu_si128((__m128i*)(ipv4_hdr));

@@ -273,10 +273,10 @@ The key code snippet of simple_ipv4_fwd_4pkts() is shown below:
{
// ...

-data[0] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[0], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
-data[1] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[1], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
-data[2] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[2], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
-data[3] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[3], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct ipv4_hdr, time_to_live)));
+data[0] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[0], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct rte_ipv4_hdr, time_to_live)));
+data[1] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[1], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct rte_ipv4_hdr, time_to_live)));
+data[2] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[2], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct rte_ipv4_hdr, time_to_live)));
+data[3] = _mm_loadu_si128((__m128i*)(rte_pktmbuf_mtod(m[3], unsigned char *) + sizeof(struct rte_ether_hdr) + offsetof(struct rte_ipv4_hdr, time_to_live)));

key[0].xmm = _mm_and_si128(data[0], mask0);
key[1].xmm = _mm_and_si128(data[1], mask0);
@@ -309,7 +309,7 @@ for LPM-based lookups is done by the get_ipv4_dst_port() function below:
.. code-block:: c

static inline uint16_t
-get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
+get_ipv4_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid, lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
uint8_t next_hop;
@@ -191,12 +191,12 @@ which tells the node where the packet has to be distributed.
efd_value_t data[EFD_BURST_MAX];
const void *key_ptrs[EFD_BURST_MAX];

-struct ipv4_hdr *ipv4_hdr;
+struct rte_ipv4_hdr *ipv4_hdr;
uint32_t ipv4_dst_ip[EFD_BURST_MAX];

for (i = 0; i < rx_count; i++) {
/* Handle IPv4 header.*/
-ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i], struct ipv4_hdr *,
+ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
key_ptrs[i] = (void *)&ipv4_dst_ip[i];
@@ -348,7 +348,7 @@ flow is not handled by the node.
static inline void
handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)
{
-struct ipv4_hdr *ipv4_hdr;
+struct rte_ipv4_hdr *ipv4_hdr;
uint32_t ipv4_dst_ip[PKT_READ_SIZE];
const void *key_ptrs[PKT_READ_SIZE];
unsigned int i;
@@ -356,7 +356,7 @@ flow is not handled by the node.

for (i = 0; i < num_packets; i++) {
/* Handle IPv4 header.*/
-ipv4_hdr = rte_pktmbuf_mtod_offset(bufs[i], struct ipv4_hdr *,
+ipv4_hdr = rte_pktmbuf_mtod_offset(bufs[i], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
key_ptrs[i] = &ipv4_dst_ip[i];
@@ -591,7 +591,7 @@ mode6_debug(const char __attribute__((unused)) *info,
struct rte_ether_hdr *eth_h, uint16_t port,
uint32_t __attribute__((unused)) *burstnumber)
{
-struct ipv4_hdr *ipv4_h;
+struct rte_ipv4_hdr *ipv4_h;
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
struct rte_arp_hdr *arp_h;
char dst_ip[16];
@@ -608,7 +608,7 @@ mode6_debug(const char __attribute__((unused)) *info,
#endif

if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
-ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
+ipv4_h = (struct rte_ipv4_hdr *)((char *)(eth_h + 1) + offset);
ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
ipv4_addr_to_dot(ipv4_h->dst_addr, dst_ip, MaxIPv4String);
@@ -755,13 +755,13 @@ ether_hash(struct rte_ether_hdr *eth_hdr)
}

static inline uint32_t
-ipv4_hash(struct ipv4_hdr *ipv4_hdr)
+ipv4_hash(struct rte_ipv4_hdr *ipv4_hdr)
{
return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}

static inline uint32_t
-ipv6_hash(struct ipv6_hdr *ipv6_hdr)
+ipv6_hash(struct rte_ipv6_hdr *ipv6_hdr)
{
unaligned_uint32_t *word_src_addr =
(unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
@@ -812,12 +812,12 @@ burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
vlan_offset = get_vlan_offset(eth_hdr, &proto);

if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) == proto) {
-struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv4_hash(ipv4_hdr);

} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) == proto) {
-struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv6_hash(ipv6_hdr);
}
@@ -852,7 +852,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
l4hash = 0;

if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) == proto) {
-struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
+struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
size_t ip_hdr_offset;

@@ -883,7 +883,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
}
}
} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) == proto) {
-struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
+struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv6_hash(ipv6_hdr);
@@ -201,22 +201,22 @@ static inline void dpaa_checksum(struct rte_mbuf *mbuf)
struct rte_ether_hdr *eth_hdr =
rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
-struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
-struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
+struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;

DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);

if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
RTE_PTYPE_L3_IPV4_EXT)) {
-ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
ipv4_hdr->hdr_checksum = 0;
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
} else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
RTE_PTYPE_L3_IPV6) ||
((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
RTE_PTYPE_L3_IPV6_EXT))
-ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;

if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
@@ -222,7 +222,7 @@ em_set_xmit_ctx(struct em_tx_queue* txq,
/* setup IPCS* fields */
ctx.lower_setup.ip_fields.ipcss = (uint8_t)l2len;
ctx.lower_setup.ip_fields.ipcso = (uint8_t)(l2len +
-offsetof(struct ipv4_hdr, hdr_checksum));
+offsetof(struct rte_ipv4_hdr, hdr_checksum));

/*
* When doing checksum or TCP segmentation with IPv6 headers,
@@ -2137,7 +2137,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint32_t i;
struct rte_mbuf *m;
struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
-struct ipv4_hdr *ip_hdr;
+struct rte_ipv4_hdr *ip_hdr;
uint64_t ol_flags;
uint16_t frag_field;

@@ -2154,7 +2154,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
if (unlikely(m->l2_len == 0))
m->l2_len = sizeof(struct rte_ether_hdr);

-ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
m->l2_len);
frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
@@ -182,9 +182,9 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
-struct ipv4_hdr ip4_mask, ip4_val;
-memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
-memset(&ip4_val, 0, sizeof(struct ipv4_hdr));
+struct rte_ipv4_hdr ip4_mask, ip4_val;
+memset(&ip4_mask, 0, sizeof(struct rte_ipv4_hdr));
+memset(&ip4_val, 0, sizeof(struct rte_ipv4_hdr));

if (input->flow.ip4_flow.tos) {
ip4_mask.type_of_service = masks->ipv4_mask.tos;
@@ -212,7 +212,7 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
}

enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
-&ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
+&ip4_mask, &ip4_val, sizeof(struct rte_ipv4_hdr));
}

if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
@@ -271,9 +271,9 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
-struct ipv6_hdr ipv6_mask, ipv6_val;
-memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
-memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));
+struct rte_ipv6_hdr ipv6_mask, ipv6_val;
+memset(&ipv6_mask, 0, sizeof(struct rte_ipv6_hdr));
+memset(&ipv6_val, 0, sizeof(struct rte_ipv6_hdr));

if (input->flow.ipv6_flow.proto) {
ipv6_mask.proto = masks->ipv6_mask.proto;
@@ -301,7 +301,7 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
}

enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
-&ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
+&ipv6_mask, &ipv6_val, sizeof(struct rte_ipv6_hdr));
}
}
@@ -411,7 +411,7 @@ enic_copy_item_ipv4_v1(struct copy_item_args *arg)
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *mask = item->mask;
struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
-struct ipv4_hdr supported_mask = {
+struct rte_ipv4_hdr supported_mask = {
.src_addr = 0xffffffff,
.dst_addr = 0xffffffff,
};
@@ -606,9 +606,9 @@ enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)
if (!mask)
mask = &rte_flow_item_ipv4_mask;
/* Append ipv4 header to L5 and set ether type = ipv4 */
-arg->l3_proto_off = *off + offsetof(struct ipv4_hdr, next_proto_id);
+arg->l3_proto_off = *off + offsetof(struct rte_ipv4_hdr, next_proto_id);
return copy_inner_common(&arg->filter->u.generic_1, off,
-arg->item->spec, mask, sizeof(struct ipv4_hdr),
+arg->item->spec, mask, sizeof(struct rte_ipv4_hdr),
arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4), 2);
}

@@ -622,9 +622,9 @@ enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)
if (!mask)
mask = &rte_flow_item_ipv6_mask;
/* Append ipv6 header to L5 and set ether type = ipv6 */
-arg->l3_proto_off = *off + offsetof(struct ipv6_hdr, proto);
+arg->l3_proto_off = *off + offsetof(struct rte_ipv6_hdr, proto);
return copy_inner_common(&arg->filter->u.generic_1, off,
-arg->item->spec, mask, sizeof(struct ipv6_hdr),
+arg->item->spec, mask, sizeof(struct rte_ipv6_hdr),
arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6), 2);
}

@@ -773,9 +773,9 @@ enic_copy_item_ipv4_v2(struct copy_item_args *arg)
mask = &rte_flow_item_ipv4_mask;

memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
-sizeof(struct ipv4_hdr));
+sizeof(struct rte_ipv4_hdr));
memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
-sizeof(struct ipv4_hdr));
+sizeof(struct rte_ipv4_hdr));
return 0;
}

@@ -802,9 +802,9 @@ enic_copy_item_ipv6_v2(struct copy_item_args *arg)
mask = &rte_flow_item_ipv6_mask;

memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
-sizeof(struct ipv6_hdr));
+sizeof(struct rte_ipv6_hdr));
memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
-sizeof(struct ipv6_hdr));
+sizeof(struct rte_ipv6_hdr));
return 0;
}

@@ -884,16 +884,16 @@ enic_copy_item_sctp_v2(struct copy_item_args *arg)
* the protocol number in the IP pattern.
*/
if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
-struct ipv4_hdr *ip;
-ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
+struct rte_ipv4_hdr *ip;
+ip = (struct rte_ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
ip_proto_mask = &ip->next_proto_id;
-ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
+ip = (struct rte_ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
ip_proto = &ip->next_proto_id;
} else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
-struct ipv6_hdr *ip;
-ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
+struct rte_ipv6_hdr *ip;
+ip = (struct rte_ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
ip_proto_mask = &ip->proto;
-ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
+ip = (struct rte_ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
ip_proto = &ip->proto;
} else {
/* Need IPv4/IPv6 pattern first */
@@ -686,8 +686,8 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
uint16_t *ether_type;
uint8_t len = 2 * sizeof(struct rte_ether_addr);
-struct ipv4_hdr *ip;
-struct ipv6_hdr *ip6;
+struct rte_ipv4_hdr *ip;
+struct rte_ipv6_hdr *ip6;
static const uint8_t next_proto[] = {
[RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
@@ -723,7 +723,7 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
case RTE_ETH_FLOW_FRAG_IPV4:
-ip = (struct ipv4_hdr *)raw_pkt;
+ip = (struct rte_ipv4_hdr *)raw_pkt;

*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
@@ -743,14 +743,14 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
*/
ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
-len += sizeof(struct ipv4_hdr);
+len += sizeof(struct rte_ipv4_hdr);
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
case RTE_ETH_FLOW_FRAG_IPV6:
-ip6 = (struct ipv6_hdr *)raw_pkt;
+ip6 = (struct rte_ipv6_hdr *)raw_pkt;

*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
ip6->vtc_flow =
@@ -776,7 +776,7 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
rte_memcpy(&(ip6->dst_addr),
&(fdir_input->flow.ipv6_flow.src_ip),
IPV6_ADDR_LEN);
-len += sizeof(struct ipv6_hdr);
+len += sizeof(struct rte_ipv6_hdr);
break;
default:
PMD_DRV_LOG(ERR, "unknown flow type %u.",
@@ -960,8 +960,8 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
uint16_t *ether_type;
uint8_t len = 2 * sizeof(struct rte_ether_addr);
-struct ipv4_hdr *ip;
-struct ipv6_hdr *ip6;
+struct rte_ipv4_hdr *ip;
+struct rte_ipv6_hdr *ip6;
uint8_t pctype = fdir_input->pctype;
bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
static const uint8_t next_proto[] = {
@@ -1007,7 +1007,7 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
is_customized_pctype) {
-ip = (struct ipv4_hdr *)raw_pkt;
+ip = (struct rte_ipv4_hdr *)raw_pkt;

*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
@@ -1034,13 +1034,13 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
cus_pctype->index == I40E_CUSTOMIZED_GTPU)
ip->next_proto_id = IPPROTO_UDP;
-len += sizeof(struct ipv4_hdr);
+len += sizeof(struct rte_ipv4_hdr);
} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
-ip6 = (struct ipv6_hdr *)raw_pkt;
+ip6 = (struct rte_ipv6_hdr *)raw_pkt;

*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
ip6->vtc_flow =
@@ -1066,7 +1066,7 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
rte_memcpy(&ip6->dst_addr,
&fdir_input->flow.ipv6_flow.src_ip,
IPV6_ADDR_LEN);
-len += sizeof(struct ipv6_hdr);
+len += sizeof(struct rte_ipv6_hdr);
} else {
PMD_DRV_LOG(ERR, "unknown pctype %u.",
fdir_input->pctype);
@@ -1093,8 +1093,8 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
struct tcp_hdr *tcp;
struct sctp_hdr *sctp;
struct rte_flow_item_gtp *gtp;
-struct ipv4_hdr *gtp_ipv4;
-struct ipv6_hdr *gtp_ipv6;
+struct rte_ipv4_hdr *gtp_ipv4;
+struct rte_ipv6_hdr *gtp_ipv6;
uint8_t size, dst = 0;
uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
int len;
@@ -1232,7 +1232,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,

if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
-gtp_ipv4 = (struct ipv4_hdr *)
+gtp_ipv4 = (struct rte_ipv4_hdr *)
((unsigned char *)gtp +
sizeof(struct rte_flow_item_gtp));
gtp_ipv4->version_ihl =
@@ -1242,11 +1242,11 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
rte_cpu_to_be_16(
I40E_FDIR_INNER_IP_DEFAULT_LEN);
payload = (unsigned char *)gtp_ipv4 +
-sizeof(struct ipv4_hdr);
+sizeof(struct rte_ipv4_hdr);
} else if (cus_pctype->index ==
I40E_CUSTOMIZED_GTPU_IPV6) {
gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
-gtp_ipv6 = (struct ipv6_hdr *)
+gtp_ipv6 = (struct rte_ipv6_hdr *)
((unsigned char *)gtp +
sizeof(struct rte_flow_item_gtp));
gtp_ipv6->vtc_flow =
@@ -1260,7 +1260,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
gtp_ipv6->hop_limits =
I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
payload = (unsigned char *)gtp_ipv6 +
-sizeof(struct ipv6_hdr);
+sizeof(struct rte_ipv6_hdr);
} else
payload = (unsigned char *)gtp +
sizeof(struct rte_flow_item_gtp);
@@ -2548,13 +2548,13 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
-attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
.src_addr = input->flow.ip4_flow.src_ip,
.dst_addr = input->flow.ip4_flow.dst_ip,
.time_to_live = input->flow.ip4_flow.ttl,
.type_of_service = input->flow.ip4_flow.tos,
};
-attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
+attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
.src_addr = mask->ipv4_mask.src_ip,
.dst_addr = mask->ipv4_mask.dst_ip,
.time_to_live = mask->ipv4_mask.ttl,
@@ -2570,7 +2570,7 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
-attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
.hop_limits = input->flow.ipv6_flow.hop_limits,
.proto = input->flow.ipv6_flow.proto,
};
@@ -1213,8 +1213,8 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
{
struct rte_ether_hdr *eth = NULL;
struct rte_vlan_hdr *vlan = NULL;
-struct ipv4_hdr *ipv4 = NULL;
-struct ipv6_hdr *ipv6 = NULL;
+struct rte_ipv4_hdr *ipv4 = NULL;
+struct rte_ipv6_hdr *ipv6 = NULL;
struct udp_hdr *udp = NULL;
struct rte_vxlan_hdr *vxlan = NULL;
struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
@@ -1250,7 +1250,7 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
-ipv4 = (struct ipv4_hdr *)&buf[temp_size];
+ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
if (!vlan && !eth)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1268,7 +1268,7 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
-ipv6 = (struct ipv6_hdr *)&buf[temp_size];
+ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
if (!vlan && !eth)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@ -732,12 +732,12 @@ flow_tcf_pedit_key_set_dec_ttl(const struct rte_flow_action *actions,
|
||||
if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) {
|
||||
p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4;
|
||||
p_parser->keys[idx].off =
|
||||
offsetof(struct ipv4_hdr, time_to_live);
|
||||
offsetof(struct rte_ipv4_hdr, time_to_live);
|
||||
}
|
||||
if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6) {
|
||||
p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6;
|
||||
p_parser->keys[idx].off =
|
||||
offsetof(struct ipv6_hdr, hop_limits);
|
||||
offsetof(struct rte_ipv6_hdr, hop_limits);
|
||||
}
|
||||
if (actions->type == RTE_FLOW_ACTION_TYPE_DEC_TTL) {
|
||||
p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_ADD;
|
||||
@ -801,8 +801,8 @@ flow_tcf_pedit_key_set_ipv6_addr(const struct rte_flow_action *actions,
|
||||
int keys = NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);
|
||||
int off_base =
|
||||
actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
|
||||
offsetof(struct ipv6_hdr, src_addr) :
|
||||
offsetof(struct ipv6_hdr, dst_addr);
|
||||
offsetof(struct rte_ipv6_hdr, src_addr) :
|
||||
offsetof(struct rte_ipv6_hdr, dst_addr);
|
||||
const struct rte_flow_action_set_ipv6 *conf =
|
||||
(const struct rte_flow_action_set_ipv6 *)actions->conf;
|
||||
|
||||
@ -836,8 +836,8 @@ flow_tcf_pedit_key_set_ipv4_addr(const struct rte_flow_action *actions,
|
||||
p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
|
||||
p_parser->keys[idx].off =
|
||||
actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
|
||||
offsetof(struct ipv4_hdr, src_addr) :
|
||||
offsetof(struct ipv4_hdr, dst_addr);
|
||||
offsetof(struct rte_ipv4_hdr, src_addr) :
|
||||
offsetof(struct rte_ipv4_hdr, dst_addr);
|
||||
p_parser->keys[idx].mask = ~UINT32_MAX;
|
||||
p_parser->keys[idx].val =
|
||||
((const struct rte_flow_action_set_ipv4 *)
|
||||
|
@ -1170,7 +1170,7 @@ mrvl_parse_ip6(const struct rte_flow_item *item,
|
||||
struct rte_flow_error *error)
|
||||
{
|
||||
const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
|
||||
struct ipv6_hdr zero;
|
||||
struct rte_ipv6_hdr zero;
|
||||
uint32_t flow_mask;
|
||||
int ret;
|
||||
|
||||
|
@ -457,8 +457,8 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
|
||||
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
|
||||
uint16_t *ether_type;
|
||||
uint8_t *raw_pkt;
|
||||
struct ipv4_hdr *ip;
|
||||
struct ipv6_hdr *ip6;
|
||||
struct rte_ipv4_hdr *ip;
|
||||
struct rte_ipv6_hdr *ip6;
|
||||
struct udp_hdr *udp;
|
||||
struct tcp_hdr *tcp;
|
||||
uint16_t len;
|
||||
@ -474,14 +474,14 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
|
||||
*ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
|
||||
switch (arfs->tuple.eth_proto) {
|
||||
case RTE_ETHER_TYPE_IPv4:
|
||||
ip = (struct ipv4_hdr *)raw_pkt;
|
||||
ip = (struct rte_ipv4_hdr *)raw_pkt;
|
||||
ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
|
||||
ip->total_length = sizeof(struct ipv4_hdr);
|
||||
ip->total_length = sizeof(struct rte_ipv4_hdr);
|
||||
ip->next_proto_id = arfs->tuple.ip_proto;
|
||||
ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
|
||||
ip->dst_addr = arfs->tuple.dst_ipv4;
|
||||
ip->src_addr = arfs->tuple.src_ipv4;
|
||||
len += sizeof(struct ipv4_hdr);
|
||||
len += sizeof(struct rte_ipv4_hdr);
|
||||
params->ipv4 = true;
|
||||
|
||||
raw_pkt = (uint8_t *)buff;
|
||||
@ -507,7 +507,7 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
|
||||
}
|
||||
break;
|
||||
case RTE_ETHER_TYPE_IPv6:
|
||||
ip6 = (struct ipv6_hdr *)raw_pkt;
|
||||
ip6 = (struct rte_ipv6_hdr *)raw_pkt;
|
||||
ip6->proto = arfs->tuple.ip_proto;
|
||||
ip6->vtc_flow =
|
||||
rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
|
||||
@ -516,7 +516,7 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
|
||||
IPV6_ADDR_LEN);
|
||||
rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
|
||||
IPV6_ADDR_LEN);
|
||||
len += sizeof(struct ipv6_hdr);
|
||||
len += sizeof(struct rte_ipv6_hdr);
|
||||
params->ipv6 = true;
|
||||
|
||||
raw_pkt = (uint8_t *)buff;
|
||||
|
@ -951,8 +951,8 @@ static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
|
||||
{
|
||||
uint32_t packet_type = RTE_PTYPE_UNKNOWN;
|
||||
struct rte_ether_hdr *eth_hdr;
|
||||
struct ipv4_hdr *ipv4_hdr;
|
||||
struct ipv6_hdr *ipv6_hdr;
|
||||
struct rte_ipv4_hdr *ipv4_hdr;
|
||||
struct rte_ipv6_hdr *ipv6_hdr;
|
||||
struct rte_vlan_hdr *vlan_hdr;
|
||||
uint16_t ethertype;
|
||||
bool vlan_tagged = 0;
|
||||
@ -972,14 +972,16 @@ static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
|
||||
|
||||
if (ethertype == RTE_ETHER_TYPE_IPv4) {
|
||||
packet_type |= RTE_PTYPE_L3_IPV4;
|
||||
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, len);
|
||||
ipv4_hdr = rte_pktmbuf_mtod_offset(m,
|
||||
struct rte_ipv4_hdr *, len);
|
||||
if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
|
||||
packet_type |= RTE_PTYPE_L4_TCP;
|
||||
else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
|
||||
packet_type |= RTE_PTYPE_L4_UDP;
|
||||
} else if (ethertype == RTE_ETHER_TYPE_IPv6) {
|
||||
packet_type |= RTE_PTYPE_L3_IPV6;
|
||||
ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, len);
|
||||
ipv6_hdr = rte_pktmbuf_mtod_offset(m,
|
||||
struct rte_ipv6_hdr *, len);
|
||||
if (ipv6_hdr->proto == IPPROTO_TCP)
|
||||
packet_type |= RTE_PTYPE_L4_TCP;
|
||||
else if (ipv6_hdr->proto == IPPROTO_UDP)
|
||||
@ -1141,7 +1143,7 @@ static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
|
||||
static inline uint8_t
|
||||
qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
|
||||
{
|
||||
struct ipv4_hdr *ip;
|
||||
struct rte_ipv4_hdr *ip;
|
||||
uint16_t pkt_csum;
|
||||
uint16_t calc_csum;
|
||||
uint16_t val;
|
||||
@ -1152,7 +1154,7 @@ qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
|
||||
if (unlikely(val)) {
|
||||
m->packet_type = qede_rx_cqe_to_pkt_type(flag);
|
||||
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
|
||||
ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
|
||||
ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
|
||||
sizeof(struct rte_ether_hdr));
|
||||
pkt_csum = ip->hdr_checksum;
|
||||
ip->hdr_checksum = 0;
|
||||
|
@ -29,10 +29,10 @@ extern "C" {
|
||||
static inline uint16_t
|
||||
sfc_tso_ip4_get_ipid(const uint8_t *pkt_hdrp, size_t ip_hdr_off)
|
||||
{
|
||||
const struct ipv4_hdr *ip_hdrp;
|
||||
const struct rte_ipv4_hdr *ip_hdrp;
|
||||
uint16_t ipid;
|
||||
|
||||
ip_hdrp = (const struct ipv4_hdr *)(pkt_hdrp + ip_hdr_off);
|
||||
ip_hdrp = (const struct rte_ipv4_hdr *)(pkt_hdrp + ip_hdr_off);
|
||||
rte_memcpy(&ipid, &ip_hdrp->packet_id, sizeof(ipid));
|
||||
|
||||
return rte_be_to_cpu_16(ipid);
|
||||
|
@ -660,7 +660,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv4[] = {
|
||||
.size = sizeof(uint8_t),
|
||||
.field_index = 0,
|
||||
.input_index = 0,
|
||||
.offset = offsetof(struct ipv4_hdr, next_proto_id),
|
||||
.offset = offsetof(struct rte_ipv4_hdr, next_proto_id),
|
||||
},
|
||||
|
||||
/* Source IP address (IPv4) */
|
||||
@ -669,7 +669,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv4[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 1,
|
||||
.input_index = 1,
|
||||
.offset = offsetof(struct ipv4_hdr, src_addr),
|
||||
.offset = offsetof(struct rte_ipv4_hdr, src_addr),
|
||||
},
|
||||
|
||||
/* Destination IP address (IPv4) */
|
||||
@ -678,7 +678,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv4[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 2,
|
||||
.input_index = 2,
|
||||
.offset = offsetof(struct ipv4_hdr, dst_addr),
|
||||
.offset = offsetof(struct rte_ipv4_hdr, dst_addr),
|
||||
},
|
||||
|
||||
/* Source Port */
|
||||
@ -687,7 +687,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv4[] = {
|
||||
.size = sizeof(uint16_t),
|
||||
.field_index = 3,
|
||||
.input_index = 3,
|
||||
.offset = sizeof(struct ipv4_hdr) +
|
||||
.offset = sizeof(struct rte_ipv4_hdr) +
|
||||
offsetof(struct tcp_hdr, src_port),
|
||||
},
|
||||
|
||||
@ -697,7 +697,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv4[] = {
|
||||
.size = sizeof(uint16_t),
|
||||
.field_index = 4,
|
||||
.input_index = 3,
|
||||
.offset = sizeof(struct ipv4_hdr) +
|
||||
.offset = sizeof(struct rte_ipv4_hdr) +
|
||||
offsetof(struct tcp_hdr, dst_port),
|
||||
},
|
||||
};
|
||||
@ -709,7 +709,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint8_t),
|
||||
.field_index = 0,
|
||||
.input_index = 0,
|
||||
.offset = offsetof(struct ipv6_hdr, proto),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, proto),
|
||||
},
|
||||
|
||||
/* Source IP address (IPv6) */
|
||||
@ -718,7 +718,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 1,
|
||||
.input_index = 1,
|
||||
.offset = offsetof(struct ipv6_hdr, src_addr[0]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, src_addr[0]),
|
||||
},
|
||||
|
||||
[2] = {
|
||||
@ -726,7 +726,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 2,
|
||||
.input_index = 2,
|
||||
.offset = offsetof(struct ipv6_hdr, src_addr[4]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, src_addr[4]),
|
||||
},
|
||||
|
||||
[3] = {
|
||||
@ -734,7 +734,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 3,
|
||||
.input_index = 3,
|
||||
.offset = offsetof(struct ipv6_hdr, src_addr[8]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, src_addr[8]),
|
||||
},
|
||||
|
||||
[4] = {
|
||||
@ -742,7 +742,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 4,
|
||||
.input_index = 4,
|
||||
.offset = offsetof(struct ipv6_hdr, src_addr[12]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, src_addr[12]),
|
||||
},
|
||||
|
||||
/* Destination IP address (IPv6) */
|
||||
@ -751,7 +751,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 5,
|
||||
.input_index = 5,
|
||||
.offset = offsetof(struct ipv6_hdr, dst_addr[0]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, dst_addr[0]),
|
||||
},
|
||||
|
||||
[6] = {
|
||||
@ -759,7 +759,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 6,
|
||||
.input_index = 6,
|
||||
.offset = offsetof(struct ipv6_hdr, dst_addr[4]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, dst_addr[4]),
|
||||
},
|
||||
|
||||
[7] = {
|
||||
@ -767,7 +767,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 7,
|
||||
.input_index = 7,
|
||||
.offset = offsetof(struct ipv6_hdr, dst_addr[8]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, dst_addr[8]),
|
||||
},
|
||||
|
||||
[8] = {
|
||||
@ -775,7 +775,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 8,
|
||||
.input_index = 8,
|
||||
.offset = offsetof(struct ipv6_hdr, dst_addr[12]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, dst_addr[12]),
|
||||
},
|
||||
|
||||
/* Source Port */
|
||||
@ -784,7 +784,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint16_t),
|
||||
.field_index = 9,
|
||||
.input_index = 9,
|
||||
.offset = sizeof(struct ipv6_hdr) +
|
||||
.offset = sizeof(struct rte_ipv6_hdr) +
|
||||
offsetof(struct tcp_hdr, src_port),
|
||||
},
|
||||
|
||||
@ -794,7 +794,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint16_t),
|
||||
.field_index = 10,
|
||||
.input_index = 9,
|
||||
.offset = sizeof(struct ipv6_hdr) +
|
||||
.offset = sizeof(struct rte_ipv6_hdr) +
|
||||
offsetof(struct tcp_hdr, dst_port),
|
||||
},
|
||||
};
|
||||
|
@ -272,12 +272,12 @@ tap_verify_csum(struct rte_mbuf *mbuf)
|
||||
else if (l2 == RTE_PTYPE_L2_ETHER_QINQ)
|
||||
l2_len += 8;
|
||||
/* Don't verify checksum for packets with discontinuous L2 header */
|
||||
if (unlikely(l2_len + sizeof(struct ipv4_hdr) >
|
||||
if (unlikely(l2_len + sizeof(struct rte_ipv4_hdr) >
|
||||
rte_pktmbuf_data_len(mbuf)))
|
||||
return;
|
||||
l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len);
|
||||
if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
|
||||
struct ipv4_hdr *iph = l3_hdr;
|
||||
struct rte_ipv4_hdr *iph = l3_hdr;
|
||||
|
||||
/* ihl contains the number of 4-byte words in the header */
|
||||
l3_len = 4 * (iph->version_ihl & 0xf);
|
||||
@ -295,9 +295,9 @@ tap_verify_csum(struct rte_mbuf *mbuf)
|
||||
PKT_RX_IP_CKSUM_BAD :
|
||||
PKT_RX_IP_CKSUM_GOOD;
|
||||
} else if (l3 == RTE_PTYPE_L3_IPV6) {
|
||||
struct ipv6_hdr *iph = l3_hdr;
|
||||
struct rte_ipv6_hdr *iph = l3_hdr;
|
||||
|
||||
l3_len = sizeof(struct ipv6_hdr);
|
||||
l3_len = sizeof(struct rte_ipv6_hdr);
|
||||
/* check that the total length reported by header is not
|
||||
* greater than the total received size
|
||||
*/
|
||||
@ -496,7 +496,7 @@ tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
|
||||
void *l3_hdr = packet + l2_len;
|
||||
|
||||
if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
|
||||
struct ipv4_hdr *iph = l3_hdr;
|
||||
struct rte_ipv4_hdr *iph = l3_hdr;
|
||||
uint16_t cksum;
|
||||
|
||||
iph->hdr_checksum = 0;
|
||||
|
@ -479,13 +479,14 @@ virtio_tso_fix_cksum(struct rte_mbuf *m)
|
||||
/* common case: header is not fragmented */
|
||||
if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
|
||||
m->l4_len)) {
|
||||
struct ipv4_hdr *iph;
|
||||
struct ipv6_hdr *ip6h;
|
||||
struct rte_ipv4_hdr *iph;
|
||||
struct rte_ipv6_hdr *ip6h;
|
||||
struct tcp_hdr *th;
|
||||
uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
|
||||
uint32_t tmp;
|
||||
|
||||
iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
|
||||
iph = rte_pktmbuf_mtod_offset(m,
|
||||
struct rte_ipv4_hdr *, m->l2_len);
|
||||
th = RTE_PTR_ADD(iph, m->l3_len);
|
||||
if ((iph->version_ihl >> 4) == 4) {
|
||||
iph->hdr_checksum = 0;
|
||||
@ -494,7 +495,7 @@ virtio_tso_fix_cksum(struct rte_mbuf *m)
|
||||
ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
|
||||
m->l3_len);
|
||||
} else {
|
||||
ip6h = (struct ipv6_hdr *)iph;
|
||||
ip6h = (struct rte_ipv6_hdr *)iph;
|
||||
ip_paylen = ip6h->payload_len;
|
||||
}
|
||||
|
||||
|
@ -667,8 +667,8 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
|
||||
struct rte_mbuf *rxm)
|
||||
{
|
||||
uint32_t hlen, slen;
|
||||
struct ipv4_hdr *ipv4_hdr;
|
||||
struct ipv6_hdr *ipv6_hdr;
|
||||
struct rte_ipv4_hdr *ipv4_hdr;
|
||||
struct rte_ipv6_hdr *ipv6_hdr;
|
||||
struct tcp_hdr *tcp_hdr;
|
||||
char *ptr;
|
||||
|
||||
@ -679,20 +679,20 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
|
||||
hlen = sizeof(struct rte_ether_hdr);
|
||||
|
||||
if (rcd->v4) {
|
||||
if (unlikely(slen < hlen + sizeof(struct ipv4_hdr)))
|
||||
return hw->mtu - sizeof(struct ipv4_hdr)
|
||||
if (unlikely(slen < hlen + sizeof(struct rte_ipv4_hdr)))
|
||||
return hw->mtu - sizeof(struct rte_ipv4_hdr)
|
||||
- sizeof(struct tcp_hdr);
|
||||
|
||||
ipv4_hdr = (struct ipv4_hdr *)(ptr + hlen);
|
||||
ipv4_hdr = (struct rte_ipv4_hdr *)(ptr + hlen);
|
||||
hlen += (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
|
||||
IPV4_IHL_MULTIPLIER;
|
||||
} else if (rcd->v6) {
|
||||
if (unlikely(slen < hlen + sizeof(struct ipv6_hdr)))
|
||||
return hw->mtu - sizeof(struct ipv6_hdr) -
|
||||
if (unlikely(slen < hlen + sizeof(struct rte_ipv6_hdr)))
|
||||
return hw->mtu - sizeof(struct rte_ipv6_hdr) -
|
||||
sizeof(struct tcp_hdr);
|
||||
|
||||
ipv6_hdr = (struct ipv6_hdr *)(ptr + hlen);
|
||||
hlen += sizeof(struct ipv6_hdr);
|
||||
ipv6_hdr = (struct rte_ipv6_hdr *)(ptr + hlen);
|
||||
hlen += sizeof(struct rte_ipv6_hdr);
|
||||
if (unlikely(ipv6_hdr->proto != IPPROTO_TCP)) {
|
||||
int frag;
|
||||
|
||||
|
@ -341,7 +341,7 @@ static int lcore_main(__attribute__((unused)) void *arg1)
|
||||
|
||||
struct rte_ether_hdr *eth_hdr;
|
||||
struct rte_arp_hdr *arp_hdr;
|
||||
struct ipv4_hdr *ipv4_hdr;
|
||||
struct rte_ipv4_hdr *ipv4_hdr;
|
||||
uint16_t ether_type, offset;
|
||||
|
||||
uint16_t rx_cnt;
|
||||
@ -409,7 +409,7 @@ static int lcore_main(__attribute__((unused)) void *arg1)
|
||||
global_flag_stru_p->port_packets[2]++;
|
||||
rte_spinlock_unlock(&global_flag_stru_p->lock);
|
||||
}
|
||||
ipv4_hdr = (struct ipv4_hdr *)((char *)(eth_hdr + 1) + offset);
|
||||
ipv4_hdr = (struct rte_ipv4_hdr *)((char *)(eth_hdr + 1) + offset);
|
||||
if (ipv4_hdr->dst_addr == bond_ip) {
|
||||
rte_ether_addr_copy(ð_hdr->s_addr, ð_hdr->d_addr);
|
||||
rte_eth_macaddr_get(BOND_PORT, ð_hdr->s_addr);
|
||||
|
@ -99,7 +99,7 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
|
||||
.field_index = PROTO_FIELD_IPV4,
|
||||
.input_index = PROTO_INPUT_IPV4,
|
||||
.offset = sizeof(struct rte_ether_hdr) +
|
||||
offsetof(struct ipv4_hdr, next_proto_id),
|
||||
offsetof(struct rte_ipv4_hdr, next_proto_id),
|
||||
},
|
||||
/* next input field (IPv4 source address) - 4 consecutive bytes. */
|
||||
{
|
||||
@ -109,7 +109,7 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
|
||||
.field_index = SRC_FIELD_IPV4,
|
||||
.input_index = SRC_INPUT_IPV4,
|
||||
.offset = sizeof(struct rte_ether_hdr) +
|
||||
offsetof(struct ipv4_hdr, src_addr),
|
||||
offsetof(struct rte_ipv4_hdr, src_addr),
|
||||
},
|
||||
/* next input field (IPv4 destination address) - 4 consecutive bytes. */
|
||||
{
|
||||
@ -119,7 +119,7 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
|
||||
.field_index = DST_FIELD_IPV4,
|
||||
.input_index = DST_INPUT_IPV4,
|
||||
.offset = sizeof(struct rte_ether_hdr) +
|
||||
offsetof(struct ipv4_hdr, dst_addr),
|
||||
offsetof(struct rte_ipv4_hdr, dst_addr),
|
||||
},
|
||||
/*
|
||||
* Next 2 fields (src & dst ports) form 4 consecutive bytes.
|
||||
@ -132,7 +132,7 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
|
||||
.field_index = SRCP_FIELD_IPV4,
|
||||
.input_index = SRCP_DESTP_INPUT_IPV4,
|
||||
.offset = sizeof(struct rte_ether_hdr) +
|
||||
sizeof(struct ipv4_hdr) +
|
||||
sizeof(struct rte_ipv4_hdr) +
|
||||
offsetof(struct tcp_hdr, src_port),
|
||||
},
|
||||
{
|
||||
@ -142,7 +142,7 @@ static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
|
||||
.field_index = DSTP_FIELD_IPV4,
|
||||
.input_index = SRCP_DESTP_INPUT_IPV4,
|
||||
.offset = sizeof(struct rte_ether_hdr) +
|
||||
sizeof(struct ipv4_hdr) +
|
||||
sizeof(struct rte_ipv4_hdr) +
|
||||
offsetof(struct tcp_hdr, dst_port),
|
||||
},
|
||||
};
|
||||
|
@ -66,8 +66,8 @@
|
||||
/*
|
||||
* Default payload in bytes for the IPv6 packet.
|
||||
*/
|
||||
#define IPV4_DEFAULT_PAYLOAD (IPV4_MTU_DEFAULT - sizeof(struct ipv4_hdr))
|
||||
#define IPV6_DEFAULT_PAYLOAD (IPV6_MTU_DEFAULT - sizeof(struct ipv6_hdr))
|
||||
#define IPV4_DEFAULT_PAYLOAD (IPV4_MTU_DEFAULT - sizeof(struct rte_ipv4_hdr))
|
||||
#define IPV6_DEFAULT_PAYLOAD (IPV6_MTU_DEFAULT - sizeof(struct rte_ipv6_hdr))
|
||||
|
||||
/*
|
||||
* Max number of fragments per packet expected - defined by config file.
|
||||
@ -260,10 +260,10 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
|
||||
|
||||
/* if this is an IPv4 packet */
|
||||
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
|
||||
struct ipv4_hdr *ip_hdr;
|
||||
struct rte_ipv4_hdr *ip_hdr;
|
||||
uint32_t ip_dst;
|
||||
/* Read the lookup key (i.e. ip_dst) from the input packet */
|
||||
ip_hdr = rte_pktmbuf_mtod(m, struct ipv4_hdr *);
|
||||
ip_hdr = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);
|
||||
ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);
|
||||
|
||||
/* Find destination port */
|
||||
@ -295,12 +295,12 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
|
||||
}
|
||||
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
|
||||
/* if this is an IPv6 packet */
|
||||
struct ipv6_hdr *ip_hdr;
|
||||
struct rte_ipv6_hdr *ip_hdr;
|
||||
|
||||
ipv6 = 1;
|
||||
|
||||
/* Read the lookup key (i.e. ip_dst) from the input packet */
|
||||
ip_hdr = rte_pktmbuf_mtod(m, struct ipv6_hdr *);
|
||||
ip_hdr = rte_pktmbuf_mtod(m, struct rte_ipv6_hdr *);
|
||||
|
||||
/* Find destination port */
|
||||
if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
|
||||
|
@ -636,7 +636,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv4[] = {
|
||||
.size = sizeof(uint8_t),
|
||||
.field_index = 0,
|
||||
.input_index = 0,
|
||||
.offset = offsetof(struct ipv4_hdr, next_proto_id),
|
||||
.offset = offsetof(struct rte_ipv4_hdr, next_proto_id),
|
||||
},
|
||||
|
||||
/* Source IP address (IPv4) */
|
||||
@ -645,7 +645,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv4[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 1,
|
||||
.input_index = 1,
|
||||
.offset = offsetof(struct ipv4_hdr, src_addr),
|
||||
.offset = offsetof(struct rte_ipv4_hdr, src_addr),
|
||||
},
|
||||
|
||||
/* Destination IP address (IPv4) */
|
||||
@ -654,7 +654,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv4[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 2,
|
||||
.input_index = 2,
|
||||
.offset = offsetof(struct ipv4_hdr, dst_addr),
|
||||
.offset = offsetof(struct rte_ipv4_hdr, dst_addr),
|
||||
},
|
||||
|
||||
/* Source Port */
|
||||
@ -663,7 +663,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv4[] = {
|
||||
.size = sizeof(uint16_t),
|
||||
.field_index = 3,
|
||||
.input_index = 3,
|
||||
.offset = sizeof(struct ipv4_hdr) +
|
||||
.offset = sizeof(struct rte_ipv4_hdr) +
|
||||
offsetof(struct tcp_hdr, src_port),
|
||||
},
|
||||
|
||||
@ -673,7 +673,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv4[] = {
|
||||
.size = sizeof(uint16_t),
|
||||
.field_index = 4,
|
||||
.input_index = 3,
|
||||
.offset = sizeof(struct ipv4_hdr) +
|
||||
.offset = sizeof(struct rte_ipv4_hdr) +
|
||||
offsetof(struct tcp_hdr, dst_port),
|
||||
},
|
||||
};
|
||||
@ -685,7 +685,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint8_t),
|
||||
.field_index = 0,
|
||||
.input_index = 0,
|
||||
.offset = offsetof(struct ipv6_hdr, proto),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, proto),
|
||||
},
|
||||
|
||||
/* Source IP address (IPv6) */
|
||||
@ -694,7 +694,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 1,
|
||||
.input_index = 1,
|
||||
.offset = offsetof(struct ipv6_hdr, src_addr[0]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, src_addr[0]),
|
||||
},
|
||||
|
||||
[2] = {
|
||||
@ -702,7 +702,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 2,
|
||||
.input_index = 2,
|
||||
.offset = offsetof(struct ipv6_hdr, src_addr[4]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, src_addr[4]),
|
||||
},
|
||||
|
||||
[3] = {
|
||||
@ -710,7 +710,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 3,
|
||||
.input_index = 3,
|
||||
.offset = offsetof(struct ipv6_hdr, src_addr[8]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, src_addr[8]),
|
||||
},
|
||||
|
||||
[4] = {
|
||||
@ -718,7 +718,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 4,
|
||||
.input_index = 4,
|
||||
.offset = offsetof(struct ipv6_hdr, src_addr[12]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, src_addr[12]),
|
||||
},
|
||||
|
||||
/* Destination IP address (IPv6) */
|
||||
@ -727,7 +727,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 5,
|
||||
.input_index = 5,
|
||||
.offset = offsetof(struct ipv6_hdr, dst_addr[0]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, dst_addr[0]),
|
||||
},
|
||||
|
||||
[6] = {
|
||||
@ -735,7 +735,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 6,
|
||||
.input_index = 6,
|
||||
.offset = offsetof(struct ipv6_hdr, dst_addr[4]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, dst_addr[4]),
|
||||
},
|
||||
|
||||
[7] = {
|
||||
@ -743,7 +743,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 7,
|
||||
.input_index = 7,
|
||||
.offset = offsetof(struct ipv6_hdr, dst_addr[8]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, dst_addr[8]),
|
||||
},
|
||||
|
||||
[8] = {
|
||||
@ -751,7 +751,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = 8,
|
||||
.input_index = 8,
|
||||
.offset = offsetof(struct ipv6_hdr, dst_addr[12]),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, dst_addr[12]),
|
||||
},
|
||||
|
||||
/* Source Port */
|
||||
@ -760,7 +760,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint16_t),
|
||||
.field_index = 9,
|
||||
.input_index = 9,
|
||||
.offset = sizeof(struct ipv6_hdr) +
|
||||
.offset = sizeof(struct rte_ipv6_hdr) +
|
||||
offsetof(struct tcp_hdr, src_port),
|
||||
},
|
||||
|
||||
@ -770,7 +770,7 @@ static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
|
||||
.size = sizeof(uint16_t),
|
||||
.field_index = 10,
|
||||
.input_index = 9,
|
||||
.offset = sizeof(struct ipv6_hdr) +
|
||||
.offset = sizeof(struct rte_ipv6_hdr) +
|
||||
offsetof(struct tcp_hdr, dst_port),
|
||||
},
|
||||
};
|
||||
|
@ -324,10 +324,10 @@ reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
|
||||
|
||||
/* if packet is IPv4 */
|
||||
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
|
||||
struct ipv4_hdr *ip_hdr;
|
||||
struct rte_ipv4_hdr *ip_hdr;
|
||||
uint32_t ip_dst;
|
||||
|
||||
ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
|
||||
ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
|
||||
|
||||
/* if it is a fragmented packet, then try to reassemble. */
|
||||
if (rte_ipv4_frag_pkt_is_fragmented(ip_hdr)) {
|
||||
@ -351,7 +351,7 @@ reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
|
||||
m = mo;
|
||||
eth_hdr = rte_pktmbuf_mtod(m,
|
||||
struct rte_ether_hdr *);
|
||||
ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
|
||||
ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
|
||||
}
|
||||
}
|
||||
ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);
|
||||
@ -366,9 +366,9 @@ reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
|
||||
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
|
||||
/* if packet is IPv6 */
|
||||
struct ipv6_extension_fragment *frag_hdr;
|
||||
struct ipv6_hdr *ip_hdr;
|
||||
struct rte_ipv6_hdr *ip_hdr;
|
||||
|
||||
ip_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
|
||||
ip_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
|
||||
|
||||
frag_hdr = rte_ipv6_frag_get_ipv6_fragment_header(ip_hdr);
|
||||
|
||||
@ -390,7 +390,7 @@ reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
|
||||
m = mo;
|
||||
eth_hdr = rte_pktmbuf_mtod(m,
|
||||
struct rte_ether_hdr *);
|
||||
ip_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
|
||||
ip_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -341,7 +341,7 @@ prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port,
|
||||
|
||||
/* calculate IPv4 cksum in SW */
|
||||
if ((pkt->ol_flags & PKT_TX_IP_CKSUM) == 0)
|
||||
ip->ip_sum = rte_ipv4_cksum((struct ipv4_hdr *)ip);
|
||||
ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip);
|
||||
|
||||
ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
|
||||
} else {
|
||||
|
@ -967,7 +967,7 @@ get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir)
|
||||
|
||||
static int
|
||||
fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
|
||||
const struct ipv4_hdr *v4, struct ipv6_hdr *v6)
|
||||
const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
|
||||
{
|
||||
int32_t rc;
|
||||
|
||||
@ -1038,7 +1038,7 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
|
||||
{
|
||||
int rc;
|
||||
struct rte_ipsec_sa_prm prm;
|
||||
struct ipv4_hdr v4 = {
|
||||
struct rte_ipv4_hdr v4 = {
|
||||
.version_ihl = IPVERSION << 4 |
|
||||
sizeof(v4) / IPV4_IHL_MULTIPLIER,
|
||||
.time_to_live = IPDEFTTL,
|
||||
@ -1046,7 +1046,7 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
|
||||
.src_addr = lsa->src.ip.ip4,
|
||||
.dst_addr = lsa->dst.ip.ip4,
|
||||
};
|
||||
struct ipv6_hdr v6 = {
|
||||
struct rte_ipv6_hdr v6 = {
|
||||
.vtc_flow = htonl(IP6_VERSION << 28),
|
||||
.proto = IPPROTO_ESP,
|
||||
};
|
||||
|
@ -297,7 +297,7 @@ static inline void
|
||||
mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
|
||||
{
|
||||
struct rte_mbuf *mc;
|
||||
struct ipv4_hdr *iphdr;
|
||||
struct rte_ipv4_hdr *iphdr;
|
||||
uint32_t dest_addr, port_mask, port_num, use_clone;
|
||||
int32_t hash;
|
||||
uint16_t port;
|
||||
@ -307,7 +307,7 @@ mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
|
||||
} dst_eth_addr;
|
||||
|
||||
/* Remove the Ethernet header from the input packet */
|
||||
iphdr = (struct ipv4_hdr *)
|
||||
iphdr = (struct rte_ipv4_hdr *)
|
||||
rte_pktmbuf_adj(m, (uint16_t)sizeof(struct rte_ether_hdr));
|
||||
RTE_ASSERT(iphdr != NULL);
|
||||
|
||||
|
@ -388,7 +388,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
|
||||
struct l2fwd_crypto_params *cparams)
|
||||
{
|
||||
struct rte_ether_hdr *eth_hdr;
|
||||
struct ipv4_hdr *ip_hdr;
|
||||
struct rte_ipv4_hdr *ip_hdr;
|
||||
|
||||
uint32_t ipdata_offset, data_len;
|
||||
uint32_t pad_len = 0;
|
||||
@ -401,7 +401,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
|
||||
|
||||
ipdata_offset = sizeof(struct rte_ether_hdr);
|
||||
|
||||
ip_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, char *) +
|
||||
ip_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(m, char *) +
|
||||
ipdata_offset);
|
||||
|
||||
ipdata_offset += (ip_hdr->version_ihl & IPV4_HDR_IHL_MASK)
|
||||
|
@ -146,7 +146,7 @@ static struct rte_mempool *pktmbuf_pool[NB_SOCKETS];
|
||||
/***********************start of ACL part******************************/
|
||||
#ifdef DO_RFC_1812_CHECKS
|
||||
static inline int
|
||||
is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len);
|
||||
is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len);
|
||||
#endif
|
||||
static inline void
|
||||
send_single_packet(struct rte_mbuf *m, uint16_t port);
|
||||
@ -174,8 +174,8 @@ send_single_packet(struct rte_mbuf *m, uint16_t port);
|
||||
*d = (unsigned char)(ip & 0xff);\
|
||||
} while (0)
|
||||
#define OFF_ETHHEAD (sizeof(struct rte_ether_hdr))
|
||||
#define OFF_IPV42PROTO (offsetof(struct ipv4_hdr, next_proto_id))
|
||||
#define OFF_IPV62PROTO (offsetof(struct ipv6_hdr, proto))
|
||||
#define OFF_IPV42PROTO (offsetof(struct rte_ipv4_hdr, next_proto_id))
|
||||
#define OFF_IPV62PROTO (offsetof(struct rte_ipv6_hdr, proto))
|
||||
#define MBUF_IPV4_2PROTO(m) \
|
||||
rte_pktmbuf_mtod_offset((m), uint8_t *, OFF_ETHHEAD + OFF_IPV42PROTO)
|
||||
#define MBUF_IPV6_2PROTO(m) \
|
||||
@ -252,32 +252,32 @@ struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = SRC_FIELD_IPV4,
|
||||
.input_index = RTE_ACL_IPV4VLAN_SRC,
|
||||
.offset = offsetof(struct ipv4_hdr, src_addr) -
|
||||
offsetof(struct ipv4_hdr, next_proto_id),
|
||||
.offset = offsetof(struct rte_ipv4_hdr, src_addr) -
|
||||
offsetof(struct rte_ipv4_hdr, next_proto_id),
|
||||
},
|
||||
{
|
||||
.type = RTE_ACL_FIELD_TYPE_MASK,
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = DST_FIELD_IPV4,
|
||||
.input_index = RTE_ACL_IPV4VLAN_DST,
|
||||
.offset = offsetof(struct ipv4_hdr, dst_addr) -
|
||||
offsetof(struct ipv4_hdr, next_proto_id),
|
||||
.offset = offsetof(struct rte_ipv4_hdr, dst_addr) -
|
||||
offsetof(struct rte_ipv4_hdr, next_proto_id),
|
||||
},
|
||||
{
|
||||
.type = RTE_ACL_FIELD_TYPE_RANGE,
|
||||
.size = sizeof(uint16_t),
|
||||
.field_index = SRCP_FIELD_IPV4,
|
||||
.input_index = RTE_ACL_IPV4VLAN_PORTS,
|
||||
.offset = sizeof(struct ipv4_hdr) -
|
||||
offsetof(struct ipv4_hdr, next_proto_id),
|
||||
.offset = sizeof(struct rte_ipv4_hdr) -
|
||||
offsetof(struct rte_ipv4_hdr, next_proto_id),
|
||||
},
|
||||
{
|
||||
.type = RTE_ACL_FIELD_TYPE_RANGE,
|
||||
.size = sizeof(uint16_t),
|
||||
.field_index = DSTP_FIELD_IPV4,
|
||||
.input_index = RTE_ACL_IPV4VLAN_PORTS,
|
||||
.offset = sizeof(struct ipv4_hdr) -
|
||||
offsetof(struct ipv4_hdr, next_proto_id) +
|
||||
.offset = sizeof(struct rte_ipv4_hdr) -
|
||||
offsetof(struct rte_ipv4_hdr, next_proto_id) +
|
||||
sizeof(uint16_t),
|
||||
},
|
||||
};
|
||||
@ -314,80 +314,84 @@ struct rte_acl_field_def ipv6_defs[NUM_FIELDS_IPV6] = {
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = SRC1_FIELD_IPV6,
|
||||
.input_index = SRC1_FIELD_IPV6,
|
||||
.offset = offsetof(struct ipv6_hdr, src_addr) -
|
||||
offsetof(struct ipv6_hdr, proto),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, src_addr) -
|
||||
offsetof(struct rte_ipv6_hdr, proto),
|
||||
},
|
||||
{
|
||||
.type = RTE_ACL_FIELD_TYPE_MASK,
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = SRC2_FIELD_IPV6,
|
||||
.input_index = SRC2_FIELD_IPV6,
|
||||
.offset = offsetof(struct ipv6_hdr, src_addr) -
|
||||
offsetof(struct ipv6_hdr, proto) + sizeof(uint32_t),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, src_addr) -
|
||||
offsetof(struct rte_ipv6_hdr, proto) + sizeof(uint32_t),
|
||||
},
|
||||
{
|
||||
.type = RTE_ACL_FIELD_TYPE_MASK,
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = SRC3_FIELD_IPV6,
|
||||
.input_index = SRC3_FIELD_IPV6,
|
||||
.offset = offsetof(struct ipv6_hdr, src_addr) -
|
||||
offsetof(struct ipv6_hdr, proto) + 2 * sizeof(uint32_t),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, src_addr) -
|
||||
offsetof(struct rte_ipv6_hdr, proto) +
|
||||
2 * sizeof(uint32_t),
|
||||
},
|
||||
{
|
||||
.type = RTE_ACL_FIELD_TYPE_MASK,
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = SRC4_FIELD_IPV6,
|
||||
.input_index = SRC4_FIELD_IPV6,
|
||||
.offset = offsetof(struct ipv6_hdr, src_addr) -
|
||||
offsetof(struct ipv6_hdr, proto) + 3 * sizeof(uint32_t),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, src_addr) -
|
||||
offsetof(struct rte_ipv6_hdr, proto) +
|
||||
3 * sizeof(uint32_t),
|
||||
},
|
||||
{
|
||||
.type = RTE_ACL_FIELD_TYPE_MASK,
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = DST1_FIELD_IPV6,
|
||||
.input_index = DST1_FIELD_IPV6,
|
||||
.offset = offsetof(struct ipv6_hdr, dst_addr)
|
||||
- offsetof(struct ipv6_hdr, proto),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, dst_addr)
|
||||
- offsetof(struct rte_ipv6_hdr, proto),
|
||||
},
|
||||
{
|
||||
.type = RTE_ACL_FIELD_TYPE_MASK,
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = DST2_FIELD_IPV6,
|
||||
.input_index = DST2_FIELD_IPV6,
|
||||
.offset = offsetof(struct ipv6_hdr, dst_addr) -
|
||||
offsetof(struct ipv6_hdr, proto) + sizeof(uint32_t),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, dst_addr) -
|
||||
offsetof(struct rte_ipv6_hdr, proto) + sizeof(uint32_t),
|
||||
},
|
||||
{
|
||||
.type = RTE_ACL_FIELD_TYPE_MASK,
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = DST3_FIELD_IPV6,
|
||||
.input_index = DST3_FIELD_IPV6,
|
||||
.offset = offsetof(struct ipv6_hdr, dst_addr) -
|
||||
offsetof(struct ipv6_hdr, proto) + 2 * sizeof(uint32_t),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, dst_addr) -
|
||||
offsetof(struct rte_ipv6_hdr, proto) +
|
||||
2 * sizeof(uint32_t),
|
||||
},
|
||||
{
|
||||
.type = RTE_ACL_FIELD_TYPE_MASK,
|
||||
.size = sizeof(uint32_t),
|
||||
.field_index = DST4_FIELD_IPV6,
|
||||
.input_index = DST4_FIELD_IPV6,
|
||||
.offset = offsetof(struct ipv6_hdr, dst_addr) -
|
||||
offsetof(struct ipv6_hdr, proto) + 3 * sizeof(uint32_t),
|
||||
.offset = offsetof(struct rte_ipv6_hdr, dst_addr) -
|
||||
offsetof(struct rte_ipv6_hdr, proto) +
|
||||
3 * sizeof(uint32_t),
|
||||
},
|
||||
{
|
||||
.type = RTE_ACL_FIELD_TYPE_RANGE,
|
||||
.size = sizeof(uint16_t),
|
||||
.field_index = SRCP_FIELD_IPV6,
|
||||
.input_index = SRCP_FIELD_IPV6,
|
||||
.offset = sizeof(struct ipv6_hdr) -
|
||||
offsetof(struct ipv6_hdr, proto),
|
||||
.offset = sizeof(struct rte_ipv6_hdr) -
|
||||
offsetof(struct rte_ipv6_hdr, proto),
|
||||
},
|
||||
{
|
||||
.type = RTE_ACL_FIELD_TYPE_RANGE,
|
||||
.size = sizeof(uint16_t),
|
||||
.field_index = DSTP_FIELD_IPV6,
|
||||
.input_index = SRCP_FIELD_IPV6,
|
||||
.offset = sizeof(struct ipv6_hdr) -
|
||||
offsetof(struct ipv6_hdr, proto) + sizeof(uint16_t),
|
||||
.offset = sizeof(struct rte_ipv6_hdr) -
|
||||
offsetof(struct rte_ipv6_hdr, proto) + sizeof(uint16_t),
|
||||
},
|
||||
};
|
||||
|
||||
@ -542,8 +546,8 @@ dump_acl4_rule(struct rte_mbuf *m, uint32_t sig)
|
||||
{
|
||||
uint32_t offset = sig & ~ACL_DENY_SIGNATURE;
|
||||
unsigned char a, b, c, d;
|
||||
struct ipv4_hdr *ipv4_hdr =
|
||||
rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
|
||||
struct rte_ipv4_hdr *ipv4_hdr =
|
||||
rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
|
||||
sizeof(struct rte_ether_hdr));
|
||||
|
||||
uint32_t_to_char(rte_bswap32(ipv4_hdr->src_addr), &a, &b, &c, &d);
|
||||
@ -566,8 +570,8 @@ dump_acl6_rule(struct rte_mbuf *m, uint32_t sig)
|
||||
{
|
||||
unsigned i;
|
||||
uint32_t offset = sig & ~ACL_DENY_SIGNATURE;
|
||||
struct ipv6_hdr *ipv6_hdr =
|
||||
rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
|
||||
struct rte_ipv6_hdr *ipv6_hdr =
|
||||
rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
|
||||
sizeof(struct rte_ether_hdr));
|
||||
|
||||
printf("Packet Src");
|
||||
@ -620,11 +624,11 @@ static inline void
|
||||
prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
|
||||
int index)
|
||||
{
|
||||
struct ipv4_hdr *ipv4_hdr;
|
||||
struct rte_ipv4_hdr *ipv4_hdr;
|
||||
struct rte_mbuf *pkt = pkts_in[index];
|
||||
|
||||
if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
|
||||
ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
|
||||
ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
|
||||
sizeof(struct rte_ether_hdr));
|
||||
|
||||
/* Check to make sure the packet is valid (RFC1812) */
|
||||
@ -1281,14 +1285,14 @@ send_single_packet(struct rte_mbuf *m, uint16_t port)
|
||||
|
||||
#ifdef DO_RFC_1812_CHECKS
|
||||
static inline int
|
||||
is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
|
||||
is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len)
|
||||
{
|
||||
/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
|
||||
/*
|
||||
* 1. The packet length reported by the Link Layer must be large
|
||||
* enough to hold the minimum length legal IP datagram (20 bytes).
|
||||
*/
|
||||
if (link_len < sizeof(struct ipv4_hdr))
|
||||
if (link_len < sizeof(struct rte_ipv4_hdr))
|
||||
return -1;
|
||||
|
||||
/* 2. The IP checksum must be correct. */
|
||||
@ -1313,7 +1317,7 @@ is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
|
||||
* datagram header, whose length is specified in the IP header length
|
||||
* field.
|
||||
*/
|
||||
if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
|
||||
if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
|
||||
return -5;
|
||||
|
||||
return 0;
|
||||
|
@ -466,14 +466,14 @@ send_single_packet(struct rte_mbuf *m, uint16_t port)
|
||||
|
||||
#ifdef DO_RFC_1812_CHECKS
|
||||
static inline int
|
||||
is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
|
||||
is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len)
|
||||
{
|
||||
/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
|
||||
/*
|
||||
* 1. The packet length reported by the Link Layer must be large
|
||||
* enough to hold the minimum length legal IP datagram (20 bytes).
|
||||
*/
|
||||
if (link_len < sizeof(struct ipv4_hdr))
|
||||
if (link_len < sizeof(struct rte_ipv4_hdr))
|
||||
return -1;
|
||||
|
||||
/* 2. The IP checksum must be correct. */
|
||||
@ -498,7 +498,7 @@ is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
|
||||
* datagram header, whose length is specified in the IP header length
|
||||
* field.
|
||||
*/
|
||||
if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
|
||||
if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
|
||||
return -5;
|
||||
|
||||
return 0;
|
||||
@ -523,7 +523,7 @@ print_ipv6_key(struct ipv6_5tuple key)
|
||||
}
|
||||
|
||||
static inline uint16_t
|
||||
get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
|
||||
get_ipv4_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid,
|
||||
lookup_struct_t * ipv4_l3fwd_lookup_struct)
|
||||
{
|
||||
struct ipv4_5tuple key;
|
||||
@ -538,14 +538,14 @@ get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
|
||||
switch (ipv4_hdr->next_proto_id) {
|
||||
case IPPROTO_TCP:
|
||||
tcp = (struct tcp_hdr *)((unsigned char *)ipv4_hdr +
|
||||
sizeof(struct ipv4_hdr));
|
||||
sizeof(struct rte_ipv4_hdr));
|
||||
key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
|
||||
key.port_src = rte_be_to_cpu_16(tcp->src_port);
|
||||
break;
|
||||
|
||||
case IPPROTO_UDP:
|
||||
udp = (struct udp_hdr *)((unsigned char *)ipv4_hdr +
|
||||
sizeof(struct ipv4_hdr));
|
||||
sizeof(struct rte_ipv4_hdr));
|
||||
key.port_dst = rte_be_to_cpu_16(udp->dst_port);
|
||||
key.port_src = rte_be_to_cpu_16(udp->src_port);
|
||||
break;
|
||||
@ -562,7 +562,7 @@ get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
|
||||
}
|
||||
|
||||
static inline uint16_t
|
||||
get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint16_t portid,
|
||||
get_ipv6_dst_port(struct rte_ipv6_hdr *ipv6_hdr, uint16_t portid,
|
||||
lookup_struct_t *ipv6_l3fwd_lookup_struct)
|
||||
{
|
||||
struct ipv6_5tuple key;
|
||||
@ -578,14 +578,14 @@ get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint16_t portid,
|
||||
switch (ipv6_hdr->proto) {
|
||||
case IPPROTO_TCP:
|
||||
tcp = (struct tcp_hdr *)((unsigned char *) ipv6_hdr +
|
||||
sizeof(struct ipv6_hdr));
|
||||
sizeof(struct rte_ipv6_hdr));
|
||||
key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
|
||||
key.port_src = rte_be_to_cpu_16(tcp->src_port);
|
||||
break;
|
||||
|
||||
case IPPROTO_UDP:
|
||||
udp = (struct udp_hdr *)((unsigned char *) ipv6_hdr +
|
||||
sizeof(struct ipv6_hdr));
|
||||
sizeof(struct rte_ipv6_hdr));
|
||||
key.port_dst = rte_be_to_cpu_16(udp->dst_port);
|
||||
key.port_src = rte_be_to_cpu_16(udp->src_port);
|
||||
break;
|
||||
@ -604,7 +604,7 @@ get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint16_t portid,
|
||||
|
||||
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
|
||||
static inline uint16_t
|
||||
get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
|
||||
get_ipv4_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid,
|
||||
lookup_struct_t *ipv4_l3fwd_lookup_struct)
|
||||
{
|
||||
uint32_t next_hop;
|
||||
@ -662,7 +662,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
|
||||
struct lcore_conf *qconf)
|
||||
{
|
||||
struct rte_ether_hdr *eth_hdr;
|
||||
struct ipv4_hdr *ipv4_hdr;
|
||||
struct rte_ipv4_hdr *ipv4_hdr;
|
||||
void *d_addr_bytes;
|
||||
uint16_t dst_port;
|
||||
|
||||
@ -671,7 +671,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
|
||||
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
|
||||
/* Handle IPv4 headers.*/
|
||||
ipv4_hdr =
|
||||
rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
|
||||
rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
|
||||
sizeof(struct rte_ether_hdr));
|
||||
|
||||
#ifdef DO_RFC_1812_CHECKS
|
||||
@ -707,10 +707,10 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
|
||||
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
|
||||
/* Handle IPv6 headers.*/
|
||||
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
|
||||
struct ipv6_hdr *ipv6_hdr;
|
||||
struct rte_ipv6_hdr *ipv6_hdr;
|
||||
|
||||
ipv6_hdr =
|
||||
rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
|
||||
rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
|
||||
sizeof(struct rte_ether_hdr));
|
||||
|
||||
dst_port = get_ipv6_dst_port(ipv6_hdr, portid,
|
||||
|
@ -314,14 +314,14 @@ send_single_packet(struct rte_mbuf *m, uint16_t port)
|
||||
|
||||
#ifdef DO_RFC_1812_CHECKS
|
||||
static inline int
|
||||
is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
|
||||
is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len)
|
||||
{
|
||||
/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
|
||||
/*
|
||||
* 1. The packet length reported by the Link Layer must be large
|
||||
* enough to hold the minimum length legal IP datagram (20 bytes).
|
||||
*/
|
||||
if (link_len < sizeof(struct ipv4_hdr))
|
||||
if (link_len < sizeof(struct rte_ipv4_hdr))
|
||||
return -1;
|
||||
|
||||
/* 2. The IP checksum must be correct. */
|
||||
@ -346,7 +346,7 @@ is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
|
||||
* datagram header, whose length is specified in the IP header length
|
||||
* field.
|
||||
*/
|
||||
if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
|
||||
if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
|
||||
return -5;
|
||||
|
||||
return 0;
|
||||
@ -362,7 +362,7 @@ print_key(struct ipv4_5tuple key)
|
||||
}
|
||||
|
||||
static inline uint16_t
|
||||
get_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
|
||||
get_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid,
|
||||
lookup_struct_t *l3fwd_lookup_struct)
|
||||
{
|
||||
struct ipv4_5tuple key;
|
||||
@ -377,14 +377,14 @@ get_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
|
||||
switch (ipv4_hdr->next_proto_id) {
|
||||
case IPPROTO_TCP:
|
||||
tcp = (struct tcp_hdr *)((unsigned char *) ipv4_hdr +
|
||||
sizeof(struct ipv4_hdr));
|
||||
sizeof(struct rte_ipv4_hdr));
|
||||
key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
|
||||
key.port_src = rte_be_to_cpu_16(tcp->src_port);
|
||||
break;
|
||||
|
||||
case IPPROTO_UDP:
|
||||
udp = (struct udp_hdr *)((unsigned char *) ipv4_hdr +
|
||||
sizeof(struct ipv4_hdr));
|
||||
sizeof(struct rte_ipv4_hdr));
|
||||
key.port_dst = rte_be_to_cpu_16(udp->dst_port);
|
||||
key.port_src = rte_be_to_cpu_16(udp->src_port);
|
||||
break;
|
||||
@ -402,7 +402,7 @@ get_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
|
||||
|
||||
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
|
||||
static inline uint32_t
|
||||
get_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
|
||||
get_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid,
|
||||
lookup_struct_t *l3fwd_lookup_struct)
|
||||
{
|
||||
uint32_t next_hop;
|
||||
@ -418,13 +418,13 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
|
||||
lookup_struct_t *l3fwd_lookup_struct)
|
||||
{
|
||||
struct rte_ether_hdr *eth_hdr;
|
||||
struct ipv4_hdr *ipv4_hdr;
|
||||
struct rte_ipv4_hdr *ipv4_hdr;
|
||||
void *tmp;
|
||||
uint16_t dst_port;
|
||||
|
||||
eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
|
||||
|
||||
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
|
||||
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
|
||||
sizeof(struct rte_ether_hdr));
|
||||
|
||||
#ifdef DO_RFC_1812_CHECKS
|
||||
|
@ -130,14 +130,14 @@ send_single_packet(struct lcore_conf *qconf,
|
||||
|
||||
#ifdef DO_RFC_1812_CHECKS
|
||||
static inline int
|
||||
is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
|
||||
is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len)
|
||||
{
|
||||
/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
|
||||
/*
|
||||
* 1. The packet length reported by the Link Layer must be large
|
||||
* enough to hold the minimum length legal IP datagram (20 bytes).
|
||||
*/
|
||||
if (link_len < sizeof(struct ipv4_hdr))
|
||||
if (link_len < sizeof(struct rte_ipv4_hdr))
|
||||
return -1;
|
||||
|
||||
/* 2. The IP checksum must be correct. */
|
||||
@ -162,7 +162,7 @@ is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
|
||||
* datagram header, whose length is specified in the IP header length
|
||||
* field.
|
||||
*/
|
||||
if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
|
||||
if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
|
||||
return -5;
|
||||
|
||||
return 0;
|
||||
|
@ -68,14 +68,18 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
|
||||
*p[2] = te[2];
|
||||
*p[3] = te[3];
|
||||
|
||||
rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[0] + 1),
|
||||
&dst_port[0], pkt[0]->packet_type);
|
||||
rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[1] + 1),
|
||||
&dst_port[1], pkt[1]->packet_type);
|
||||
rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[2] + 1),
|
||||
&dst_port[2], pkt[2]->packet_type);
|
||||
rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[3] + 1),
|
||||
&dst_port[3], pkt[3]->packet_type);
|
||||
rfc1812_process((struct rte_ipv4_hdr *)
|
||||
((struct rte_ether_hdr *)p[0] + 1),
|
||||
&dst_port[0], pkt[0]->packet_type);
|
||||
rfc1812_process((struct rte_ipv4_hdr *)
|
||||
((struct rte_ether_hdr *)p[1] + 1),
|
||||
&dst_port[1], pkt[1]->packet_type);
|
||||
rfc1812_process((struct rte_ipv4_hdr *)
|
||||
((struct rte_ether_hdr *)p[2] + 1),
|
||||
&dst_port[2], pkt[2]->packet_type);
|
||||
rfc1812_process((struct rte_ipv4_hdr *)
|
||||
((struct rte_ether_hdr *)p[3] + 1),
|
||||
&dst_port[3], pkt[3]->packet_type);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -129,7 +133,7 @@ process_packet(struct rte_mbuf *pkt, uint16_t *dst_port)
|
||||
te = *(vector unsigned int *)eth_hdr;
|
||||
ve = (vector unsigned int)val_eth[dst_port[0]];
|
||||
|
||||
rfc1812_process((struct ipv4_hdr *)(eth_hdr + 1), dst_port,
|
||||
rfc1812_process((struct rte_ipv4_hdr *)(eth_hdr + 1), dst_port,
|
||||
pkt->packet_type);
|
||||
|
||||
/* dynamically vec_sel te and ve for MASK_ETH (0x3f) */
|
||||
|
@ -14,7 +14,7 @@
|
||||
#define IPV4_MAX_VER_IHL_DIFF (IPV4_MAX_VER_IHL - IPV4_MIN_VER_IHL)
|
||||
|
||||
/* Minimum value of IPV4 total length (20B) in network byte order. */
|
||||
#define IPV4_MIN_LEN_BE (sizeof(struct ipv4_hdr) << 8)
|
||||
#define IPV4_MIN_LEN_BE (sizeof(struct rte_ipv4_hdr) << 8)
|
||||
|
||||
/*
|
||||
* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
|
||||
@ -28,7 +28,7 @@
|
||||
* to BAD_PORT value.
|
||||
*/
|
||||
static __rte_always_inline void
|
||||
rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
|
||||
rfc1812_process(struct rte_ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
|
||||
{
|
||||
uint8_t ihl;
|
||||
|
||||
|
@ -252,7 +252,8 @@ em_get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, void *lookup_struct)
|
||||
struct rte_hash *ipv4_l3fwd_lookup_struct =
|
||||
(struct rte_hash *)lookup_struct;
|
||||
|
||||
ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);
|
||||
ipv4_hdr = (uint8_t *)ipv4_hdr +
|
||||
offsetof(struct rte_ipv4_hdr, time_to_live);
|
||||
|
||||
/*
|
||||
* Get 5 tuple: dst port, src port, dst IP address,
|
||||
@ -273,7 +274,8 @@ em_get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid, void *lookup_struct)
|
||||
struct rte_hash *ipv6_l3fwd_lookup_struct =
|
||||
(struct rte_hash *)lookup_struct;
|
||||
|
||||
ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
|
||||
ipv6_hdr = (uint8_t *)ipv6_hdr +
|
||||
offsetof(struct rte_ipv6_hdr, payload_len);
|
||||
void *data0 = ipv6_hdr;
|
||||
void *data1 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t);
|
||||
void *data2 = ((uint8_t *)ipv6_hdr) + sizeof(xmm_t) + sizeof(xmm_t);
|
||||
@ -566,17 +568,17 @@ em_parse_ptype(struct rte_mbuf *m)
|
||||
uint16_t ether_type;
|
||||
void *l3;
|
||||
int hdr_len;
|
||||
struct ipv4_hdr *ipv4_hdr;
|
||||
struct ipv6_hdr *ipv6_hdr;
|
||||
struct rte_ipv4_hdr *ipv4_hdr;
|
||||
struct rte_ipv6_hdr *ipv6_hdr;
|
||||
|
||||
eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
|
||||
ether_type = eth_hdr->ether_type;
|
||||
l3 = (uint8_t *)eth_hdr + sizeof(struct rte_ether_hdr);
|
||||
if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
|
||||
ipv4_hdr = (struct ipv4_hdr *)l3;
|
||||
ipv4_hdr = (struct rte_ipv4_hdr *)l3;
|
||||
hdr_len = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
|
||||
IPV4_IHL_MULTIPLIER;
|
||||
if (hdr_len == sizeof(struct ipv4_hdr)) {
|
||||
if (hdr_len == sizeof(struct rte_ipv4_hdr)) {
|
||||
packet_type |= RTE_PTYPE_L3_IPV4;
|
||||
if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
|
||||
packet_type |= RTE_PTYPE_L4_TCP;
|
||||
@ -585,7 +587,7 @@ em_parse_ptype(struct rte_mbuf *m)
|
||||
} else
|
||||
packet_type |= RTE_PTYPE_L3_IPV4_EXT;
|
||||
} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {
ipv6_hdr = (struct ipv6_hdr *)l3;
ipv6_hdr = (struct rte_ipv6_hdr *)l3;
if (ipv6_hdr->proto == IPPROTO_TCP)
packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
else if (ipv6_hdr->proto == IPPROTO_UDP)

@ -10,7 +10,7 @@ l3fwd_em_simple_forward(struct rte_mbuf *m, uint16_t portid,
struct lcore_conf *qconf)
{
struct rte_ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
uint16_t dst_port;
uint32_t tcp_or_udp;
uint32_t l3_ptypes;
@ -21,7 +21,7 @@ l3fwd_em_simple_forward(struct rte_mbuf *m, uint16_t portid,

if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {
/* Handle IPv4 headers.*/
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));

#ifdef DO_RFC_1812_CHECKS
@ -53,9 +53,9 @@ l3fwd_em_simple_forward(struct rte_mbuf *m, uint16_t portid,
send_single_packet(qconf, m, dst_port);
} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {
/* Handle IPv6 headers.*/
struct ipv6_hdr *ipv6_hdr;
struct rte_ipv6_hdr *ipv6_hdr;

ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));

dst_port = em_get_ipv6_dst_port(ipv6_hdr, portid,

@ -80,8 +80,8 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
uint16_t portid)
{
uint16_t next_hop;
struct ipv4_hdr *ipv4_hdr;
struct ipv6_hdr *ipv6_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
uint32_t tcp_or_udp;
uint32_t l3_ptypes;

@ -91,7 +91,7 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {

/* Handle IPv4 headers.*/
ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));

next_hop = em_get_ipv4_dst_port(ipv4_hdr, portid,
@ -106,7 +106,7 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {

/* Handle IPv6 headers.*/
ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv6_hdr *,
ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));

next_hop = em_get_ipv6_dst_port(ipv6_hdr, portid,

@ -14,7 +14,7 @@ get_ipv4_5tuple(struct rte_mbuf *m0, int32x4_t mask0,
{
int32x4_t tmpdata0 = vld1q_s32(rte_pktmbuf_mtod_offset(m0, int32_t *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv4_hdr, time_to_live)));
offsetof(struct rte_ipv4_hdr, time_to_live)));

key->xmm = vandq_s32(tmpdata0, mask0);
}
@ -26,17 +26,17 @@ get_ipv6_5tuple(struct rte_mbuf *m0, int32x4_t mask0,
int32x4_t tmpdata0 = vld1q_s32(
rte_pktmbuf_mtod_offset(m0, int *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv6_hdr, payload_len)));
offsetof(struct rte_ipv6_hdr, payload_len)));

int32x4_t tmpdata1 = vld1q_s32(
rte_pktmbuf_mtod_offset(m0, int *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv6_hdr, payload_len) + 8));
rte_pktmbuf_mtod_offset(m0, int *,
sizeof(struct rte_ether_hdr) +
offsetof(struct rte_ipv6_hdr, payload_len) + 8));

int32x4_t tmpdata2 = vld1q_s32(
rte_pktmbuf_mtod_offset(m0, int *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv6_hdr, payload_len) + 16));
rte_pktmbuf_mtod_offset(m0, int *,
sizeof(struct rte_ether_hdr) +
offsetof(struct rte_ipv6_hdr, payload_len) + 16));

key->xmm[0] = vandq_s32(tmpdata0, mask0);
key->xmm[1] = tmpdata1;

@ -14,7 +14,7 @@ get_ipv4_5tuple(struct rte_mbuf *m0, __m128i mask0,
__m128i tmpdata0 = _mm_loadu_si128(
rte_pktmbuf_mtod_offset(m0, __m128i *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv4_hdr, time_to_live)));
offsetof(struct rte_ipv4_hdr, time_to_live)));

key->xmm = _mm_and_si128(tmpdata0, mask0);
}
@ -26,18 +26,18 @@ get_ipv6_5tuple(struct rte_mbuf *m0, __m128i mask0,
__m128i tmpdata0 = _mm_loadu_si128(
rte_pktmbuf_mtod_offset(m0, __m128i *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv6_hdr, payload_len)));
offsetof(struct rte_ipv6_hdr, payload_len)));

__m128i tmpdata1 = _mm_loadu_si128(
rte_pktmbuf_mtod_offset(m0, __m128i *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv6_hdr, payload_len) +
offsetof(struct rte_ipv6_hdr, payload_len) +
sizeof(__m128i)));

__m128i tmpdata2 = _mm_loadu_si128(
rte_pktmbuf_mtod_offset(m0, __m128i *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv6_hdr, payload_len) +
offsetof(struct rte_ipv6_hdr, payload_len) +
sizeof(__m128i) + sizeof(__m128i)));

key->xmm[0] = _mm_and_si128(tmpdata0, mask0);

@ -25,8 +25,8 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
uint16_t portid)
{
uint8_t next_hop;
struct ipv4_hdr *ipv4_hdr;
struct ipv6_hdr *ipv6_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
uint32_t tcp_or_udp;
uint32_t l3_ptypes;

@ -36,7 +36,7 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV4)) {

/* Handle IPv4 headers.*/
ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));

next_hop = em_get_ipv4_dst_port(ipv4_hdr, portid,
@ -51,7 +51,7 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
} else if (tcp_or_udp && (l3_ptypes == RTE_PTYPE_L3_IPV6)) {

/* Handle IPv6 headers.*/
ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv6_hdr *,
ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));

next_hop = em_get_ipv6_dst_port(ipv6_hdr, portid,

@ -86,7 +86,7 @@ lpm_get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, void *lookup_struct)
(struct rte_lpm *)lookup_struct;

return (uint16_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
rte_be_to_cpu_32(((struct rte_ipv4_hdr *)ipv4_hdr)->dst_addr),
&next_hop) == 0) ? next_hop : portid);
}

@ -98,7 +98,7 @@ lpm_get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid, void *lookup_struct)
(struct rte_lpm6 *)lookup_struct;

return (uint16_t) ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
((struct ipv6_hdr *)ipv6_hdr)->dst_addr,
((struct rte_ipv6_hdr *)ipv6_hdr)->dst_addr,
&next_hop) == 0) ? next_hop : portid);
}

@ -106,21 +106,21 @@ static __rte_always_inline uint16_t
lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
uint16_t portid)
{
struct ipv6_hdr *ipv6_hdr;
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ether_hdr *eth_hdr;

if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {

eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);

return lpm_get_ipv4_dst_port(ipv4_hdr, portid,
qconf->ipv4_lookup_struct);
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {

eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);

return lpm_get_ipv6_dst_port(ipv6_hdr, portid,
qconf->ipv6_lookup_struct);
@ -139,7 +139,7 @@ lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
uint32_t dst_ipv4, uint16_t portid)
{
uint32_t next_hop;
struct ipv6_hdr *ipv6_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
struct rte_ether_hdr *eth_hdr;

if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
@ -150,7 +150,7 @@ lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {

eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);

return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
ipv6_hdr->dst_addr, &next_hop) == 0)

@ -10,14 +10,14 @@ l3fwd_lpm_simple_forward(struct rte_mbuf *m, uint16_t portid,
struct lcore_conf *qconf)
{
struct rte_ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
uint16_t dst_port;

eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
/* Handle IPv4 headers.*/
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));

#ifdef DO_RFC_1812_CHECKS
@ -49,9 +49,9 @@ l3fwd_lpm_simple_forward(struct rte_mbuf *m, uint16_t portid,
send_single_packet(qconf, m, dst_port);
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
/* Handle IPv6 headers.*/
struct ipv6_hdr *ipv6_hdr;
struct rte_ipv6_hdr *ipv6_hdr;

ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));

dst_port = lpm_get_ipv6_dst_port(ipv6_hdr, portid,

@ -17,30 +17,30 @@ processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
vector unsigned int *dip,
uint32_t *ipv4_flag)
{
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ether_hdr *eth_hdr;
uint32_t x0, x1, x2, x3;

eth_hdr = rte_pktmbuf_mtod(pkt[0], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x0 = ipv4_hdr->dst_addr;
ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;

rte_compiler_barrier();
eth_hdr = rte_pktmbuf_mtod(pkt[1], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x1 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[1]->packet_type;

rte_compiler_barrier();
eth_hdr = rte_pktmbuf_mtod(pkt[2], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x2 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[2]->packet_type;

rte_compiler_barrier();
eth_hdr = rte_pktmbuf_mtod(pkt[3], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x3 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[3]->packet_type;


@ -18,27 +18,27 @@ processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
int32x4_t *dip,
uint32_t *ipv4_flag)
{
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ether_hdr *eth_hdr;
int32_t dst[FWDSTEP];

eth_hdr = rte_pktmbuf_mtod(pkt[0], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
dst[0] = ipv4_hdr->dst_addr;
ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;

eth_hdr = rte_pktmbuf_mtod(pkt[1], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
dst[1] = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[1]->packet_type;

eth_hdr = rte_pktmbuf_mtod(pkt[2], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
dst[2] = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[2]->packet_type;

eth_hdr = rte_pktmbuf_mtod(pkt[3], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
dst[3] = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[3]->packet_type;


@ -15,27 +15,27 @@ processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
__m128i *dip,
uint32_t *ipv4_flag)
{
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ether_hdr *eth_hdr;
uint32_t x0, x1, x2, x3;

eth_hdr = rte_pktmbuf_mtod(pkt[0], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x0 = ipv4_hdr->dst_addr;
ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;

eth_hdr = rte_pktmbuf_mtod(pkt[1], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x1 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[1]->packet_type;

eth_hdr = rte_pktmbuf_mtod(pkt[2], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x2 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[2]->packet_type;

eth_hdr = rte_pktmbuf_mtod(pkt[3], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x3 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[3]->packet_type;

@ -48,14 +48,18 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
vst1q_u32(p[2], ve[2]);
vst1q_u32(p[3], ve[3]);

rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[0] + 1),
&dst_port[0], pkt[0]->packet_type);
rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[1] + 1),
&dst_port[1], pkt[1]->packet_type);
rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[2] + 1),
&dst_port[2], pkt[2]->packet_type);
rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[3] + 1),
&dst_port[3], pkt[3]->packet_type);
rfc1812_process((struct rte_ipv4_hdr *)
((struct rte_ether_hdr *)p[0] + 1),
&dst_port[0], pkt[0]->packet_type);
rfc1812_process((struct rte_ipv4_hdr *)
((struct rte_ether_hdr *)p[1] + 1),
&dst_port[1], pkt[1]->packet_type);
rfc1812_process((struct rte_ipv4_hdr *)
((struct rte_ether_hdr *)p[2] + 1),
&dst_port[2], pkt[2]->packet_type);
rfc1812_process((struct rte_ipv4_hdr *)
((struct rte_ether_hdr *)p[3] + 1),
&dst_port[3], pkt[3]->packet_type);
}

/*
@ -113,7 +117,7 @@ process_packet(struct rte_mbuf *pkt, uint16_t *dst_port)
ve = vreinterpretq_u32_s32(val_eth[dst_port[0]]);

rfc1812_process((struct ipv4_hdr *)(eth_hdr + 1), dst_port,
rfc1812_process((struct rte_ipv4_hdr *)(eth_hdr + 1), dst_port,
pkt->packet_type);

ve = vcopyq_laneq_u32(ve, 3, te, 3);

@ -48,14 +48,18 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
_mm_storeu_si128(p[2], te[2]);
_mm_storeu_si128(p[3], te[3]);

rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[0] + 1),
&dst_port[0], pkt[0]->packet_type);
rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[1] + 1),
&dst_port[1], pkt[1]->packet_type);
rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[2] + 1),
&dst_port[2], pkt[2]->packet_type);
rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[3] + 1),
&dst_port[3], pkt[3]->packet_type);
rfc1812_process((struct rte_ipv4_hdr *)
((struct rte_ether_hdr *)p[0] + 1),
&dst_port[0], pkt[0]->packet_type);
rfc1812_process((struct rte_ipv4_hdr *)
((struct rte_ether_hdr *)p[1] + 1),
&dst_port[1], pkt[1]->packet_type);
rfc1812_process((struct rte_ipv4_hdr *)
((struct rte_ether_hdr *)p[2] + 1),
&dst_port[2], pkt[2]->packet_type);
rfc1812_process((struct rte_ipv4_hdr *)
((struct rte_ether_hdr *)p[3] + 1),
&dst_port[3], pkt[3]->packet_type);
}

/*
@ -109,7 +113,7 @@ process_packet(struct rte_mbuf *pkt, uint16_t *dst_port)
te = _mm_loadu_si128((__m128i *)eth_hdr);
ve = val_eth[dst_port[0]];

rfc1812_process((struct ipv4_hdr *)(eth_hdr + 1), dst_port,
rfc1812_process((struct rte_ipv4_hdr *)(eth_hdr + 1), dst_port,
pkt->packet_type);

te = _mm_blend_epi16(te, ve, MASK_ETH);

@ -495,7 +495,7 @@ app_lcore_worker(

for (j = 0; j < bsz_rd; j ++) {
struct rte_mbuf *pkt;
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
uint32_t ipv4_dst, pos;
uint32_t port;

@ -508,7 +508,7 @@ app_lcore_worker(

pkt = lp->mbuf_in.array[j];
ipv4_hdr = rte_pktmbuf_mtod_offset(
pkt, struct ipv4_hdr *,
pkt, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);

@ -751,14 +751,14 @@ send_packetsx4(uint16_t port,

#ifdef DO_RFC_1812_CHECKS
static inline int
is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len)
{
/* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
/*
* 1. The packet length reported by the Link Layer must be large
* enough to hold the minimum length legal IP datagram (20 bytes).
*/
if (link_len < sizeof(struct ipv4_hdr))
if (link_len < sizeof(struct rte_ipv4_hdr))
return -1;

/* 2. The IP checksum must be correct. */
@ -783,7 +783,7 @@ is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
* datagram header, whose length is specified in the IP header length
* field.
*/
if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
return -5;

return 0;
@ -802,7 +802,8 @@ get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid,
int ret = 0;
union ipv4_5tuple_host key;

ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);
ipv4_hdr = (uint8_t *)ipv4_hdr +
offsetof(struct rte_ipv4_hdr, time_to_live);
__m128i data = _mm_loadu_si128((__m128i *)(ipv4_hdr));
/* Get 5 tuple: dst port, src port, dst IP address, src IP address and
protocol */
@ -819,7 +820,8 @@ get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid,
int ret = 0;
union ipv6_5tuple_host key;

ipv6_hdr = (uint8_t *)ipv6_hdr + offsetof(struct ipv6_hdr, payload_len);
ipv6_hdr = (uint8_t *)ipv6_hdr +
offsetof(struct rte_ipv6_hdr, payload_len);
__m128i data0 = _mm_loadu_si128((__m128i *)(ipv6_hdr));
__m128i data1 = _mm_loadu_si128((__m128i *)(((uint8_t *)ipv6_hdr) +
sizeof(__m128i)));
@ -849,7 +851,7 @@ get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid,
uint32_t next_hop;

return ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
rte_be_to_cpu_32(((struct rte_ipv4_hdr *)ipv4_hdr)->dst_addr),
&next_hop) == 0) ? next_hop : portid);
}

@ -860,8 +862,8 @@ get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid,
uint32_t next_hop;

return ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
((struct ipv6_hdr *)ipv6_hdr)->dst_addr, &next_hop) == 0) ?
next_hop : portid);
((struct rte_ipv6_hdr *)ipv6_hdr)->dst_addr, &next_hop) == 0) ?
next_hop : portid);
}
#endif

@ -885,7 +887,7 @@ static inline void
simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint16_t portid)
{
struct rte_ether_hdr *eth_hdr[8];
struct ipv4_hdr *ipv4_hdr[8];
struct rte_ipv4_hdr *ipv4_hdr[8];
uint16_t dst_port[8];
int32_t ret[8];
union ipv4_5tuple_host key[8];
@ -901,21 +903,21 @@ simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint16_t portid)
eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct rte_ether_hdr *);

/* Handle IPv4 headers.*/
ipv4_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv4_hdr *,
ipv4_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv4_hdr *,
ipv4_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv4_hdr *,
ipv4_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv4_hdr *,
ipv4_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv4_hdr *,
ipv4_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv4_hdr *,
ipv4_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv4_hdr *,
ipv4_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv4_hdr *,
ipv4_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));

#ifdef DO_RFC_1812_CHECKS
@ -968,28 +970,28 @@ simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint16_t portid)

data[0] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[0], __m128i *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv4_hdr, time_to_live)));
offsetof(struct rte_ipv4_hdr, time_to_live)));
data[1] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[1], __m128i *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv4_hdr, time_to_live)));
offsetof(struct rte_ipv4_hdr, time_to_live)));
data[2] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[2], __m128i *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv4_hdr, time_to_live)));
offsetof(struct rte_ipv4_hdr, time_to_live)));
data[3] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[3], __m128i *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv4_hdr, time_to_live)));
offsetof(struct rte_ipv4_hdr, time_to_live)));
data[4] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[4], __m128i *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv4_hdr, time_to_live)));
offsetof(struct rte_ipv4_hdr, time_to_live)));
data[5] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[5], __m128i *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv4_hdr, time_to_live)));
offsetof(struct rte_ipv4_hdr, time_to_live)));
data[6] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[6], __m128i *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv4_hdr, time_to_live)));
offsetof(struct rte_ipv4_hdr, time_to_live)));
data[7] = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m[7], __m128i *,
sizeof(struct rte_ether_hdr) +
offsetof(struct ipv4_hdr, time_to_live)));
offsetof(struct rte_ipv4_hdr, time_to_live)));

key[0].xmm = _mm_and_si128(data[0], mask0);
key[1].xmm = _mm_and_si128(data[1], mask0);
@ -1095,14 +1097,15 @@ static inline void get_ipv6_5tuple(struct rte_mbuf *m0, __m128i mask0,
{
__m128i tmpdata0 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0,
__m128i *, sizeof(struct rte_ether_hdr) +
offsetof(struct ipv6_hdr, payload_len)));
offsetof(struct rte_ipv6_hdr, payload_len)));
__m128i tmpdata1 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0,
__m128i *, sizeof(struct rte_ether_hdr) +
offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i)));
offsetof(struct rte_ipv6_hdr, payload_len) +
sizeof(__m128i)));
__m128i tmpdata2 = _mm_loadu_si128(rte_pktmbuf_mtod_offset(m0,
__m128i *, sizeof(struct rte_ether_hdr) +
offsetof(struct ipv6_hdr, payload_len) + sizeof(__m128i) +
sizeof(__m128i)));
offsetof(struct rte_ipv6_hdr, payload_len) +
sizeof(__m128i) + sizeof(__m128i)));
key->xmm[0] = _mm_and_si128(tmpdata0, mask0);
key->xmm[1] = tmpdata1;
key->xmm[2] = _mm_and_si128(tmpdata2, mask1);
@ -1116,7 +1119,7 @@ simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint16_t portid)
struct rte_ether_hdr *eth_hdr[8];
union ipv6_5tuple_host key[8];

__attribute__((unused)) struct ipv6_hdr *ipv6_hdr[8];
__attribute__((unused)) struct rte_ipv6_hdr *ipv6_hdr[8];

eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct rte_ether_hdr *);
eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct rte_ether_hdr *);
@ -1128,21 +1131,21 @@ simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint16_t portid)
eth_hdr[7] = rte_pktmbuf_mtod(m[7], struct rte_ether_hdr *);

/* Handle IPv6 headers.*/
ipv6_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct ipv6_hdr *,
ipv6_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
ipv6_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct ipv6_hdr *,
ipv6_hdr[1] = rte_pktmbuf_mtod_offset(m[1], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
ipv6_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct ipv6_hdr *,
ipv6_hdr[2] = rte_pktmbuf_mtod_offset(m[2], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
ipv6_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct ipv6_hdr *,
ipv6_hdr[3] = rte_pktmbuf_mtod_offset(m[3], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
ipv6_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct ipv6_hdr *,
ipv6_hdr[4] = rte_pktmbuf_mtod_offset(m[4], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
ipv6_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct ipv6_hdr *,
ipv6_hdr[5] = rte_pktmbuf_mtod_offset(m[5], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
ipv6_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct ipv6_hdr *,
ipv6_hdr[6] = rte_pktmbuf_mtod_offset(m[6], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));
ipv6_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv6_hdr *,
ipv6_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));

get_ipv6_5tuple(m[0], mask1, mask2, &key[0]);
@ -1229,14 +1232,14 @@ static __rte_always_inline void
l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid)
{
struct rte_ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
uint16_t dst_port;

eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
/* Handle IPv4 headers.*/
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));

#ifdef DO_RFC_1812_CHECKS
@ -1268,9 +1271,9 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid)
send_single_packet(m, dst_port);
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
/* Handle IPv6 headers.*/
struct ipv6_hdr *ipv6_hdr;
struct rte_ipv6_hdr *ipv6_hdr;

ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
sizeof(struct rte_ether_hdr));

dst_port = get_ipv6_dst_port(ipv6_hdr, portid,
@ -1302,7 +1305,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid)
#define IPV4_MAX_VER_IHL_DIFF (IPV4_MAX_VER_IHL - IPV4_MIN_VER_IHL)

/* Minimum value of IPV4 total length (20B) in network byte order. */
#define IPV4_MIN_LEN_BE (sizeof(struct ipv4_hdr) << 8)
#define IPV4_MIN_LEN_BE (sizeof(struct rte_ipv4_hdr) << 8)

/*
* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2:
@ -1316,7 +1319,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid)
* to BAD_PORT value.
*/
static __rte_always_inline void
rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
rfc1812_process(struct rte_ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
{
uint8_t ihl;

@ -1347,7 +1350,7 @@ static __rte_always_inline uint16_t
get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint16_t portid)
{
uint32_t next_hop;
struct ipv6_hdr *ipv6_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
struct rte_ether_hdr *eth_hdr;

if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
@ -1358,7 +1361,7 @@ get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint16_t portid)
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {

eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);

return (uint16_t) ((rte_lpm6_lookup(
RTE_PER_LCORE(lcore_conf)->ipv6_lookup_struct,
@ -1374,13 +1377,13 @@ static inline void
process_packet(struct rte_mbuf *pkt, uint16_t *dst_port, uint16_t portid)
{
struct rte_ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
uint32_t dst_ipv4;
uint16_t dp;
__m128i te, ve;

eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);

dst_ipv4 = ipv4_hdr->dst_addr;
dst_ipv4 = rte_be_to_cpu_32(dst_ipv4);
@ -1404,27 +1407,27 @@ processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
__m128i *dip,
uint32_t *ipv4_flag)
{
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ether_hdr *eth_hdr;
uint32_t x0, x1, x2, x3;

eth_hdr = rte_pktmbuf_mtod(pkt[0], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x0 = ipv4_hdr->dst_addr;
ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;

eth_hdr = rte_pktmbuf_mtod(pkt[1], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x1 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[1]->packet_type;

eth_hdr = rte_pktmbuf_mtod(pkt[2], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x2 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[2]->packet_type;

eth_hdr = rte_pktmbuf_mtod(pkt[3], struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
x3 = ipv4_hdr->dst_addr;
ipv4_flag[0] &= pkt[3]->packet_type;

@ -1505,13 +1508,17 @@ processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
_mm_store_si128(p[2], te[2]);
_mm_store_si128(p[3], te[3]);

rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[0] + 1),
rfc1812_process((struct rte_ipv4_hdr *)
((struct rte_ether_hdr *)p[0] + 1),
&dst_port[0], pkt[0]->packet_type);
rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[1] + 1),
rfc1812_process((struct rte_ipv4_hdr *)
((struct rte_ether_hdr *)p[1] + 1),
&dst_port[1], pkt[1]->packet_type);
rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[2] + 1),
rfc1812_process((struct rte_ipv4_hdr *)
((struct rte_ether_hdr *)p[2] + 1),
&dst_port[2], pkt[2]->packet_type);
rfc1812_process((struct ipv4_hdr *)((struct rte_ether_hdr *)p[3] + 1),
rfc1812_process((struct rte_ipv4_hdr *)
((struct rte_ether_hdr *)p[3] + 1),
&dst_port[3], pkt[3]->packet_type);
}

@ -266,7 +266,7 @@ transmit_packet(struct rte_mbuf *buf)
static inline void
handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)
{
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
uint32_t ipv4_dst_ip[PKT_READ_SIZE];
const void *key_ptrs[PKT_READ_SIZE];
unsigned int i;
@ -274,8 +274,8 @@ handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)

for (i = 0; i < num_packets; i++) {
/* Handle IPv4 header.*/
ipv4_hdr = rte_pktmbuf_mtod_offset(bufs[i], struct ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_hdr = rte_pktmbuf_mtod_offset(bufs[i],
struct rte_ipv4_hdr *, sizeof(struct rte_ether_hdr));
ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
key_ptrs[i] = &ipv4_dst_ip[i];
}

@ -247,13 +247,13 @@ process_packets(uint32_t port_num __rte_unused, struct rte_mbuf *pkts[],
efd_value_t data[RTE_EFD_BURST_MAX];
const void *key_ptrs[RTE_EFD_BURST_MAX];

struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
uint32_t ipv4_dst_ip[RTE_EFD_BURST_MAX];

for (i = 0; i < rx_count; i++) {
/* Handle IPv4 header.*/
ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i], struct ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i],
struct rte_ipv4_hdr *, sizeof(struct rte_ether_hdr));
ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
key_ptrs[i] = (void *)&ipv4_dst_ip[i];
}

@ -31,8 +31,8 @@ static void
parse_ethernet(struct rte_ether_hdr *eth_hdr, union tunnel_offload_info *info,
uint8_t *l4_proto)
{
struct ipv4_hdr *ipv4_hdr;
struct ipv6_hdr *ipv6_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
uint16_t ethertype;

info->outer_l2_len = sizeof(struct rte_ether_hdr);
@ -47,15 +47,15 @@ parse_ethernet(struct rte_ether_hdr *eth_hdr, union tunnel_offload_info *info,

switch (ethertype) {
case RTE_ETHER_TYPE_IPv4:
ipv4_hdr = (struct ipv4_hdr *)
ipv4_hdr = (struct rte_ipv4_hdr *)
((char *)eth_hdr + info->outer_l2_len);
info->outer_l3_len = sizeof(struct ipv4_hdr);
info->outer_l3_len = sizeof(struct rte_ipv4_hdr);
*l4_proto = ipv4_hdr->next_proto_id;
break;
case RTE_ETHER_TYPE_IPv6:
ipv6_hdr = (struct ipv6_hdr *)
ipv6_hdr = (struct rte_ipv6_hdr *)
((char *)eth_hdr + info->outer_l2_len);
info->outer_l3_len = sizeof(struct ipv6_hdr);
info->outer_l3_len = sizeof(struct rte_ipv6_hdr);
*l4_proto = ipv6_hdr->proto;
break;
default:
@ -75,8 +75,8 @@ process_inner_cksums(struct rte_ether_hdr *eth_hdr,
void *l3_hdr = NULL;
uint8_t l4_proto;
uint16_t ethertype;
struct ipv4_hdr *ipv4_hdr;
struct ipv6_hdr *ipv6_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
struct udp_hdr *udp_hdr;
struct tcp_hdr *tcp_hdr;
struct sctp_hdr *sctp_hdr;
@ -95,15 +95,15 @@ process_inner_cksums(struct rte_ether_hdr *eth_hdr,
l3_hdr = (char *)eth_hdr + info->l2_len;

if (ethertype == RTE_ETHER_TYPE_IPv4) {
ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
ipv4_hdr->hdr_checksum = 0;
ol_flags |= PKT_TX_IPV4;
ol_flags |= PKT_TX_IP_CKSUM;
info->l3_len = sizeof(struct ipv4_hdr);
info->l3_len = sizeof(struct rte_ipv4_hdr);
l4_proto = ipv4_hdr->next_proto_id;
} else if (ethertype == RTE_ETHER_TYPE_IPv6) {
ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
info->l3_len = sizeof(struct ipv6_hdr);
ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
info->l3_len = sizeof(struct rte_ipv6_hdr);
l4_proto = ipv6_hdr->proto;
ol_flags |= PKT_TX_IPV6;
} else
@ -182,10 +182,10 @@ encapsulation(struct rte_mbuf *m, uint8_t queue_id)
/*Allocate space for new ethernet, IPv4, UDP and VXLAN headers*/
struct rte_ether_hdr *pneth =
(struct rte_ether_hdr *) rte_pktmbuf_prepend(m,
sizeof(struct rte_ether_hdr) + sizeof(struct ipv4_hdr)
sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr)
+ sizeof(struct udp_hdr) + sizeof(struct rte_vxlan_hdr));

struct ipv4_hdr *ip = (struct ipv4_hdr *) &pneth[1];
struct rte_ipv4_hdr *ip = (struct rte_ipv4_hdr *) &pneth[1];
struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
struct rte_vxlan_hdr *vxlan = (struct rte_vxlan_hdr *) &udp[1];

@ -198,7 +198,7 @@ encapsulation(struct rte_mbuf *m, uint8_t queue_id)

/* copy in IP header */
ip = rte_memcpy(ip, &app_ip_hdr[vport_id],
sizeof(struct ipv4_hdr));
sizeof(struct rte_ipv4_hdr));
ip->total_length = rte_cpu_to_be_16(m->pkt_len
- sizeof(struct rte_ether_hdr));

@ -216,7 +216,7 @@ encapsulation(struct rte_mbuf *m, uint8_t queue_id)
}

m->outer_l2_len = sizeof(struct rte_ether_hdr);
m->outer_l3_len = sizeof(struct ipv4_hdr);
m->outer_l3_len = sizeof(struct rte_ipv4_hdr);

ol_flags |= PKT_TX_TUNNEL_VXLAN;

@ -16,7 +16,7 @@
#define VXLAN_HF_VNI 0x08000000
#define DEFAULT_VXLAN_PORT 4789

extern struct ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
extern struct rte_ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
extern struct rte_ether_hdr app_l2_hdr[VXLAN_N_PORTS];
extern uint8_t tx_checksum;
extern uint16_t tso_segsz;

@ -48,7 +48,7 @@
/* VXLAN device */
struct vxlan_conf vxdev;

struct ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
struct rte_ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
struct rte_ether_hdr app_l2_hdr[VXLAN_N_PORTS];

/* local VTEP IP address */
@ -229,7 +229,7 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
int i, ret;
struct rte_ether_hdr *pkt_hdr;
uint64_t portid = vdev->vid;
struct ipv4_hdr *ip;
struct rte_ipv4_hdr *ip;

struct rte_eth_tunnel_filter_conf tunnel_filter_conf;

@ -865,7 +865,7 @@ get_psd_sum(void *l3_hdr, uint64_t ol_flags)
static void virtio_tx_offload(struct rte_mbuf *m)
{
void *l3_hdr;
struct ipv4_hdr *ipv4_hdr = NULL;
struct rte_ipv4_hdr *ipv4_hdr = NULL;
struct tcp_hdr *tcp_hdr = NULL;
struct rte_ether_hdr *eth_hdr =
rte_pktmbuf_mtod(m, struct rte_ether_hdr *);

@ -628,7 +628,7 @@ static const struct rte_flow_item_vlan rte_flow_item_vlan_mask = {
* Note: IPv4 options are handled by dedicated pattern items.
*/
struct rte_flow_item_ipv4 {
struct ipv4_hdr hdr; /**< IPv4 header definition. */
struct rte_ipv4_hdr hdr; /**< IPv4 header definition. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_IPV4. */
@ -650,7 +650,7 @@ static const struct rte_flow_item_ipv4 rte_flow_item_ipv4_mask = {
* RTE_FLOW_ITEM_TYPE_IPV6_EXT.
*/
struct rte_flow_item_ipv6 {
struct ipv6_hdr hdr; /**< IPv6 header definition. */
struct rte_ipv6_hdr hdr; /**< IPv6 header definition. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_IPV6. */

@ -611,8 +611,8 @@ rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
}

static inline void
rxa_mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,
struct ipv6_hdr **ipv6_hdr)
rxa_mtoip(struct rte_mbuf *m, struct rte_ipv4_hdr **ipv4_hdr,
struct rte_ipv6_hdr **ipv6_hdr)
{
struct rte_ether_hdr *eth_hdr =
rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
@ -623,21 +623,21 @@ rxa_mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,

switch (eth_hdr->ether_type) {
case RTE_BE16(RTE_ETHER_TYPE_IPv4):
*ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
*ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
break;

case RTE_BE16(RTE_ETHER_TYPE_IPv6):
*ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
*ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
break;

case RTE_BE16(RTE_ETHER_TYPE_VLAN):
vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
switch (vlan_hdr->eth_proto) {
case RTE_BE16(RTE_ETHER_TYPE_IPv4):
*ipv4_hdr = (struct ipv4_hdr *)(vlan_hdr + 1);
*ipv4_hdr = (struct rte_ipv4_hdr *)(vlan_hdr + 1);
break;
case RTE_BE16(RTE_ETHER_TYPE_IPv6):
*ipv6_hdr = (struct ipv6_hdr *)(vlan_hdr + 1);
*ipv6_hdr = (struct rte_ipv6_hdr *)(vlan_hdr + 1);
break;
default:
break;
@ -657,8 +657,8 @@ rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
void *tuple;
struct rte_ipv4_tuple ipv4_tuple;
struct rte_ipv6_tuple ipv6_tuple;
struct ipv4_hdr *ipv4_hdr;
struct ipv6_hdr *ipv6_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;

rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);

@ -180,10 +180,10 @@ insert_new_flow(struct gro_tcp4_tbl *tbl,
static inline void
update_header(struct gro_tcp4_item *item)
{
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_mbuf *pkt = item->firstseg;

ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
pkt->l2_len);
ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len -
pkt->l2_len);
@ -195,7 +195,7 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
uint64_t start_time)
{
struct rte_ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct tcp_hdr *tcp_hdr;
uint32_t sent_seq;
int32_t tcp_dl;
@ -216,7 +216,7 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
return -1;

eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
ipv4_hdr = (struct rte_ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;

@ -269,11 +269,11 @@ check_seq_option(struct gro_tcp4_item *item,
uint8_t is_atomic)
{
struct rte_mbuf *pkt_orig = item->firstseg;
struct ipv4_hdr *iph_orig;
struct rte_ipv4_hdr *iph_orig;
struct tcp_hdr *tcph_orig;
uint16_t len, tcp_hl_orig;

iph_orig = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt_orig, char *) +
iph_orig = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt_orig, char *) +
l2_offset + pkt_orig->l2_len);
tcph_orig = (struct tcp_hdr *)((char *)iph_orig + pkt_orig->l3_len);
tcp_hl_orig = pkt_orig->l4_len;

@ -263,14 +263,14 @@ merge_two_vxlan_tcp4_packets(struct gro_vxlan_tcp4_item *item,
static inline void
update_vxlan_header(struct gro_vxlan_tcp4_item *item)
{
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct udp_hdr *udp_hdr;
struct rte_mbuf *pkt = item->inner_item.firstseg;
uint16_t len;

/* Update the outer IPv4 header. */
len = pkt->pkt_len - pkt->outer_l2_len;
ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
pkt->outer_l2_len);
ipv4_hdr->total_length = rte_cpu_to_be_16(len);

@ -281,7 +281,7 @@ update_vxlan_header(struct gro_vxlan_tcp4_item *item)

/* Update the inner IPv4 header. */
len -= pkt->l2_len;
ipv4_hdr = (struct ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
ipv4_hdr->total_length = rte_cpu_to_be_16(len);
}

@ -291,7 +291,7 @@ gro_vxlan_tcp4_reassemble(struct rte_mbuf *pkt,
uint64_t start_time)
{
struct rte_ether_hdr *outer_eth_hdr, *eth_hdr;
struct ipv4_hdr *outer_ipv4_hdr, *ipv4_hdr;
struct rte_ipv4_hdr *outer_ipv4_hdr, *ipv4_hdr;
struct tcp_hdr *tcp_hdr;
struct udp_hdr *udp_hdr;
struct rte_vxlan_hdr *vxlan_hdr;
@ -315,7 +315,7 @@ gro_vxlan_tcp4_reassemble(struct rte_mbuf *pkt,
return -1;

outer_eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
outer_ipv4_hdr = (struct ipv4_hdr *)((char *)outer_eth_hdr +
outer_ipv4_hdr = (struct rte_ipv4_hdr *)((char *)outer_eth_hdr +
pkt->outer_l2_len);
udp_hdr = (struct udp_hdr *)((char *)outer_ipv4_hdr +
pkt->outer_l3_len);
@ -323,7 +323,7 @@ gro_vxlan_tcp4_reassemble(struct rte_mbuf *pkt,
sizeof(struct udp_hdr));
eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_hdr +
sizeof(struct rte_vxlan_hdr));
ipv4_hdr = (struct ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);

/*

@ -98,9 +98,9 @@ update_tcp_header(struct rte_mbuf *pkt, uint16_t l4_offset, uint32_t sent_seq,
static inline void
update_ipv4_header(struct rte_mbuf *pkt, uint16_t l3_offset, uint16_t id)
{
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;

ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
l3_offset);
ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len - l3_offset);
ipv4_hdr->packet_id = rte_cpu_to_be_16(id);

@ -9,14 +9,14 @@ static void
update_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta,
struct rte_mbuf **segs, uint16_t nb_segs)
{
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct tcp_hdr *tcp_hdr;
uint32_t sent_seq;
uint16_t id, tail_idx, i;
uint16_t l3_offset = pkt->l2_len;
uint16_t l4_offset = l3_offset + pkt->l3_len;

ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char*) +
ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char*) +
l3_offset);
tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
@ -40,13 +40,13 @@ gso_tcp4_segment(struct rte_mbuf *pkt,
struct rte_mbuf **pkts_out,
uint16_t nb_pkts_out)
{
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
uint16_t pyld_unit_size, hdr_offset;
uint16_t frag_off;
int ret;

/* Don't process the fragmented packet */
ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
pkt->l2_len);
frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
if (unlikely(IS_FRAGMENTED(frag_off))) {

@ -9,7 +9,7 @@ static void
update_tunnel_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta,
struct rte_mbuf **segs, uint16_t nb_segs)
{
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct tcp_hdr *tcp_hdr;
uint32_t sent_seq;
uint16_t outer_id, inner_id, tail_idx, i;
@ -23,12 +23,12 @@ update_tunnel_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta,
tcp_offset = inner_ipv4_offset + pkt->l3_len;

/* Outer IPv4 header. */
ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
outer_ipv4_offset);
outer_id = rte_be_to_cpu_16(ipv4_hdr->packet_id);

/* Inner IPv4 header. */
ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
inner_ipv4_offset);
inner_id = rte_be_to_cpu_16(ipv4_hdr->packet_id);

@ -60,12 +60,12 @@ gso_tunnel_tcp4_segment(struct rte_mbuf *pkt,
struct rte_mbuf **pkts_out,
uint16_t nb_pkts_out)
{
struct ipv4_hdr *inner_ipv4_hdr;
struct rte_ipv4_hdr *inner_ipv4_hdr;
uint16_t pyld_unit_size, hdr_offset, frag_off;
int ret = 1;

hdr_offset = pkt->outer_l2_len + pkt->outer_l3_len + pkt->l2_len;
inner_ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
inner_ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
hdr_offset);
/*
* Don't process the packet whose MF bit or offset in the inner

@ -11,7 +11,7 @@ static inline void
update_ipv4_udp_headers(struct rte_mbuf *pkt, struct rte_mbuf **segs,
uint16_t nb_segs)
{
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
uint16_t frag_offset = 0, is_mf;
uint16_t l2_hdrlen = pkt->l2_len, l3_hdrlen = pkt->l3_len;
uint16_t tail_idx = nb_segs - 1, length, i;
@ -22,8 +22,8 @@ update_ipv4_udp_headers(struct rte_mbuf *pkt, struct rte_mbuf **segs,
* length.
*/
for (i = 0; i < nb_segs; i++) {
ipv4_hdr = rte_pktmbuf_mtod_offset(segs[i], struct ipv4_hdr *,
l2_hdrlen);
ipv4_hdr = rte_pktmbuf_mtod_offset(segs[i],
struct rte_ipv4_hdr *, l2_hdrlen);
length = segs[i]->pkt_len - l2_hdrlen;
ipv4_hdr->total_length = rte_cpu_to_be_16(length);

@ -42,13 +42,13 @@ gso_udp4_segment(struct rte_mbuf *pkt,
struct rte_mbuf **pkts_out,
uint16_t nb_pkts_out)
{
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
uint16_t pyld_unit_size, hdr_offset;
uint16_t frag_off;
int ret;

/* Don't process the fragmented packet */
ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
pkt->l2_len);
frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
if (unlikely(IS_FRAGMENTED(frag_off))) {

@ -19,11 +19,11 @@ extern "C" {

/* Minimum GSO segment size for TCP based packets. */
#define RTE_GSO_SEG_SIZE_MIN (sizeof(struct rte_ether_hdr) + \
sizeof(struct ipv4_hdr) + sizeof(struct tcp_hdr) + 1)
sizeof(struct rte_ipv4_hdr) + sizeof(struct tcp_hdr) + 1)

/* Minimum GSO segment size for UDP based packets. */
#define RTE_GSO_UDP_SEG_SIZE_MIN (sizeof(struct rte_ether_hdr) + \
sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr) + 1)
sizeof(struct rte_ipv4_hdr) + sizeof(struct udp_hdr) + 1)

/* GSO flags for rte_gso_ctx. */
#define RTE_GSO_FLAG_IPID_FIXED (1ULL << 0)

@ -139,7 +139,8 @@ rte_convert_rss_key(const uint32_t *orig, uint32_t *targ, int len)
* Pointer to rte_ipv6_tuple structure
*/
static inline void
rte_thash_load_v6_addrs(const struct ipv6_hdr *orig, union rte_thash_tuple *targ)
rte_thash_load_v6_addrs(const struct rte_ipv6_hdr *orig,
union rte_thash_tuple *targ)
{
#ifdef RTE_ARCH_X86
__m128i ipv6 = _mm_loadu_si128((const __m128i *)orig->src_addr);

@ -219,7 +219,7 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
*/
struct rte_mbuf *rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
struct rte_ip_frag_death_row *dr,
struct rte_mbuf *mb, uint64_t tms, struct ipv6_hdr *ip_hdr,
struct rte_mbuf *mb, uint64_t tms, struct rte_ipv6_hdr *ip_hdr,
struct ipv6_extension_fragment *frag_hdr);

/**
@ -234,7 +234,7 @@ struct rte_mbuf *rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
* present.
*/
static inline struct ipv6_extension_fragment *
rte_ipv6_frag_get_ipv6_fragment_header(struct ipv6_hdr *hdr)
rte_ipv6_frag_get_ipv6_fragment_header(struct rte_ipv6_hdr *hdr)
{
if (hdr->proto == IPPROTO_FRAGMENT) {
return (struct ipv6_extension_fragment *) ++hdr;
@ -293,7 +293,7 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
*/
struct rte_mbuf * rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
struct rte_ip_frag_death_row *dr,
struct rte_mbuf *mb, uint64_t tms, struct ipv4_hdr *ip_hdr);
struct rte_mbuf *mb, uint64_t tms, struct rte_ipv4_hdr *ip_hdr);

/**
* Check if the IPv4 packet is fragmented
@ -304,7 +304,8 @@ struct rte_mbuf * rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
* 1 if fragmented, 0 if not fragmented
*/
static inline int
rte_ipv4_frag_pkt_is_fragmented(const struct ipv4_hdr * hdr) {
rte_ipv4_frag_pkt_is_fragmented(const struct rte_ipv4_hdr *hdr)
{
uint16_t flag_offset, ip_flag, ip_ofs;

flag_offset = rte_be_to_cpu_16(hdr->fragment_offset);

@ -21,8 +21,8 @@

#define IPV4_HDR_FO_ALIGN (1 << IPV4_HDR_FO_SHIFT)

static inline void __fill_ipv4hdr_frag(struct ipv4_hdr *dst,
const struct ipv4_hdr *src, uint16_t len, uint16_t fofs,
static inline void __fill_ipv4hdr_frag(struct rte_ipv4_hdr *dst,
const struct rte_ipv4_hdr *src, uint16_t len, uint16_t fofs,
uint16_t dofs, uint32_t mf)
{
rte_memcpy(dst, src, sizeof(*dst));
@ -70,7 +70,7 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
struct rte_mempool *pool_indirect)
{
struct rte_mbuf *in_seg = NULL;
struct ipv4_hdr *in_hdr;
struct rte_ipv4_hdr *in_hdr;
uint32_t out_pkt_pos, in_seg_data_pos;
uint32_t more_in_segs;
uint16_t fragment_offset, flag_offset, frag_size;
@ -80,10 +80,10 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
* Ensure the IP payload length of all fragments is aligned to a
* multiple of 8 bytes as per RFC791 section 2.3.
*/
frag_size = RTE_ALIGN_FLOOR((mtu_size - sizeof(struct ipv4_hdr)),
frag_size = RTE_ALIGN_FLOOR((mtu_size - sizeof(struct rte_ipv4_hdr)),
IPV4_HDR_FO_ALIGN);

in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv4_hdr *);
in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);

/* If Don't Fragment flag is set */
@ -92,11 +92,11 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,

/* Check that pkts_out is big enough to hold all fragments */
if (unlikely(frag_size * nb_pkts_out <
(uint16_t)(pkt_in->pkt_len - sizeof (struct ipv4_hdr))))
(uint16_t)(pkt_in->pkt_len - sizeof(struct rte_ipv4_hdr))))
return -EINVAL;

in_seg = pkt_in;
in_seg_data_pos = sizeof(struct ipv4_hdr);
in_seg_data_pos = sizeof(struct rte_ipv4_hdr);
out_pkt_pos = 0;
fragment_offset = 0;

@ -104,7 +104,7 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
while (likely(more_in_segs)) {
struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
uint32_t more_out_segs;
struct ipv4_hdr *out_hdr;
struct rte_ipv4_hdr *out_hdr;

/* Allocate direct buffer */
out_pkt = rte_pktmbuf_alloc(pool_direct);
@ -114,8 +114,8 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
}

/* Reserve space for the IP header that will be built later */
out_pkt->data_len = sizeof(struct ipv4_hdr);
out_pkt->pkt_len = sizeof(struct ipv4_hdr);
out_pkt->data_len = sizeof(struct rte_ipv4_hdr);
out_pkt->pkt_len = sizeof(struct rte_ipv4_hdr);
frag_bytes_remaining = frag_size;

out_seg_prev = out_pkt;
@ -164,17 +164,17 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,

/* Build the IP header */

out_hdr = rte_pktmbuf_mtod(out_pkt, struct ipv4_hdr *);
out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);

__fill_ipv4hdr_frag(out_hdr, in_hdr,
(uint16_t)out_pkt->pkt_len,
flag_offset, fragment_offset, more_in_segs);

fragment_offset = (uint16_t)(fragment_offset +
out_pkt->pkt_len - sizeof(struct ipv4_hdr));
out_pkt->pkt_len - sizeof(struct rte_ipv4_hdr));

out_pkt->ol_flags |= PKT_TX_IP_CKSUM;
out_pkt->l3_len = sizeof(struct ipv4_hdr);
out_pkt->l3_len = sizeof(struct rte_ipv4_hdr);

/* Write the fragment to the output list */
pkts_out[out_pkt_pos] = out_pkt;

@ -14,7 +14,7 @@
struct rte_mbuf *
ipv4_frag_reassemble(struct ip_frag_pkt *fp)
{
struct ipv4_hdr *ip_hdr;
struct rte_ipv4_hdr *ip_hdr;
struct rte_mbuf *m, *prev;
uint32_t i, n, ofs, first_len;
uint32_t curr_idx = 0;
@ -70,7 +70,7 @@ ipv4_frag_reassemble(struct ip_frag_pkt *fp)
m->ol_flags |= PKT_TX_IP_CKSUM;

/* update ipv4 header for the reassembled packet */
ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, m->l2_len);

ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
m->l3_len));
@ -100,7 +100,7 @@ ipv4_frag_reassemble(struct ip_frag_pkt *fp)
struct rte_mbuf *
rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
struct ipv4_hdr *ip_hdr)
struct rte_ipv4_hdr *ip_hdr)
{
struct ip_frag_pkt *fp;
struct ip_frag_key key;

@ -18,8 +18,8 @@
*/

static inline void
__fill_ipv6hdr_frag(struct ipv6_hdr *dst,
const struct ipv6_hdr *src, uint16_t len, uint16_t fofs,
__fill_ipv6hdr_frag(struct rte_ipv6_hdr *dst,
const struct rte_ipv6_hdr *src, uint16_t len, uint16_t fofs,
uint32_t mf)
{
struct ipv6_extension_fragment *fh;
@ -73,7 +73,7 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
struct rte_mempool *pool_indirect)
{
struct rte_mbuf *in_seg = NULL;
struct ipv6_hdr *in_hdr;
struct rte_ipv6_hdr *in_hdr;
uint32_t out_pkt_pos, in_seg_data_pos;
uint32_t more_in_segs;
uint16_t fragment_offset, frag_size;
@ -83,18 +83,18 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
* Ensure the IP payload length of all fragments (except the
* the last fragment) are a multiple of 8 bytes per RFC2460.
*/
frag_size = RTE_ALIGN_FLOOR(mtu_size - sizeof(struct ipv6_hdr),
frag_size = RTE_ALIGN_FLOOR(mtu_size - sizeof(struct rte_ipv6_hdr),
RTE_IPV6_EHDR_FO_ALIGN);

/* Check that pkts_out is big enough to hold all fragments */
if (unlikely (frag_size * nb_pkts_out <
(uint16_t)(pkt_in->pkt_len - sizeof (struct ipv6_hdr))))
(uint16_t)(pkt_in->pkt_len - sizeof(struct rte_ipv6_hdr))))
return -EINVAL;

in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv6_hdr *);
in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv6_hdr *);

in_seg = pkt_in;
in_seg_data_pos = sizeof(struct ipv6_hdr);
in_seg_data_pos = sizeof(struct rte_ipv6_hdr);
out_pkt_pos = 0;
fragment_offset = 0;

@ -102,7 +102,7 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
while (likely(more_in_segs)) {
struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
uint32_t more_out_segs;
struct ipv6_hdr *out_hdr;
struct rte_ipv6_hdr *out_hdr;

/* Allocate direct buffer */
out_pkt = rte_pktmbuf_alloc(pool_direct);
@ -112,8 +112,10 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
}

/* Reserve space for the IP header that will be built later */
out_pkt->data_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
out_pkt->pkt_len = sizeof(struct ipv6_hdr) + sizeof(struct ipv6_extension_fragment);
out_pkt->data_len = sizeof(struct rte_ipv6_hdr) +
sizeof(struct ipv6_extension_fragment);
out_pkt->pkt_len = sizeof(struct rte_ipv6_hdr) +
sizeof(struct ipv6_extension_fragment);
frag_bytes_remaining = frag_size;

out_seg_prev = out_pkt;
@ -163,14 +165,14 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,

/* Build the IP header */

out_hdr = rte_pktmbuf_mtod(out_pkt, struct ipv6_hdr *);
out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv6_hdr *);

__fill_ipv6hdr_frag(out_hdr, in_hdr,
(uint16_t) out_pkt->pkt_len - sizeof(struct ipv6_hdr),
(uint16_t) out_pkt->pkt_len - sizeof(struct rte_ipv6_hdr),
fragment_offset, more_in_segs);

fragment_offset = (uint16_t)(fragment_offset +
out_pkt->pkt_len - sizeof(struct ipv6_hdr)
out_pkt->pkt_len - sizeof(struct rte_ipv6_hdr)
- sizeof(struct ipv6_extension_fragment));

/* Write the fragment to the output list */

@ -32,7 +32,7 @@ ip_frag_memmove(char *dst, char *src, int len)
struct rte_mbuf *
ipv6_frag_reassemble(struct ip_frag_pkt *fp)
{
struct ipv6_hdr *ip_hdr;
struct rte_ipv6_hdr *ip_hdr;
struct ipv6_extension_fragment *frag_hdr;
struct rte_mbuf *m, *prev;
uint32_t i, n, ofs, first_len;
@ -93,7 +93,7 @@ ipv6_frag_reassemble(struct ip_frag_pkt *fp)
m->ol_flags |= PKT_TX_IP_CKSUM;

/* update ipv6 header for the reassembled datagram */
ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, m->l2_len);
ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, m->l2_len);

ip_hdr->payload_len = rte_cpu_to_be_16(payload_len);

@ -139,7 +139,7 @@ ipv6_frag_reassemble(struct ip_frag_pkt *fp)
struct rte_mbuf *
rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
struct ipv6_hdr *ip_hdr, struct ipv6_extension_fragment *frag_hdr)
struct rte_ipv6_hdr *ip_hdr, struct ipv6_extension_fragment *frag_hdr)
{
struct ip_frag_pkt *fp;
struct ip_frag_key key;

@ -40,8 +40,8 @@ static inline int
update_trs_l3hdr(const struct rte_ipsec_sa *sa, void *p, uint32_t plen,
uint32_t l2len, uint32_t l3len, uint8_t proto)
{
struct ipv4_hdr *v4h;
struct ipv6_hdr *v6h;
struct rte_ipv4_hdr *v4h;
struct rte_ipv6_hdr *v6h;
int32_t rc;

if ((sa->type & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) {
@ -67,8 +67,8 @@ static inline void
update_tun_l3hdr(const struct rte_ipsec_sa *sa, void *p, uint32_t plen,
uint32_t l2len, rte_be16_t pid)
{
struct ipv4_hdr *v4h;
struct ipv6_hdr *v6h;
struct rte_ipv4_hdr *v4h;
struct rte_ipv6_hdr *v6h;

if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV4) {
v4h = p;

@ -28,7 +28,7 @@ extern "C" {
/**
* IPv4 Header
*/
struct ipv4_hdr {
struct rte_ipv4_hdr {
uint8_t version_ihl; /**< version and header length */
uint8_t type_of_service; /**< type of service */
uint16_t total_length; /**< length of packet */
@ -249,10 +249,10 @@ rte_raw_cksum_mbuf(const struct rte_mbuf *m, uint32_t off, uint32_t len,
* The complemented checksum to set in the IP packet.
*/
static inline uint16_t
rte_ipv4_cksum(const struct ipv4_hdr *ipv4_hdr)
rte_ipv4_cksum(const struct rte_ipv4_hdr *ipv4_hdr)
{
uint16_t cksum;
cksum = rte_raw_cksum(ipv4_hdr, sizeof(struct ipv4_hdr));
cksum = rte_raw_cksum(ipv4_hdr, sizeof(struct rte_ipv4_hdr));
return (cksum == 0xffff) ? cksum : (uint16_t)~cksum;
}

@ -275,7 +275,7 @@ rte_ipv4_cksum(const struct ipv4_hdr *ipv4_hdr)
* The non-complemented checksum to set in the L4 header.
*/
static inline uint16_t
rte_ipv4_phdr_cksum(const struct ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
rte_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
{
struct ipv4_psd_header {
uint32_t src_addr; /* IP address of source host. */
@ -294,7 +294,7 @@ rte_ipv4_phdr_cksum(const struct ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
} else {
psd_hdr.len = rte_cpu_to_be_16(
(uint16_t)(rte_be_to_cpu_16(ipv4_hdr->total_length)
- sizeof(struct ipv4_hdr)));
- sizeof(struct rte_ipv4_hdr)));
}
return rte_raw_cksum(&psd_hdr, sizeof(psd_hdr));
}
@ -314,16 +314,16 @@ rte_ipv4_phdr_cksum(const struct ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
* or 0 on error
*/
static inline uint16_t
rte_ipv4_udptcp_cksum(const struct ipv4_hdr *ipv4_hdr, const void *l4_hdr)
rte_ipv4_udptcp_cksum(const struct rte_ipv4_hdr *ipv4_hdr, const void *l4_hdr)
{
uint32_t cksum;
uint32_t l3_len, l4_len;

l3_len = rte_be_to_cpu_16(ipv4_hdr->total_length);
if (l3_len < sizeof(struct ipv4_hdr))
if (l3_len < sizeof(struct rte_ipv4_hdr))
return 0;

l4_len = l3_len - sizeof(struct ipv4_hdr);
l4_len = l3_len - sizeof(struct rte_ipv4_hdr);

cksum = rte_raw_cksum(l4_hdr, l4_len);
cksum += rte_ipv4_phdr_cksum(ipv4_hdr, 0);
@ -339,7 +339,7 @@ rte_ipv4_udptcp_cksum(const struct ipv4_hdr *ipv4_hdr, const void *l4_hdr)
/**
* IPv6 Header
*/
struct ipv6_hdr {
struct rte_ipv6_hdr {
uint32_t vtc_flow; /**< IP version, traffic class & flow label. */
uint16_t payload_len; /**< IP packet length - includes sizeof(ip_header). */
uint8_t proto; /**< Protocol, next header. */
@ -371,7 +371,7 @@ struct ipv6_hdr {
* The non-complemented checksum to set in the L4 header.
*/
static inline uint16_t
rte_ipv6_phdr_cksum(const struct ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
rte_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
{
uint32_t sum;
struct {
@ -407,7 +407,7 @@ rte_ipv6_phdr_cksum(const struct ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
* The complemented checksum to set in the IP packet.
*/
static inline uint16_t
rte_ipv6_udptcp_cksum(const struct ipv6_hdr *ipv6_hdr, const void *l4_hdr)
rte_ipv6_udptcp_cksum(const struct rte_ipv6_hdr *ipv6_hdr, const void *l4_hdr)
{
uint32_t cksum;
uint32_t l4_len;

@ -173,7 +173,7 @@ ptype_tunnel(uint16_t *proto, const struct rte_mbuf *m,

/* get the ipv4 header length */
static uint8_t
ip4_hlen(const struct ipv4_hdr *hdr)
ip4_hlen(const struct rte_ipv4_hdr *hdr)
{
return (hdr->version_ihl & 0xf) * 4;
}
@ -300,8 +300,8 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
return pkt_type;

if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
const struct ipv4_hdr *ip4h;
struct ipv4_hdr ip4h_copy;
const struct rte_ipv4_hdr *ip4h;
struct rte_ipv4_hdr ip4h_copy;

ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
if (unlikely(ip4h == NULL))
@ -323,8 +323,8 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
proto = ip4h->next_proto_id;
pkt_type |= ptype_l4(proto);
} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {
const struct ipv6_hdr *ip6h;
struct ipv6_hdr ip6h_copy;
const struct rte_ipv6_hdr *ip6h;
struct rte_ipv6_hdr ip6h_copy;
int frag = 0;

ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);
@ -432,8 +432,8 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
return pkt_type;

if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
const struct ipv4_hdr *ip4h;
struct ipv4_hdr ip4h_copy;
const struct rte_ipv4_hdr *ip4h;
struct rte_ipv4_hdr ip4h_copy;

ip4h = rte_pktmbuf_read(m, off, sizeof(*ip4h), &ip4h_copy);
if (unlikely(ip4h == NULL))
@ -455,8 +455,8 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
proto = ip4h->next_proto_id;
pkt_type |= ptype_inner_l4(proto);
} else if (proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6)) {
const struct ipv6_hdr *ip6h;
struct ipv6_hdr ip6h_copy;
const struct rte_ipv6_hdr *ip6h;
struct rte_ipv6_hdr ip6h_copy;
int frag = 0;

ip6h = rte_pktmbuf_read(m, off, sizeof(*ip6h), &ip6h_copy);

@ -112,8 +112,8 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
static inline int
rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
{
struct ipv4_hdr *ipv4_hdr;
struct ipv6_hdr *ipv6_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
struct tcp_hdr *tcp_hdr;
struct udp_hdr *udp_hdr;
uint64_t inner_l3_offset = m->l2_len;
@ -144,7 +144,7 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
#endif

if (ol_flags & PKT_TX_IPV4) {
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
inner_l3_offset);

if (ol_flags & PKT_TX_IP_CKSUM)
@ -158,8 +158,8 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
ol_flags);
} else {
ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
inner_l3_offset);
ipv6_hdr = rte_pktmbuf_mtod_offset(m,
struct rte_ipv6_hdr *, inner_l3_offset);
/* non-TSO udp */
udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
inner_l3_offset + m->l3_len);
@ -175,8 +175,8 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
tcp_hdr->cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
ol_flags);
} else {
ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
inner_l3_offset);
ipv6_hdr = rte_pktmbuf_mtod_offset(m,
struct rte_ipv6_hdr *, inner_l3_offset);
/* non-TSO tcp or TSO */
tcp_hdr = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *,
inner_l3_offset + m->l3_len);

@ -491,7 +491,7 @@ struct encap_pppoe_data {

struct encap_vxlan_ipv4_data {
struct rte_ether_hdr ether;
struct ipv4_hdr ipv4;
struct rte_ipv4_hdr ipv4;
struct udp_hdr udp;
struct rte_vxlan_hdr vxlan;
} __attribute__((__packed__));
@ -499,14 +499,14 @@ struct encap_vxlan_ipv4_data {
struct encap_vxlan_ipv4_vlan_data {
struct rte_ether_hdr ether;
struct rte_vlan_hdr vlan;
struct ipv4_hdr ipv4;
struct rte_ipv4_hdr ipv4;
struct udp_hdr udp;
struct rte_vxlan_hdr vxlan;
} __attribute__((__packed__));

struct encap_vxlan_ipv6_data {
struct rte_ether_hdr ether;
struct ipv6_hdr ipv6;
struct rte_ipv6_hdr ipv6;
struct udp_hdr udp;
struct rte_vxlan_hdr vxlan;
} __attribute__((__packed__));
@ -514,7 +514,7 @@ struct encap_vxlan_ipv6_data {
struct encap_vxlan_ipv6_vlan_data {
struct rte_ether_hdr ether;
struct rte_vlan_hdr vlan;
struct ipv6_hdr ipv6;
struct rte_ipv6_hdr ipv6;
struct udp_hdr udp;
struct rte_vxlan_hdr vxlan;
} __attribute__((__packed__));
@ -1007,7 +1007,7 @@ pkt_work_encap_vxlan_ipv4(struct rte_mbuf *mbuf,
ipv4_total_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
sizeof(struct udp_hdr) +
sizeof(struct ipv4_hdr));
sizeof(struct rte_ipv4_hdr));
ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
rte_htons(ipv4_total_length));
udp_length = ether_length +
@ -1037,7 +1037,7 @@ pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf *mbuf,
ipv4_total_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
sizeof(struct udp_hdr) +
sizeof(struct ipv4_hdr));
sizeof(struct rte_ipv4_hdr));
ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
rte_htons(ipv4_total_length));
udp_length = ether_length +
@ -1342,7 +1342,7 @@ nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
}

static __rte_always_inline void
pkt_ipv4_work_nat(struct ipv4_hdr *ip,
pkt_ipv4_work_nat(struct rte_ipv4_hdr *ip,
struct nat_ipv4_data *data,
struct rte_table_action_nat_config *cfg)
{
@ -1428,7 +1428,7 @@ pkt_ipv4_work_nat(struct ipv4_hdr *ip,
}

static __rte_always_inline void
pkt_ipv6_work_nat(struct ipv6_hdr *ip,
pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
struct nat_ipv6_data *data,
struct rte_table_action_nat_config *cfg)
{
@ -1536,7 +1536,7 @@ ttl_apply(void *data,
}

static __rte_always_inline uint64_t
pkt_ipv4_work_ttl(struct ipv4_hdr *ip,
pkt_ipv4_work_ttl(struct rte_ipv4_hdr *ip,
struct ttl_data *data)
{
uint32_t drop;
@ -1557,7 +1557,7 @@ pkt_ipv4_work_ttl(struct ipv4_hdr *ip,
}

static __rte_always_inline uint64_t
pkt_ipv6_work_ttl(struct ipv6_hdr *ip,
pkt_ipv6_work_ttl(struct rte_ipv6_hdr *ip,
struct ttl_data *data)
{
uint32_t drop;
@ -2893,16 +2893,16 @@ pkt_work(struct rte_mbuf *mbuf,
uint16_t total_length;

if (cfg->common.ip_version) {
struct ipv4_hdr *hdr = ip;
struct rte_ipv4_hdr *hdr = ip;

dscp = hdr->type_of_service >> 2;
total_length = rte_ntohs(hdr->total_length);
} else {
struct ipv6_hdr *hdr = ip;
struct rte_ipv6_hdr *hdr = ip;

dscp = (rte_ntohl(hdr->vtc_flow) & 0x0F600000) >> 18;
total_length =
rte_ntohs(hdr->payload_len) + sizeof(struct ipv6_hdr);
total_length = rte_ntohs(hdr->payload_len) +
sizeof(struct rte_ipv6_hdr);
}

if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
@ -3041,10 +3041,10 @@ pkt4_work(struct rte_mbuf **mbufs,
uint16_t total_length0, total_length1, total_length2, total_length3;

if (cfg->common.ip_version) {
struct ipv4_hdr *hdr0 = ip0;
struct ipv4_hdr *hdr1 = ip1;
struct ipv4_hdr *hdr2 = ip2;
struct ipv4_hdr *hdr3 = ip3;
struct rte_ipv4_hdr *hdr0 = ip0;
struct rte_ipv4_hdr *hdr1 = ip1;
struct rte_ipv4_hdr *hdr2 = ip2;
struct rte_ipv4_hdr *hdr3 = ip3;

dscp0 = hdr0->type_of_service >> 2;
dscp1 = hdr1->type_of_service >> 2;
@ -3056,24 +3056,24 @@ pkt4_work(struct rte_mbuf **mbufs,
total_length2 = rte_ntohs(hdr2->total_length);
total_length3 = rte_ntohs(hdr3->total_length);
} else {
struct ipv6_hdr *hdr0 = ip0;
struct ipv6_hdr *hdr1 = ip1;
struct ipv6_hdr *hdr2 = ip2;
struct ipv6_hdr *hdr3 = ip3;
struct rte_ipv6_hdr *hdr0 = ip0;
struct rte_ipv6_hdr *hdr1 = ip1;
struct rte_ipv6_hdr *hdr2 = ip2;
struct rte_ipv6_hdr *hdr3 = ip3;

dscp0 = (rte_ntohl(hdr0->vtc_flow) & 0x0F600000) >> 18;
dscp1 = (rte_ntohl(hdr1->vtc_flow) & 0x0F600000) >> 18;
dscp2 = (rte_ntohl(hdr2->vtc_flow) & 0x0F600000) >> 18;
dscp3 = (rte_ntohl(hdr3->vtc_flow) & 0x0F600000) >> 18;

total_length0 =
rte_ntohs(hdr0->payload_len) + sizeof(struct ipv6_hdr);
total_length1 =
rte_ntohs(hdr1->payload_len) + sizeof(struct ipv6_hdr);
total_length2 =
rte_ntohs(hdr2->payload_len) + sizeof(struct ipv6_hdr);
total_length3 =
rte_ntohs(hdr3->payload_len) + sizeof(struct ipv6_hdr);
total_length0 = rte_ntohs(hdr0->payload_len) +
sizeof(struct rte_ipv6_hdr);
total_length1 = rte_ntohs(hdr1->payload_len) +
sizeof(struct rte_ipv6_hdr);
total_length2 = rte_ntohs(hdr2->payload_len) +
sizeof(struct rte_ipv6_hdr);
total_length3 = rte_ntohs(hdr3->payload_len) +
sizeof(struct rte_ipv6_hdr);
}

if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {

@ -151,7 +151,8 @@ static void
process_ipv4(struct rte_port_ring_writer_ras *p, struct rte_mbuf *pkt)
{
/* Assume there is no ethernet header */
struct ipv4_hdr *pkt_hdr = rte_pktmbuf_mtod(pkt, struct ipv4_hdr *);
struct rte_ipv4_hdr *pkt_hdr =
rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);

/* Get "More fragments" flag and fragment offset */
uint16_t frag_field = rte_be_to_cpu_16(pkt_hdr->fragment_offset);
@ -182,7 +183,8 @@ static void
process_ipv6(struct rte_port_ring_writer_ras *p, struct rte_mbuf *pkt)
{
/* Assume there is no ethernet header */
struct ipv6_hdr *pkt_hdr = rte_pktmbuf_mtod(pkt, struct ipv6_hdr *);
struct rte_ipv6_hdr *pkt_hdr =
rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);

struct ipv6_extension_fragment *frag_hdr;
uint16_t frag_data = 0;

@ -238,9 +238,9 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)

/* IP cksum verification cannot be bypassed, then calculate here */
if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
struct ipv4_hdr *ipv4_hdr;
struct rte_ipv4_hdr *ipv4_hdr;

ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *,
ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
m_buf->l2_len);
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
}
@ -966,8 +966,8 @@ virtio_net_with_host_offload(struct virtio_net *dev)
static void
parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
{
struct ipv4_hdr *ipv4_hdr;
struct ipv6_hdr *ipv6_hdr;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
void *l3_hdr = NULL;
struct rte_ether_hdr *eth_hdr;
uint16_t ethertype;
@ -998,7 +998,7 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
case RTE_ETHER_TYPE_IPv6:
ipv6_hdr = l3_hdr;
*l4_proto = ipv6_hdr->proto;
m->l3_len = sizeof(struct ipv6_hdr);
m->l3_len = sizeof(struct rte_ipv6_hdr);
*l4_hdr = (char *)l3_hdr + m->l3_len;
m->ol_flags |= PKT_TX_IPV6;
break;