net: add rte prefix to UDP structure

Add 'rte_' prefix to structure:
- rename struct udp_hdr to struct rte_udp_hdr.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Reviewed-by: Stephen Hemminger <stephen@networkplumber.org>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
Authored by Olivier Matz on 2019-05-21 18:13:14 +02:00; committed by Ferruh Yigit
parent f41b5156fe
commit e73e3547ce
40 changed files with 146 additions and 139 deletions

View File

@ -113,7 +113,7 @@ parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
((char *)ipv4_hdr + info->l3_len);
info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
} else if (info->l4_proto == IPPROTO_UDP)
info->l4_len = sizeof(struct udp_hdr);
info->l4_len = sizeof(struct rte_udp_hdr);
else
info->l4_len = 0;
}
@ -133,7 +133,7 @@ parse_ipv6(struct rte_ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
((char *)ipv6_hdr + info->l3_len);
info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
} else if (info->l4_proto == IPPROTO_UDP)
info->l4_len = sizeof(struct udp_hdr);
info->l4_len = sizeof(struct rte_udp_hdr);
else
info->l4_len = 0;
}
@ -181,7 +181,7 @@ parse_ethernet(struct rte_ether_hdr *eth_hdr, struct testpmd_offload_info *info)
/* Parse a vxlan header */
static void
parse_vxlan(struct udp_hdr *udp_hdr,
parse_vxlan(struct rte_udp_hdr *udp_hdr,
struct testpmd_offload_info *info,
uint32_t pkt_type)
{
@ -201,7 +201,7 @@ parse_vxlan(struct udp_hdr *udp_hdr,
info->outer_l4_proto = info->l4_proto;
eth_hdr = (struct rte_ether_hdr *)((char *)udp_hdr +
sizeof(struct udp_hdr) +
sizeof(struct rte_udp_hdr) +
sizeof(struct rte_vxlan_hdr));
parse_ethernet(eth_hdr, info);
@ -210,7 +210,7 @@ parse_vxlan(struct udp_hdr *udp_hdr,
/* Parse a vxlan-gpe header */
static void
parse_vxlan_gpe(struct udp_hdr *udp_hdr,
parse_vxlan_gpe(struct rte_udp_hdr *udp_hdr,
struct testpmd_offload_info *info)
{
struct rte_ether_hdr *eth_hdr;
@ -224,7 +224,7 @@ parse_vxlan_gpe(struct udp_hdr *udp_hdr,
return;
vxlan_gpe_hdr = (struct rte_vxlan_gpe_hdr *)((char *)udp_hdr +
sizeof(struct udp_hdr));
sizeof(struct rte_udp_hdr));
if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
RTE_VXLAN_GPE_TYPE_IPV4) {
@ -368,7 +368,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
uint64_t tx_offloads)
{
struct rte_ipv4_hdr *ipv4_hdr = l3_hdr;
struct udp_hdr *udp_hdr;
struct rte_udp_hdr *udp_hdr;
struct rte_tcp_hdr *tcp_hdr;
struct rte_sctp_hdr *sctp_hdr;
uint64_t ol_flags = 0;
@ -408,7 +408,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
return 0; /* packet type not supported, nothing to do */
if (info->l4_proto == IPPROTO_UDP) {
udp_hdr = (struct udp_hdr *)((char *)l3_hdr + info->l3_len);
udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
/* do not recalculate udp cksum if it was 0 */
if (udp_hdr->dgram_cksum != 0) {
udp_hdr->dgram_cksum = 0;
@ -461,7 +461,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
{
struct rte_ipv4_hdr *ipv4_hdr = outer_l3_hdr;
struct rte_ipv6_hdr *ipv6_hdr = outer_l3_hdr;
struct udp_hdr *udp_hdr;
struct rte_udp_hdr *udp_hdr;
uint64_t ol_flags = 0;
if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPv4)) {
@ -484,7 +484,8 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
return ol_flags;
}
udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + info->outer_l3_len);
udp_hdr = (struct rte_udp_hdr *)
((char *)outer_l3_hdr + info->outer_l3_len);
/* outer UDP checksum is done in software. In the other side, for
* UDP tunneling, like VXLAN or Geneve, outer UDP checksum can be
@ -782,10 +783,10 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
/* check if it's a supported tunnel */
if (txp->parse_tunnel) {
if (info.l4_proto == IPPROTO_UDP) {
struct udp_hdr *udp_hdr;
struct rte_udp_hdr *udp_hdr;
udp_hdr = (struct udp_hdr *)((char *)l3_hdr +
info.l3_len);
udp_hdr = (struct rte_udp_hdr *)
((char *)l3_hdr + info.l3_len);
parse_vxlan_gpe(udp_hdr, &info);
if (info.is_tunnel) {
tx_ol_flags |= PKT_TX_TUNNEL_VXLAN_GPE;

View File

@ -121,7 +121,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
struct rte_mbuf *pkt;
struct rte_ether_hdr *eth_hdr;
struct rte_ipv4_hdr *ip_hdr;
struct udp_hdr *udp_hdr;
struct rte_udp_hdr *udp_hdr;
uint16_t vlan_tci, vlan_tci_outer;
uint64_t ol_flags = 0;
uint16_t nb_rx;
@ -193,7 +193,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
sizeof(*ip_hdr));
/* Initialize UDP header. */
udp_hdr = (struct udp_hdr *)(ip_hdr + 1);
udp_hdr = (struct rte_udp_hdr *)(ip_hdr + 1);
udp_hdr->src_port = rte_cpu_to_be_16(cfg_udp_src);
udp_hdr->dst_port = rte_cpu_to_be_16(cfg_udp_dst);
udp_hdr->dgram_cksum = 0; /* No UDP checksum. */

View File

@ -55,7 +55,7 @@ uint32_t tx_ip_dst_addr = (192U << 24) | (18 << 16) | (0 << 8) | 2;
static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */
static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */
static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. */
static void
copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
@ -96,7 +96,7 @@ copy_buf_to_pkt(void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
static void
setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr,
struct udp_hdr *udp_hdr,
struct rte_udp_hdr *udp_hdr,
uint16_t pkt_data_len)
{
uint16_t *ptr16;
@ -106,7 +106,7 @@ setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr,
/*
* Initialize UDP header.
*/
pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr));
pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
udp_hdr->src_port = rte_cpu_to_be_16(tx_udp_src_port);
udp_hdr->dst_port = rte_cpu_to_be_16(tx_udp_dst_port);
udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
@ -350,7 +350,7 @@ tx_only_begin(__attribute__((unused)) portid_t pi)
pkt_data_len = (uint16_t) (tx_pkt_length - (
sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr) +
sizeof(struct udp_hdr)));
sizeof(struct rte_udp_hdr)));
setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
}

View File

@ -105,7 +105,7 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
if (is_encapsulation) {
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
struct udp_hdr *udp_hdr;
struct rte_udp_hdr *udp_hdr;
uint8_t l2_len;
uint8_t l3_len;
uint8_t l4_len;
@ -130,9 +130,9 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
}
if (l4_proto == IPPROTO_UDP) {
udp_hdr = rte_pktmbuf_mtod_offset(mb,
struct udp_hdr *,
struct rte_udp_hdr *,
l2_len + l3_len);
l4_len = sizeof(struct udp_hdr);
l4_len = sizeof(struct rte_udp_hdr);
vxlan_hdr = rte_pktmbuf_mtod_offset(mb,
struct rte_vxlan_hdr *,
l2_len + l3_len + l4_len);

View File

@ -93,12 +93,12 @@ initialize_arp_header(struct rte_arp_hdr *arp_hdr,
}
uint16_t
initialize_udp_header(struct udp_hdr *udp_hdr, uint16_t src_port,
initialize_udp_header(struct rte_udp_hdr *udp_hdr, uint16_t src_port,
uint16_t dst_port, uint16_t pkt_data_len)
{
uint16_t pkt_len;
pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr));
pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
udp_hdr->src_port = rte_cpu_to_be_16(src_port);
udp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
@ -129,7 +129,7 @@ initialize_sctp_header(struct rte_sctp_hdr *sctp_hdr, uint16_t src_port,
{
uint16_t pkt_len;
pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr));
pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
sctp_hdr->src_port = rte_cpu_to_be_16(src_port);
sctp_hdr->dst_port = rte_cpu_to_be_16(dst_port);
@ -260,7 +260,7 @@ initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
int
generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
void *ip_hdr, uint8_t ipv4, struct udp_hdr *udp_hdr,
void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
{
int i, nb_pkt = 0;
@ -390,7 +390,7 @@ generate_packet_burst_proto(struct rte_mempool *mp,
switch (proto) {
case IPPROTO_UDP:
copy_buf_to_pkt(proto_hdr,
sizeof(struct udp_hdr), pkt,
sizeof(struct rte_udp_hdr), pkt,
eth_hdr_size +
sizeof(struct rte_ipv4_hdr));
break;
@ -415,7 +415,7 @@ generate_packet_burst_proto(struct rte_mempool *mp,
switch (proto) {
case IPPROTO_UDP:
copy_buf_to_pkt(proto_hdr,
sizeof(struct udp_hdr), pkt,
sizeof(struct rte_udp_hdr), pkt,
eth_hdr_size +
sizeof(struct rte_ipv6_hdr));
break;

View File

@ -35,7 +35,7 @@ initialize_arp_header(struct rte_arp_hdr *arp_hdr,
uint32_t src_ip, uint32_t dst_ip, uint32_t opcode);
uint16_t
initialize_udp_header(struct udp_hdr *udp_hdr, uint16_t src_port,
initialize_udp_header(struct rte_udp_hdr *udp_hdr, uint16_t src_port,
uint16_t dst_port, uint16_t pkt_data_len);
uint16_t
@ -61,7 +61,7 @@ initialize_ipv4_header_proto(struct rte_ipv4_hdr *ip_hdr, uint32_t src_addr,
int
generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
struct rte_ether_hdr *eth_hdr, uint8_t vlan_enabled,
void *ip_hdr, uint8_t ipv4, struct udp_hdr *udp_hdr,
void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr,
int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
int

View File

@ -493,7 +493,7 @@ init_ipv4_udp_traffic(struct rte_mempool *mp,
{
struct rte_ether_hdr pkt_eth_hdr;
struct rte_ipv4_hdr pkt_ipv4_hdr;
struct udp_hdr pkt_udp_hdr;
struct rte_udp_hdr pkt_udp_hdr;
uint32_t src_addr = IPV4_ADDR(2, 2, 2, 3);
uint32_t dst_addr = IPV4_ADDR(2, 2, 2, 7);
uint16_t src_port = 32;

View File

@ -80,13 +80,13 @@ struct link_bonding_unittest_params {
struct rte_ether_hdr *pkt_eth_hdr;
struct rte_ipv4_hdr *pkt_ipv4_hdr;
struct rte_ipv6_hdr *pkt_ipv6_hdr;
struct udp_hdr *pkt_udp_hdr;
struct rte_udp_hdr *pkt_udp_hdr;
};
static struct rte_ipv4_hdr pkt_ipv4_hdr;
static struct rte_ipv6_hdr pkt_ipv6_hdr;
static struct udp_hdr pkt_udp_hdr;
static struct rte_udp_hdr pkt_udp_hdr;
static struct link_bonding_unittest_params default_params = {
.bonded_port_id = -1,

View File

@ -733,7 +733,7 @@ generate_packets(struct rte_ether_addr *src_mac,
uint32_t ip_dst[4] = { [0 ... 2] = 0xFEEDFACE, [3] = RTE_IPv4(192, 168, 0, 2) };
struct rte_ether_hdr pkt_eth_hdr;
struct udp_hdr pkt_udp_hdr;
struct rte_udp_hdr pkt_udp_hdr;
union {
struct rte_ipv4_hdr v4;
struct rte_ipv6_hdr v6;

View File

@ -184,7 +184,7 @@ init_traffic(struct rte_mempool *mp,
{
struct rte_ether_hdr pkt_eth_hdr;
struct rte_ipv4_hdr pkt_ipv4_hdr;
struct udp_hdr pkt_udp_hdr;
struct rte_udp_hdr pkt_udp_hdr;
uint32_t pktlen;
static uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
static uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };

View File

@ -839,7 +839,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
size_t vlan_offset;
int i;
struct udp_hdr *udp_hdr;
struct rte_udp_hdr *udp_hdr;
struct rte_tcp_hdr *tcp_hdr;
uint32_t hash, l3hash, l4hash;
@ -874,7 +874,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
l4hash = HASH_L4_PORTS(tcp_hdr);
} else if (ipv4_hdr->next_proto_id ==
IPPROTO_UDP) {
udp_hdr = (struct udp_hdr *)
udp_hdr = (struct rte_udp_hdr *)
((char *)ipv4_hdr +
ip_hdr_offset);
if ((size_t)udp_hdr + sizeof(*udp_hdr)
@ -891,7 +891,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
tcp_hdr = (struct rte_tcp_hdr *)(ipv6_hdr + 1);
l4hash = HASH_L4_PORTS(tcp_hdr);
} else if (ipv6_hdr->proto == IPPROTO_UDP) {
udp_hdr = (struct udp_hdr *)(ipv6_hdr + 1);
udp_hdr = (struct rte_udp_hdr *)(ipv6_hdr + 1);
l4hash = HASH_L4_PORTS(udp_hdr);
}
}

View File

@ -230,7 +230,7 @@ static inline void dpaa_checksum(struct rte_mbuf *mbuf)
tcp_hdr);
} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
RTE_PTYPE_L4_UDP) {
struct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +
struct rte_udp_hdr *udp_hdr = (struct rte_udp_hdr *)(l3_hdr +
mbuf->l3_len);
udp_hdr->dgram_cksum = 0;
if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPv4))

View File

@ -244,7 +244,7 @@ em_set_xmit_ctx(struct em_tx_queue* txq,
switch (flags & PKT_TX_L4_MASK) {
case PKT_TX_UDP_CKSUM:
ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
offsetof(struct udp_hdr, dgram_cksum));
offsetof(struct rte_udp_hdr, dgram_cksum));
cmp_mask |= TX_MACIP_LEN_CMP_MASK;
break;
case PKT_TX_TCP_CKSUM:

View File

@ -289,7 +289,8 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq,
case PKT_TX_UDP_CKSUM:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= sizeof(struct rte_udp_hdr)
<< E1000_ADVTXD_L4LEN_SHIFT;
break;
case PKT_TX_TCP_CKSUM:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |

View File

@ -120,7 +120,7 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
memset(gp, 0, sizeof(*gp));
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
struct udp_hdr udp_mask, udp_val;
struct rte_udp_hdr udp_mask, udp_val;
memset(&udp_mask, 0, sizeof(udp_mask));
memset(&udp_val, 0, sizeof(udp_val));
@ -134,7 +134,7 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
}
enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
&udp_mask, &udp_val, sizeof(struct udp_hdr));
&udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
struct rte_tcp_hdr tcp_mask, tcp_val;
memset(&tcp_mask, 0, sizeof(tcp_mask));
@ -216,7 +216,7 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
}
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
struct udp_hdr udp_mask, udp_val;
struct rte_udp_hdr udp_mask, udp_val;
memset(&udp_mask, 0, sizeof(udp_mask));
memset(&udp_val, 0, sizeof(udp_val));
@ -229,7 +229,7 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
udp_val.dst_port = input->flow.udp6_flow.dst_port;
}
enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
&udp_mask, &udp_val, sizeof(struct udp_hdr));
&udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
struct rte_tcp_hdr tcp_mask, tcp_val;
memset(&tcp_mask, 0, sizeof(tcp_mask));

View File

@ -449,7 +449,7 @@ enic_copy_item_udp_v1(struct copy_item_args *arg)
const struct rte_flow_item_udp *spec = item->spec;
const struct rte_flow_item_udp *mask = item->mask;
struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
struct udp_hdr supported_mask = {
struct rte_udp_hdr supported_mask = {
.src_port = 0xffff,
.dst_port = 0xffff,
};
@ -639,7 +639,7 @@ enic_copy_item_inner_udp_v2(struct copy_item_args *arg)
mask = &rte_flow_item_udp_mask;
/* Append udp header to L5 and set ip proto = udp */
return copy_inner_common(&arg->filter->u.generic_1, off,
arg->item->spec, mask, sizeof(struct udp_hdr),
arg->item->spec, mask, sizeof(struct rte_udp_hdr),
arg->l3_proto_off, IPPROTO_UDP, 1);
}
@ -831,9 +831,9 @@ enic_copy_item_udp_v2(struct copy_item_args *arg)
mask = &rte_flow_item_udp_mask;
memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
sizeof(struct udp_hdr));
sizeof(struct rte_udp_hdr));
memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
sizeof(struct udp_hdr));
sizeof(struct rte_udp_hdr));
return 0;
}
@ -925,7 +925,7 @@ enic_copy_item_vxlan_v2(struct copy_item_args *arg)
const struct rte_flow_item_vxlan *spec = item->spec;
const struct rte_flow_item_vxlan *mask = item->mask;
struct filter_generic_1 *gp = &enic_filter->u.generic_1;
struct udp_hdr *udp;
struct rte_udp_hdr *udp;
FLOW_TRACE();
@ -935,9 +935,9 @@ enic_copy_item_vxlan_v2(struct copy_item_args *arg)
*/
gp->mask_flags |= FILTER_GENERIC_1_UDP;
gp->val_flags |= FILTER_GENERIC_1_UDP;
udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
udp->dst_port = 0xffff;
udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
udp->dst_port = RTE_BE16(4789);
/* Match all if no spec */
if (!spec)
@ -983,7 +983,7 @@ enic_copy_item_raw_v2(struct copy_item_args *arg)
return EINVAL;
/* Need non-null pattern that fits within the NIC's filter pattern */
if (spec->length == 0 ||
spec->length + sizeof(struct udp_hdr) > FILTER_GENERIC_1_KEY_LEN ||
spec->length + sizeof(struct rte_udp_hdr) > FILTER_GENERIC_1_KEY_LEN ||
!spec->pattern || !mask->pattern)
return EINVAL;
/*
@ -996,9 +996,9 @@ enic_copy_item_raw_v2(struct copy_item_args *arg)
*/
if (mask->length != 0 && mask->length < spec->length)
return EINVAL;
memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr),
mask->pattern, spec->length);
memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr),
spec->pattern, spec->length);
return 0;
@ -1052,9 +1052,9 @@ fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
return;
FLOW_TRACE();
vxlan = sizeof(struct rte_vxlan_hdr);
memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr),
gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr),
gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
inner = inner_ofst - vxlan;
memset(layer, 0, sizeof(layer));

View File

@ -799,7 +799,7 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
unsigned char *raw_pkt)
{
unsigned char *payload, *ptr;
struct udp_hdr *udp;
struct rte_udp_hdr *udp;
struct rte_tcp_hdr *tcp;
struct rte_sctp_hdr *sctp;
uint8_t size, dst = 0;
@ -815,8 +815,8 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
/* fill the L4 head */
switch (fdir_input->flow_type) {
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
udp = (struct udp_hdr *)(raw_pkt + len);
payload = (unsigned char *)udp + sizeof(struct udp_hdr);
udp = (struct rte_udp_hdr *)(raw_pkt + len);
payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
/*
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
@ -860,8 +860,8 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
udp = (struct udp_hdr *)(raw_pkt + len);
payload = (unsigned char *)udp + sizeof(struct udp_hdr);
udp = (struct rte_udp_hdr *)(raw_pkt + len);
payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
/*
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
@ -1089,7 +1089,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
{
unsigned char *payload = NULL;
unsigned char *ptr;
struct udp_hdr *udp;
struct rte_udp_hdr *udp;
struct rte_tcp_hdr *tcp;
struct rte_sctp_hdr *sctp;
struct rte_flow_item_gtp *gtp;
@ -1116,8 +1116,8 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
/* fill the L4 head */
if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
udp = (struct udp_hdr *)(raw_pkt + len);
payload = (unsigned char *)udp + sizeof(struct udp_hdr);
udp = (struct rte_udp_hdr *)(raw_pkt + len);
payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
/**
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
@ -1153,8 +1153,8 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
payload = raw_pkt + len;
set_idx = I40E_FLXPLD_L3_IDX;
} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
udp = (struct udp_hdr *)(raw_pkt + len);
payload = (unsigned char *)udp + sizeof(struct udp_hdr);
udp = (struct rte_udp_hdr *)(raw_pkt + len);
payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
/**
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
@ -1206,12 +1206,13 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
udp = (struct udp_hdr *)(raw_pkt + len);
udp = (struct rte_udp_hdr *)(raw_pkt + len);
udp->dgram_len =
rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
gtp = (struct rte_flow_item_gtp *)
((unsigned char *)udp + sizeof(struct udp_hdr));
((unsigned char *)udp +
sizeof(struct rte_udp_hdr));
gtp->msg_len =
rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
gtp->teid = fdir_input->flow.gtp_flow.teid;

View File

@ -312,7 +312,7 @@ i40e_txd_enable_checksum(uint64_t ol_flags,
break;
case PKT_TX_UDP_CKSUM:
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
*td_offset |= (sizeof(struct udp_hdr) >> 2) <<
*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
default:

View File

@ -1427,7 +1427,7 @@ iavf_txd_enable_checksum(uint64_t ol_flags,
break;
case PKT_TX_UDP_CKSUM:
*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
*td_offset |= (sizeof(struct udp_hdr) >> 2) <<
*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
default:

View File

@ -1787,7 +1787,7 @@ ice_txd_enable_checksum(uint64_t ol_flags,
break;
case PKT_TX_UDP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
*td_offset |= (sizeof(struct udp_hdr) >> 2) <<
*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
default:

View File

@ -422,7 +422,8 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
case PKT_TX_UDP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= sizeof(struct rte_udp_hdr)
<< IXGBE_ADVTXD_L4LEN_SHIFT;
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
break;

View File

@ -2602,11 +2602,11 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
/* Handle L4. */
switch (fdir_filter->input.flow_type) {
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
attributes->l4.udp.hdr = (struct udp_hdr){
attributes->l4.udp.hdr = (struct rte_udp_hdr){
.src_port = input->flow.udp4_flow.src_port,
.dst_port = input->flow.udp4_flow.dst_port,
};
attributes->l4_mask.udp.hdr = (struct udp_hdr){
attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
.src_port = mask->src_port_mask,
.dst_port = mask->dst_port_mask,
};
@ -2632,11 +2632,11 @@ flow_fdir_filter_convert(struct rte_eth_dev *dev,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
attributes->l4.udp.hdr = (struct udp_hdr){
attributes->l4.udp.hdr = (struct rte_udp_hdr){
.src_port = input->flow.udp6_flow.src_port,
.dst_port = input->flow.udp6_flow.dst_port,
};
attributes->l4_mask.udp.hdr = (struct udp_hdr){
attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
.src_port = mask->src_port_mask,
.dst_port = mask->dst_port_mask,
};

View File

@ -1215,7 +1215,7 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
struct rte_vlan_hdr *vlan = NULL;
struct rte_ipv4_hdr *ipv4 = NULL;
struct rte_ipv6_hdr *ipv6 = NULL;
struct udp_hdr *udp = NULL;
struct rte_udp_hdr *udp = NULL;
struct rte_vxlan_hdr *vxlan = NULL;
struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
struct rte_gre_hdr *gre = NULL;
@ -1286,7 +1286,7 @@ flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
break;
case RTE_FLOW_ITEM_TYPE_UDP:
udp = (struct udp_hdr *)&buf[temp_size];
udp = (struct rte_udp_hdr *)&buf[temp_size];
if (!ipv4 && !ipv6)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,

View File

@ -459,7 +459,7 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
uint8_t *raw_pkt;
struct rte_ipv4_hdr *ip;
struct rte_ipv6_hdr *ip6;
struct udp_hdr *udp;
struct rte_udp_hdr *udp;
struct rte_tcp_hdr *tcp;
uint16_t len;
@ -487,13 +487,13 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
raw_pkt = (uint8_t *)buff;
/* UDP */
if (arfs->tuple.ip_proto == IPPROTO_UDP) {
udp = (struct udp_hdr *)(raw_pkt + len);
udp = (struct rte_udp_hdr *)(raw_pkt + len);
udp->dst_port = arfs->tuple.dst_port;
udp->src_port = arfs->tuple.src_port;
udp->dgram_len = sizeof(struct udp_hdr);
len += sizeof(struct udp_hdr);
udp->dgram_len = sizeof(struct rte_udp_hdr);
len += sizeof(struct rte_udp_hdr);
/* adjust ip total_length */
ip->total_length += sizeof(struct udp_hdr);
ip->total_length += sizeof(struct rte_udp_hdr);
params->udp = true;
} else { /* TCP */
tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
@ -522,10 +522,10 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
raw_pkt = (uint8_t *)buff;
/* UDP */
if (arfs->tuple.ip_proto == IPPROTO_UDP) {
udp = (struct udp_hdr *)(raw_pkt + len);
udp = (struct rte_udp_hdr *)(raw_pkt + len);
udp->src_port = arfs->tuple.src_port;
udp->dst_port = arfs->tuple.dst_port;
len += sizeof(struct udp_hdr);
len += sizeof(struct rte_udp_hdr);
params->udp = true;
} else { /* TCP */
tcp = (struct rte_tcp_hdr *)(raw_pkt + len);

View File

@ -508,7 +508,7 @@ tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
l4_hdr = packet + l2_len + l3_len;
if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
*l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
*l4_cksum = &((struct rte_udp_hdr *)l4_hdr)->dgram_cksum;
else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
*l4_cksum = &((struct rte_tcp_hdr *)l4_hdr)->cksum;
else

View File

@ -539,7 +539,7 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
switch (cookie->ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_UDP_CKSUM:
hdr->csum_start = cookie->l2_len + cookie->l3_len;
hdr->csum_offset = offsetof(struct udp_hdr,
hdr->csum_offset = offsetof(struct rte_udp_hdr,
dgram_cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;

View File

@ -545,7 +545,9 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
offsetof(struct rte_tcp_hdr, cksum);
break;
case PKT_TX_UDP_CKSUM:
gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum);
gdesc->txd.msscof = gdesc->txd.hlen +
offsetof(struct rte_udp_hdr,
dgram_cksum);
break;
default:
PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",

View File

@ -528,7 +528,7 @@ get_ipv4_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid,
{
struct ipv4_5tuple key;
struct rte_tcp_hdr *tcp;
struct udp_hdr *udp;
struct rte_udp_hdr *udp;
int ret = 0;
key.ip_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
@ -544,7 +544,7 @@ get_ipv4_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid,
break;
case IPPROTO_UDP:
udp = (struct udp_hdr *)((unsigned char *)ipv4_hdr +
udp = (struct rte_udp_hdr *)((unsigned char *)ipv4_hdr +
sizeof(struct rte_ipv4_hdr));
key.port_dst = rte_be_to_cpu_16(udp->dst_port);
key.port_src = rte_be_to_cpu_16(udp->src_port);
@ -567,7 +567,7 @@ get_ipv6_dst_port(struct rte_ipv6_hdr *ipv6_hdr, uint16_t portid,
{
struct ipv6_5tuple key;
struct rte_tcp_hdr *tcp;
struct udp_hdr *udp;
struct rte_udp_hdr *udp;
int ret = 0;
memcpy(key.ip_dst, ipv6_hdr->dst_addr, IPV6_ADDR_LEN);
@ -584,7 +584,7 @@ get_ipv6_dst_port(struct rte_ipv6_hdr *ipv6_hdr, uint16_t portid,
break;
case IPPROTO_UDP:
udp = (struct udp_hdr *)((unsigned char *) ipv6_hdr +
udp = (struct rte_udp_hdr *)((unsigned char *) ipv6_hdr +
sizeof(struct rte_ipv6_hdr));
key.port_dst = rte_be_to_cpu_16(udp->dst_port);
key.port_src = rte_be_to_cpu_16(udp->src_port);

View File

@ -367,7 +367,7 @@ get_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid,
{
struct ipv4_5tuple key;
struct rte_tcp_hdr *tcp;
struct udp_hdr *udp;
struct rte_udp_hdr *udp;
int ret = 0;
key.ip_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
@ -383,7 +383,7 @@ get_dst_port(struct rte_ipv4_hdr *ipv4_hdr, uint16_t portid,
break;
case IPPROTO_UDP:
udp = (struct udp_hdr *)((unsigned char *) ipv4_hdr +
udp = (struct rte_udp_hdr *)((unsigned char *) ipv4_hdr +
sizeof(struct rte_ipv4_hdr));
key.port_dst = rte_be_to_cpu_16(udp->dst_port);
key.port_src = rte_be_to_cpu_16(udp->src_port);

View File

@ -77,7 +77,7 @@ process_inner_cksums(struct rte_ether_hdr *eth_hdr,
uint16_t ethertype;
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
struct udp_hdr *udp_hdr;
struct rte_udp_hdr *udp_hdr;
struct rte_tcp_hdr *tcp_hdr;
struct rte_sctp_hdr *sctp_hdr;
uint64_t ol_flags = 0;
@ -110,7 +110,7 @@ process_inner_cksums(struct rte_ether_hdr *eth_hdr,
return 0; /* packet type not supported, nothing to do */
if (l4_proto == IPPROTO_UDP) {
udp_hdr = (struct udp_hdr *)((char *)l3_hdr + info->l3_len);
udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
ol_flags |= PKT_TX_UDP_CKSUM;
udp_hdr->dgram_cksum = get_psd_sum(l3_hdr,
ethertype, ol_flags);
@ -143,7 +143,7 @@ decapsulation(struct rte_mbuf *pkt)
{
uint8_t l4_proto = 0;
uint16_t outer_header_len;
struct udp_hdr *udp_hdr;
struct rte_udp_hdr *udp_hdr;
union tunnel_offload_info info = { .data = 0 };
struct rte_ether_hdr *phdr =
rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
@ -153,7 +153,7 @@ decapsulation(struct rte_mbuf *pkt)
if (l4_proto != IPPROTO_UDP)
return -1;
udp_hdr = (struct udp_hdr *)((char *)phdr +
udp_hdr = (struct rte_udp_hdr *)((char *)phdr +
info.outer_l2_len + info.outer_l3_len);
/** check udp destination port, 4789 is the default vxlan port
@ -163,7 +163,7 @@ decapsulation(struct rte_mbuf *pkt)
(pkt->packet_type & RTE_PTYPE_TUNNEL_MASK) == 0)
return -1;
outer_header_len = info.outer_l2_len + info.outer_l3_len
+ sizeof(struct udp_hdr) + sizeof(struct rte_vxlan_hdr);
+ sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr);
rte_pktmbuf_adj(pkt, outer_header_len);
@ -184,10 +184,10 @@ encapsulation(struct rte_mbuf *m, uint8_t queue_id)
struct rte_ether_hdr *pneth =
(struct rte_ether_hdr *) rte_pktmbuf_prepend(m,
sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr)
+ sizeof(struct udp_hdr) + sizeof(struct rte_vxlan_hdr));
+ sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr));
struct rte_ipv4_hdr *ip = (struct rte_ipv4_hdr *) &pneth[1];
struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
struct rte_vxlan_hdr *vxlan = (struct rte_vxlan_hdr *) &udp[1];
/* convert TX queue ID to vport ID */
@ -231,7 +231,7 @@ encapsulation(struct rte_mbuf *m, uint8_t queue_id)
/*UDP HEADER*/
udp->dgram_cksum = 0;
udp->dgram_len = rte_cpu_to_be_16(old_len
+ sizeof(struct udp_hdr)
+ sizeof(struct rte_udp_hdr)
+ sizeof(struct rte_vxlan_hdr));
udp->dst_port = rte_cpu_to_be_16(vxdev.dst_port);

View File

@ -692,7 +692,7 @@ static const struct rte_flow_item_icmp rte_flow_item_icmp_mask = {
* Matches a UDP header.
*/
struct rte_flow_item_udp {
struct udp_hdr hdr; /**< UDP header definition. */
struct rte_udp_hdr hdr; /**< UDP header definition. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_UDP. */

View File

@ -264,7 +264,7 @@ static inline void
update_vxlan_header(struct gro_vxlan_tcp4_item *item)
{
struct rte_ipv4_hdr *ipv4_hdr;
struct udp_hdr *udp_hdr;
struct rte_udp_hdr *udp_hdr;
struct rte_mbuf *pkt = item->inner_item.firstseg;
uint16_t len;
@ -276,7 +276,7 @@ update_vxlan_header(struct gro_vxlan_tcp4_item *item)
/* Update the outer UDP header. */
len -= pkt->outer_l3_len;
udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr + pkt->outer_l3_len);
udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr + pkt->outer_l3_len);
udp_hdr->dgram_len = rte_cpu_to_be_16(len);
/* Update the inner IPv4 header. */
@ -293,7 +293,7 @@ gro_vxlan_tcp4_reassemble(struct rte_mbuf *pkt,
struct rte_ether_hdr *outer_eth_hdr, *eth_hdr;
struct rte_ipv4_hdr *outer_ipv4_hdr, *ipv4_hdr;
struct rte_tcp_hdr *tcp_hdr;
struct udp_hdr *udp_hdr;
struct rte_udp_hdr *udp_hdr;
struct rte_vxlan_hdr *vxlan_hdr;
uint32_t sent_seq;
int32_t tcp_dl;
@ -317,10 +317,10 @@ gro_vxlan_tcp4_reassemble(struct rte_mbuf *pkt,
outer_eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
outer_ipv4_hdr = (struct rte_ipv4_hdr *)((char *)outer_eth_hdr +
pkt->outer_l2_len);
udp_hdr = (struct udp_hdr *)((char *)outer_ipv4_hdr +
udp_hdr = (struct rte_udp_hdr *)((char *)outer_ipv4_hdr +
pkt->outer_l3_len);
vxlan_hdr = (struct rte_vxlan_hdr *)((char *)udp_hdr +
sizeof(struct udp_hdr));
sizeof(struct rte_udp_hdr));
eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_hdr +
sizeof(struct rte_vxlan_hdr));
ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);

View File

@ -46,9 +46,9 @@
/**
 * Refresh the UDP header of a (re)segmented packet.
 *
 * Locates the UDP header at @udp_offset bytes into the mbuf data and
 * rewrites dgram_len so it covers everything from the UDP header to the
 * end of the packet. The checksum is not touched here.
 *
 * @param pkt
 *   Packet mbuf whose UDP header is updated in place.
 * @param udp_offset
 *   Byte offset of the UDP header from the start of the mbuf data.
 */
static inline void
update_udp_header(struct rte_mbuf *pkt, uint16_t udp_offset)
{
	struct rte_udp_hdr *udp_hdr;

	udp_hdr = (struct rte_udp_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
			udp_offset);
	/* dgram_len = payload + UDP header, stored big-endian on the wire */
	udp_hdr->dgram_len = rte_cpu_to_be_16(pkt->pkt_len - udp_offset);
}

View File

@ -23,7 +23,7 @@ extern "C" {
/* Minimum GSO segment size for UDP based packets. */
/* Smallest valid GSO segment size for UDP/IPv4 packets: the Ethernet and
 * IPv4/UDP headers plus at least one payload byte.
 */
#define RTE_GSO_UDP_SEG_SIZE_MIN (sizeof(struct rte_ether_hdr) + \
		sizeof(struct rte_ipv4_hdr) + sizeof(struct rte_udp_hdr) + 1)
/* GSO flags for rte_gso_ctx. */
#define RTE_GSO_FLAG_IPID_FIXED (1ULL << 0)

View File

@ -316,7 +316,7 @@ struct rte_vxlan_hdr {
#define RTE_ETHER_TYPE_MPLSM 0x8848 /**< MPLS multicast ethertype. */
/* VXLAN tunnel overhead: outer UDP header followed by the VXLAN header. */
#define RTE_ETHER_VXLAN_HLEN \
	(sizeof(struct rte_udp_hdr) + sizeof(struct rte_vxlan_hdr))
/**< VXLAN tunnel header length. */
/**
@ -340,7 +340,7 @@ struct rte_vxlan_gpe_hdr {
#define RTE_VXLAN_GPE_TYPE_GBP 6 /**< GBP Protocol. */
#define RTE_VXLAN_GPE_TYPE_VBNG 7 /**< vBNG Protocol. */
/* VXLAN-GPE tunnel overhead: outer UDP header followed by the GPE header. */
#define RTE_ETHER_VXLAN_GPE_HLEN (sizeof(struct rte_udp_hdr) + \
		sizeof(struct rte_vxlan_gpe_hdr))
/**< VXLAN-GPE tunnel header length. */

View File

@ -357,7 +357,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
}
if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP) {
hdr_lens->l4_len = sizeof(struct udp_hdr);
hdr_lens->l4_len = sizeof(struct rte_udp_hdr);
return pkt_type;
} else if ((pkt_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
const struct rte_tcp_hdr *th;
@ -493,7 +493,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
}
if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP) {
hdr_lens->inner_l4_len = sizeof(struct udp_hdr);
hdr_lens->inner_l4_len = sizeof(struct rte_udp_hdr);
} else if ((pkt_type & RTE_PTYPE_INNER_L4_MASK) ==
RTE_PTYPE_INNER_L4_TCP) {
const struct rte_tcp_hdr *th;

View File

@ -115,7 +115,7 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
struct rte_ipv4_hdr *ipv4_hdr;
struct rte_ipv6_hdr *ipv6_hdr;
struct rte_tcp_hdr *tcp_hdr;
struct udp_hdr *udp_hdr;
struct rte_udp_hdr *udp_hdr;
uint64_t inner_l3_offset = m->l2_len;
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
@ -153,7 +153,7 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
if (ol_flags & PKT_TX_IPV4) {
udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
m->l3_len);
udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
ol_flags);
@ -161,7 +161,8 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
ipv6_hdr = rte_pktmbuf_mtod_offset(m,
struct rte_ipv6_hdr *, inner_l3_offset);
/* non-TSO udp */
udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
udp_hdr = rte_pktmbuf_mtod_offset(m,
struct rte_udp_hdr *,
inner_l3_offset + m->l3_len);
udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
ol_flags);

View File

@ -23,7 +23,7 @@ extern "C" {
/**
* UDP Header
*/
struct udp_hdr {
struct rte_udp_hdr {
uint16_t src_port; /**< UDP source port. */
uint16_t dst_port; /**< UDP destination port. */
uint16_t dgram_len; /**< UDP datagram length */

View File

@ -492,7 +492,7 @@ struct encap_pppoe_data {
/* On-the-wire layout of a VXLAN-over-IPv4 encapsulation header; packed so
 * the compiler inserts no padding between the protocol headers.
 */
struct encap_vxlan_ipv4_data {
	struct rte_ether_hdr ether; /**< Outer Ethernet header. */
	struct rte_ipv4_hdr ipv4;   /**< Outer IPv4 header. */
	struct rte_udp_hdr udp;     /**< Outer UDP header. */
	struct rte_vxlan_hdr vxlan; /**< VXLAN header. */
} __attribute__((__packed__));
@ -500,14 +500,14 @@ struct encap_vxlan_ipv4_vlan_data {
struct rte_ether_hdr ether;
struct rte_vlan_hdr vlan;
struct rte_ipv4_hdr ipv4;
struct udp_hdr udp;
struct rte_udp_hdr udp;
struct rte_vxlan_hdr vxlan;
} __attribute__((__packed__));
/* On-the-wire layout of a VXLAN-over-IPv6 encapsulation header; packed so
 * the compiler inserts no padding between the protocol headers.
 */
struct encap_vxlan_ipv6_data {
	struct rte_ether_hdr ether; /**< Outer Ethernet header. */
	struct rte_ipv6_hdr ipv6;   /**< Outer IPv6 header. */
	struct rte_udp_hdr udp;     /**< Outer UDP header. */
	struct rte_vxlan_hdr vxlan; /**< VXLAN header. */
} __attribute__((__packed__));
@ -515,7 +515,7 @@ struct encap_vxlan_ipv6_vlan_data {
struct rte_ether_hdr ether;
struct rte_vlan_hdr vlan;
struct rte_ipv6_hdr ipv6;
struct udp_hdr udp;
struct rte_udp_hdr udp;
struct rte_vxlan_hdr vxlan;
} __attribute__((__packed__));
@ -1006,13 +1006,13 @@ pkt_work_encap_vxlan_ipv4(struct rte_mbuf *mbuf,
ether_length = (uint16_t)mbuf->pkt_len;
ipv4_total_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
sizeof(struct udp_hdr) +
sizeof(struct rte_udp_hdr) +
sizeof(struct rte_ipv4_hdr));
ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
rte_htons(ipv4_total_length));
udp_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
sizeof(struct udp_hdr));
sizeof(struct rte_udp_hdr));
vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
@ -1036,13 +1036,13 @@ pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf *mbuf,
ether_length = (uint16_t)mbuf->pkt_len;
ipv4_total_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
sizeof(struct udp_hdr) +
sizeof(struct rte_udp_hdr) +
sizeof(struct rte_ipv4_hdr));
ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
rte_htons(ipv4_total_length));
udp_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
sizeof(struct udp_hdr));
sizeof(struct rte_udp_hdr));
vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
@ -1066,10 +1066,10 @@ pkt_work_encap_vxlan_ipv6(struct rte_mbuf *mbuf,
ether_length = (uint16_t)mbuf->pkt_len;
ipv6_payload_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
sizeof(struct udp_hdr));
sizeof(struct rte_udp_hdr));
udp_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
sizeof(struct udp_hdr));
sizeof(struct rte_udp_hdr));
vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
@ -1092,10 +1092,10 @@ pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf *mbuf,
ether_length = (uint16_t)mbuf->pkt_len;
ipv6_payload_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
sizeof(struct udp_hdr));
sizeof(struct rte_udp_hdr));
udp_length = ether_length +
(sizeof(struct rte_vxlan_hdr) +
sizeof(struct udp_hdr));
sizeof(struct rte_udp_hdr));
vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
@ -1366,7 +1366,7 @@ pkt_ipv4_work_nat(struct rte_ipv4_hdr *ip,
tcp->src_port = data->port;
tcp->cksum = tcp_cksum;
} else {
struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
uint16_t ip_cksum, udp_cksum;
ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
@ -1405,7 +1405,7 @@ pkt_ipv4_work_nat(struct rte_ipv4_hdr *ip,
tcp->dst_port = data->port;
tcp->cksum = tcp_cksum;
} else {
struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
uint16_t ip_cksum, udp_cksum;
ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
@ -1447,7 +1447,7 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
tcp->src_port = data->port;
tcp->cksum = tcp_cksum;
} else {
struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
uint16_t udp_cksum;
udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
@ -1475,7 +1475,7 @@ pkt_ipv6_work_nat(struct rte_ipv6_hdr *ip,
tcp->dst_port = data->port;
tcp->cksum = tcp_cksum;
} else {
struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
struct rte_udp_hdr *udp = (struct rte_udp_hdr *) &ip[1];
uint16_t udp_cksum;
udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,

View File

@ -222,7 +222,7 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
cksum));
break;
case PKT_TX_UDP_CKSUM:
net_hdr->csum_offset = (offsetof(struct udp_hdr,
net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
dgram_cksum));
break;
case PKT_TX_SCTP_CKSUM:
@ -1028,7 +1028,7 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
if (l4_proto == IPPROTO_TCP)
m->ol_flags |= PKT_TX_TCP_CKSUM;
break;
case (offsetof(struct udp_hdr, dgram_cksum)):
case (offsetof(struct rte_udp_hdr, dgram_cksum)):
if (l4_proto == IPPROTO_UDP)
m->ol_flags |= PKT_TX_UDP_CKSUM;
break;
@ -1054,7 +1054,7 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
case VIRTIO_NET_HDR_GSO_UDP:
m->ol_flags |= PKT_TX_UDP_SEG;
m->tso_segsz = hdr->gso_size;
m->l4_len = sizeof(struct udp_hdr);
m->l4_len = sizeof(struct rte_udp_hdr);
break;
default:
RTE_LOG(WARNING, VHOST_DATA,