app/testpmd: VXLAN Tx checksum offload

Add test cases in testpmd to test VXLAN Tx checksum offload, covering
 - IPv4 and IPv6 packets
 - outer L3, and inner L3 and L4 checksum offload on the Tx side.

Signed-off-by: Jijiang Liu <jijiang.liu@intel.com>
Acked-by: Helin Zhang <helin.zhang@intel.com>
Tested-by: Yong Liu <yong.liu@intel.com>
Jijiang Liu, 2014-10-23 21:19:00 +08:00 (committed by Thomas Monjalon)
parent 77b8301733
commit 9deb1704dd
3 changed files with 192 additions and 21 deletions
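
For illustration, the extended mask can be driven from the testpmd prompt. A minimal session sketch, assuming port 0 and the csum forwarding engine modified below:

testpmd> set fwd csum
testpmd> tx_checksum set 0xff 0
testpmd> start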

app/test-pmd/cmdline.c

@@ -310,13 +310,17 @@ static void cmd_help_long_parsed(void *parsed_result,
" Disable hardware insertion of a VLAN header in"
" packets sent on a port.\n\n"
"tx_checksum set mask (port_id)\n"
"tx_checksum set (mask) (port_id)\n"
" Enable hardware insertion of checksum offload with"
" the 4-bit mask, 0~0xf, in packets sent on a port.\n"
" the 8-bit mask, 0~0xff, in packets sent on a port.\n"
" bit 0 - insert ip checksum offload if set\n"
" bit 1 - insert udp checksum offload if set\n"
" bit 2 - insert tcp checksum offload if set\n"
" bit 3 - insert sctp checksum offload if set\n"
" bit 4 - insert inner ip checksum offload if set\n"
" bit 5 - insert inner udp checksum offload if set\n"
" bit 6 - insert inner tcp checksum offload if set\n"
" bit 7 - insert inner sctp checksum offload if set\n"
" Please check the NIC datasheet for HW limits.\n\n"
"set fwd (%s)\n"
@@ -2763,8 +2767,9 @@ cmdline_parse_inst_t cmd_tx_cksum_set = {
.f = cmd_tx_cksum_set_parsed,
.data = NULL,
.help_str = "enable hardware insertion of L3/L4checksum with a given "
"mask in packets sent on a port, the bit mapping is given as, Bit 0 for ip"
"Bit 1 for UDP, Bit 2 for TCP, Bit 3 for SCTP",
"mask in packets sent on a port, the bit mapping is given as, Bit 0 for ip, "
"Bit 1 for UDP, Bit 2 for TCP, Bit 3 for SCTP, Bit 4 for inner ip, "
"Bit 5 for inner UDP, Bit 6 for inner TCP, Bit 7 for inner SCTP",
.tokens = {
(void *)&cmd_tx_cksum_set_tx_cksum,
(void *)&cmd_tx_cksum_set_set,

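The bit semantics spelled out in the help text above can be summarized as a mask layout; the macro names below are purely illustrative and not part of testpmd, which keeps the raw mask in ports[port_id].tx_ol_flags:

/* Illustrative names for the 8-bit tx_checksum mask (hypothetical) */
#define TX_CKSUM_IP         0x01  /* bit 0: IP header checksum */
#define TX_CKSUM_UDP        0x02  /* bit 1: UDP checksum */
#define TX_CKSUM_TCP        0x04  /* bit 2: TCP checksum */
#define TX_CKSUM_SCTP       0x08  /* bit 3: SCTP checksum */
#define TX_CKSUM_INNER_IP   0x10  /* bit 4: inner IP header checksum */
#define TX_CKSUM_INNER_UDP  0x20  /* bit 5: inner UDP checksum */
#define TX_CKSUM_INNER_TCP  0x40  /* bit 6: inner TCP checksum */
#define TX_CKSUM_INNER_SCTP 0x80  /* bit 7: inner SCTP checksum */

With these names, the inner-only test (tx_ol_flags & 0xF0) in the forwarding code below matches any of the four inner bits.
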
app/test-pmd/config.c

@@ -1753,9 +1753,9 @@ tx_cksum_set(portid_t port_id, uint64_t ol_flags)
uint64_t tx_ol_flags;
if (port_id_is_invalid(port_id))
return;
/* Clear last 4 bits and then set L3/4 checksum mask again */
tx_ol_flags = ports[port_id].tx_ol_flags & (~0x0Full);
ports[port_id].tx_ol_flags = ((ol_flags & 0xf) | tx_ol_flags);
/* Clear last 8 bits and then set L3/4 checksum mask again */
tx_ol_flags = ports[port_id].tx_ol_flags & (~0x0FFull);
ports[port_id].tx_ol_flags = ((ol_flags & 0xff) | tx_ol_flags);
}
void

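A quick worked example of the updated clear-and-set logic (values invented for illustration):

/* Suppose ports[0].tx_ol_flags == 0x103 (a higher flag plus old mask 0x03). */
tx_cksum_set(0, 0x35);
/* Step 1: 0x103 & ~0xFF == 0x100 -- the old 8-bit checksum mask is cleared. */
/* Step 2: 0x100 | (0x35 & 0xff) == 0x135 -- new mask set, higher bits kept. */
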
app/test-pmd/csumonly.c

@@ -209,10 +209,16 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
struct rte_mbuf *mb;
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
struct ether_hdr *inner_eth_hdr;
struct ipv4_hdr *inner_ipv4_hdr = NULL;
struct ipv6_hdr *ipv6_hdr;
struct ipv6_hdr *inner_ipv6_hdr = NULL;
struct udp_hdr *udp_hdr;
struct udp_hdr *inner_udp_hdr;
struct tcp_hdr *tcp_hdr;
struct tcp_hdr *inner_tcp_hdr;
struct sctp_hdr *sctp_hdr;
struct sctp_hdr *inner_sctp_hdr;
uint16_t nb_rx;
uint16_t nb_tx;
@@ -221,12 +227,17 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
uint64_t pkt_ol_flags;
uint64_t tx_ol_flags;
uint16_t l4_proto;
uint16_t inner_l4_proto = 0;
uint16_t eth_type;
uint8_t l2_len;
uint8_t l3_len;
uint8_t inner_l2_len = 0;
uint8_t inner_l3_len = 0;
uint32_t rx_bad_ip_csum;
uint32_t rx_bad_l4_csum;
uint8_t ipv4_tunnel;
uint8_t ipv6_tunnel;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
@@ -262,7 +273,10 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
l2_len = sizeof(struct ether_hdr);
pkt_ol_flags = mb->ol_flags;
ol_flags = (pkt_ol_flags & (~PKT_TX_L4_MASK));
ipv4_tunnel = (pkt_ol_flags & PKT_RX_TUNNEL_IPV4_HDR) ?
1 : 0;
ipv6_tunnel = (pkt_ol_flags & PKT_RX_TUNNEL_IPV6_HDR) ?
1 : 0;
eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
if (eth_type == ETHER_TYPE_VLAN) {
@@ -295,7 +309,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
* + ipv4 or ipv6
* + udp or tcp or sctp or others
*/
if (pkt_ol_flags & PKT_RX_IPV4_HDR) {
if (pkt_ol_flags & (PKT_RX_IPV4_HDR | PKT_RX_TUNNEL_IPV4_HDR)) {
/* Do not support ipv4 option field */
l3_len = sizeof(struct ipv4_hdr) ;
@@ -325,17 +339,92 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (tx_ol_flags & 0x2) {
/* HW Offload */
ol_flags |= PKT_TX_UDP_CKSUM;
/* Pseudo header sum need be set properly */
udp_hdr->dgram_cksum = get_ipv4_psd_sum(ipv4_hdr);
if (ipv4_tunnel)
udp_hdr->dgram_cksum = 0;
else
/* Pseudo header sum needs to be set properly */
udp_hdr->dgram_cksum =
get_ipv4_psd_sum(ipv4_hdr);
}
else {
/* SW Implementation, clear checksum field first */
udp_hdr->dgram_cksum = 0;
udp_hdr->dgram_cksum = get_ipv4_udptcp_checksum(ipv4_hdr,
(uint16_t*)udp_hdr);
(uint16_t *)udp_hdr);
}
}
else if (l4_proto == IPPROTO_TCP){
if (ipv4_tunnel) {
uint16_t len;
/* Check if inner L3/L4 checksum flag is set */
if (tx_ol_flags & 0xF0)
ol_flags |= PKT_TX_VXLAN_CKSUM;
inner_l2_len = sizeof(struct ether_hdr);
inner_eth_hdr = (struct ether_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + l2_len + l3_len
+ ETHER_VXLAN_HLEN);
eth_type = rte_be_to_cpu_16(inner_eth_hdr->ether_type);
if (eth_type == ETHER_TYPE_VLAN) {
inner_l2_len += sizeof(struct vlan_hdr);
eth_type = rte_be_to_cpu_16(*(uint16_t *)
((uintptr_t)&inner_eth_hdr->ether_type +
sizeof(struct vlan_hdr)));
}
len = l2_len + l3_len + ETHER_VXLAN_HLEN + inner_l2_len;
if (eth_type == ETHER_TYPE_IPv4) {
inner_l3_len = sizeof(struct ipv4_hdr);
inner_ipv4_hdr = (struct ipv4_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + len);
inner_l4_proto = inner_ipv4_hdr->next_proto_id;
if (tx_ol_flags & 0x10) {
/* Do not delete, this is required by HW */
inner_ipv4_hdr->hdr_checksum = 0;
ol_flags |= PKT_TX_IPV4_CSUM;
}
} else if (eth_type == ETHER_TYPE_IPv6) {
inner_l3_len = sizeof(struct ipv6_hdr);
inner_ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + len);
inner_l4_proto = inner_ipv6_hdr->proto;
}
if ((inner_l4_proto == IPPROTO_UDP) && (tx_ol_flags & 0x20)) {
/* HW Offload */
ol_flags |= PKT_TX_UDP_CKSUM;
inner_udp_hdr = (struct udp_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + len + inner_l3_len);
if (eth_type == ETHER_TYPE_IPv4)
inner_udp_hdr->dgram_cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
else if (eth_type == ETHER_TYPE_IPv6)
inner_udp_hdr->dgram_cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
} else if ((inner_l4_proto == IPPROTO_TCP) && (tx_ol_flags & 0x40)) {
/* HW Offload */
ol_flags |= PKT_TX_TCP_CKSUM;
inner_tcp_hdr = (struct tcp_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + len + inner_l3_len);
if (eth_type == ETHER_TYPE_IPv4)
inner_tcp_hdr->cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
else if (eth_type == ETHER_TYPE_IPv6)
inner_tcp_hdr->cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
} else if ((inner_l4_proto == IPPROTO_SCTP) && (tx_ol_flags & 0x80)) {
/* HW Offload */
ol_flags |= PKT_TX_SCTP_CKSUM;
inner_sctp_hdr = (struct sctp_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + len + inner_l3_len);
inner_sctp_hdr->cksum = 0;
}
}
} else if (l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb,
unsigned char *) + l2_len + l3_len);
if (tx_ol_flags & 0x4) {
@@ -347,8 +436,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
tcp_hdr->cksum = get_ipv4_udptcp_checksum(ipv4_hdr,
(uint16_t*)tcp_hdr);
}
}
else if (l4_proto == IPPROTO_SCTP) {
} else if (l4_proto == IPPROTO_SCTP) {
sctp_hdr = (struct sctp_hdr*) (rte_pktmbuf_mtod(mb,
unsigned char *) + l2_len + l3_len);
@@ -367,9 +455,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
}
}
/* End of L4 Handling */
}
else if (pkt_ol_flags & PKT_RX_IPV6_HDR) {
} else if (pkt_ol_flags & (PKT_RX_IPV6_HDR | PKT_RX_TUNNEL_IPV6_HDR)) {
ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + l2_len);
l3_len = sizeof(struct ipv6_hdr) ;
@@ -382,15 +468,93 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (tx_ol_flags & 0x2) {
/* HW Offload */
ol_flags |= PKT_TX_UDP_CKSUM;
udp_hdr->dgram_cksum = get_ipv6_psd_sum(ipv6_hdr);
if (ipv6_tunnel)
udp_hdr->dgram_cksum = 0;
else
udp_hdr->dgram_cksum =
get_ipv6_psd_sum(ipv6_hdr);
}
else {
/* SW Implementation */
/* checksum field needs to be cleared first */
udp_hdr->dgram_cksum = 0;
udp_hdr->dgram_cksum = get_ipv6_udptcp_checksum(ipv6_hdr,
(uint16_t*)udp_hdr);
(uint16_t *)udp_hdr);
}
if (ipv6_tunnel) {
uint16_t len;
/* Check if inner L3/L4 checksum flag is set */
if (tx_ol_flags & 0xF0)
ol_flags |= PKT_TX_VXLAN_CKSUM;
inner_l2_len = sizeof(struct ether_hdr);
inner_eth_hdr = (struct ether_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + l2_len + l3_len + ETHER_VXLAN_HLEN);
eth_type = rte_be_to_cpu_16(inner_eth_hdr->ether_type);
if (eth_type == ETHER_TYPE_VLAN) {
inner_l2_len += sizeof(struct vlan_hdr);
eth_type = rte_be_to_cpu_16(*(uint16_t *)
((uintptr_t)&inner_eth_hdr->ether_type +
sizeof(struct vlan_hdr)));
}
len = l2_len + l3_len + ETHER_VXLAN_HLEN + inner_l2_len;
if (eth_type == ETHER_TYPE_IPv4) {
inner_l3_len = sizeof(struct ipv4_hdr);
inner_ipv4_hdr = (struct ipv4_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + len);
inner_l4_proto = inner_ipv4_hdr->next_proto_id;
/* HW offload */
if (tx_ol_flags & 0x10) {
/* Do not delete, this is required by HW */
inner_ipv4_hdr->hdr_checksum = 0;
ol_flags |= PKT_TX_IPV4_CSUM;
}
} else if (eth_type == ETHER_TYPE_IPv6) {
inner_l3_len = sizeof(struct ipv6_hdr);
inner_ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + len);
inner_l4_proto = inner_ipv6_hdr->proto;
}
if ((inner_l4_proto == IPPROTO_UDP) && (tx_ol_flags & 0x20)) {
inner_udp_hdr = (struct udp_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + len + inner_l3_len);
/* HW offload */
ol_flags |= PKT_TX_UDP_CKSUM;
inner_udp_hdr->dgram_cksum = 0;
if (eth_type == ETHER_TYPE_IPv4)
inner_udp_hdr->dgram_cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
else if (eth_type == ETHER_TYPE_IPv6)
inner_udp_hdr->dgram_cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
} else if ((inner_l4_proto == IPPROTO_TCP) && (tx_ol_flags & 0x40)) {
/* HW offload */
ol_flags |= PKT_TX_TCP_CKSUM;
inner_tcp_hdr = (struct tcp_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + len + inner_l3_len);
if (eth_type == ETHER_TYPE_IPv4)
inner_tcp_hdr->cksum = get_ipv4_psd_sum(inner_ipv4_hdr);
else if (eth_type == ETHER_TYPE_IPv6)
inner_tcp_hdr->cksum = get_ipv6_psd_sum(inner_ipv6_hdr);
} else if ((inner_l4_proto == IPPROTO_SCTP) && (tx_ol_flags & 0x80)) {
/* HW offload */
ol_flags |= PKT_TX_SCTP_CKSUM;
inner_sctp_hdr = (struct sctp_hdr *) (rte_pktmbuf_mtod(mb,
unsigned char *) + len + inner_l3_len);
inner_sctp_hdr->cksum = 0;
}
}
}
else if (l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb,
@@ -434,6 +598,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
/* Combine the packet header write. VLAN is not considered here */
mb->l2_len = l2_len;
mb->l3_len = l3_len;
mb->inner_l2_len = inner_l2_len;
mb->inner_l3_len = inner_l3_len;
mb->ol_flags = ol_flags;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
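
The pseudo-header helpers get_ipv4_psd_sum()/get_ipv6_psd_sum() referenced above are existing csumonly.c internals not shown in this diff. As a rough sketch of what the IPv4 variant computes -- assuming the standard TCP/UDP pseudo-header (source, destination, zero, protocol, L4 length) and ones'-complement summation; the names sum16() and psd_sum_ipv4() are hypothetical:

/* Hypothetical helper: ones'-complement sum of 16-bit words kept in
 * network byte order (RFC 1071 arithmetic is byte-order independent). */
static uint16_t
sum16(const uint16_t *buf, uint32_t len)
{
	uint32_t sum = 0;

	while (len >= 2) {
		sum += *buf++;
		len -= 2;
	}
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Sketch of an IPv4 pseudo-header sum used to seed the L4 checksum
 * field before requesting HW offload; equivalent in spirit to
 * get_ipv4_psd_sum(), not a copy of it. */
static uint16_t
psd_sum_ipv4(const struct ipv4_hdr *ip)
{
	struct {
		uint32_t src_addr;
		uint32_t dst_addr;
		uint8_t  zero;
		uint8_t  proto;
		uint16_t len;	/* L4 length, network order */
	} __attribute__((__packed__)) psd;

	psd.src_addr = ip->src_addr;
	psd.dst_addr = ip->dst_addr;
	psd.zero = 0;
	psd.proto = ip->next_proto_id;
	psd.len = rte_cpu_to_be_16((uint16_t)
		(rte_be_to_cpu_16(ip->total_length) - sizeof(struct ipv4_hdr)));
	return sum16((const uint16_t *)&psd, sizeof(psd));
}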