gso: support GRE GSO

This patch adds GSO support for GRE-tunneled packets. Supported GRE
packets must contain an outer IPv4 header and inner TCP/IPv4 headers.
They may also contain a single VLAN tag. GRE GSO doesn't check whether
the input packets have correct checksums, and doesn't update checksums
for output packets. Additionally, it doesn't process IP-fragmented packets.

As with VxLAN GSO, GRE GSO uses a two-segment mbuf to organize each
output packet, which requires multi-segment mbuf support in the TX
functions of the NIC driver. Also, if a packet is GSOed, GRE GSO reduces
its mbuf refcnt by 1. As a result, when all of its GSOed segments are
freed, the packet is freed automatically.

Signed-off-by: Mark Kavanagh <mark.b.kavanagh@intel.com>
Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
This commit is contained in:
Mark Kavanagh 2017-10-07 22:56:42 +08:00 committed by Ferruh Yigit
parent b058d92ea9
commit 70e737e448
4 changed files with 23 additions and 7 deletions

View File

@ -95,6 +95,8 @@ New Features
* TCP/IPv4 packets.
* VxLAN packets, which must have an outer IPv4 header, and contain
an inner TCP/IPv4 packet.
* GRE packets, which must contain an outer IPv4 header, and inner
TCP/IPv4 headers.
The GSO library doesn't check if the input packets have correct
checksums, and doesn't update checksums for output packets.

View File

@ -55,6 +55,11 @@
(PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
PKT_TX_TUNNEL_VXLAN))
/* Evaluates to non-zero when the given ol_flags request TCP segmentation
 * (PKT_TX_TCP_SEG) of an inner IPv4/TCP packet carried in a GRE tunnel
 * with an outer IPv4 header — i.e. all four flag bits are set. Mirrors
 * the IS_IPV4_VXLAN_TCP4() check above, with PKT_TX_TUNNEL_GRE in place
 * of PKT_TX_TUNNEL_VXLAN.
 */
#define IS_IPV4_GRE_TCP4(flag) (((flag) & (PKT_TX_TCP_SEG | PKT_TX_IPV4 | \
PKT_TX_OUTER_IPV4 | PKT_TX_TUNNEL_GRE)) == \
(PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
PKT_TX_TUNNEL_GRE))
/**
* Internal function which updates the UDP header of a packet, following
* segmentation. This is required to update the header's datagram length field.

View File

@ -42,11 +42,13 @@ update_tunnel_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta,
struct tcp_hdr *tcp_hdr;
uint32_t sent_seq;
uint16_t outer_id, inner_id, tail_idx, i;
uint16_t outer_ipv4_offset, inner_ipv4_offset, udp_offset, tcp_offset;
uint16_t outer_ipv4_offset, inner_ipv4_offset;
uint16_t udp_gre_offset, tcp_offset;
uint8_t update_udp_hdr;
outer_ipv4_offset = pkt->outer_l2_len;
udp_offset = outer_ipv4_offset + pkt->outer_l3_len;
inner_ipv4_offset = udp_offset + pkt->l2_len;
udp_gre_offset = outer_ipv4_offset + pkt->outer_l3_len;
inner_ipv4_offset = udp_gre_offset + pkt->l2_len;
tcp_offset = inner_ipv4_offset + pkt->l3_len;
/* Outer IPv4 header. */
@ -63,9 +65,13 @@ update_tunnel_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta,
sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
tail_idx = nb_segs - 1;
/* Only update UDP header for VxLAN packets. */
update_udp_hdr = (pkt->ol_flags & PKT_TX_TUNNEL_VXLAN) ? 1 : 0;
for (i = 0; i < nb_segs; i++) {
update_ipv4_header(segs[i], outer_ipv4_offset, outer_id);
update_udp_header(segs[i], udp_offset);
if (update_udp_hdr)
update_udp_header(segs[i], udp_gre_offset);
update_ipv4_header(segs[i], inner_ipv4_offset, inner_id);
update_tcp_header(segs[i], tcp_offset, sent_seq, i < tail_idx);
outer_id++;

View File

@ -58,7 +58,8 @@ rte_gso_segment(struct rte_mbuf *pkt,
nb_pkts_out < 1 ||
gso_ctx->gso_size < RTE_GSO_SEG_SIZE_MIN ||
((gso_ctx->gso_types & (DEV_TX_OFFLOAD_TCP_TSO |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) == 0))
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0))
return -EINVAL;
if (gso_ctx->gso_size >= pkt->pkt_len) {
@ -73,8 +74,10 @@ rte_gso_segment(struct rte_mbuf *pkt,
ipid_delta = (gso_ctx->flag != RTE_GSO_FLAG_IPID_FIXED);
ol_flags = pkt->ol_flags;
if (IS_IPV4_VXLAN_TCP4(pkt->ol_flags)
&& (gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) {
if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) &&
(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
((IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
(gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO)))) {
pkt->ol_flags &= (~PKT_TX_TCP_SEG);
ret = gso_tunnel_tcp4_segment(pkt, gso_size, ipid_delta,
direct_pool, indirect_pool,