numam-dpdk/lib/librte_gso/rte_gso.c
Jiayu Hu b166d4f30b gso: support UDP/IPv4 fragmentation
This patch adds GSO support for UDP/IPv4 packets. Supported packets
may include a single VLAN tag. UDP/IPv4 GSO doesn't check if input
packets have correct checksums, and doesn't update checksums for
output packets (the responsibility for this lies with the application).
Additionally, UDP/IPv4 GSO doesn't process IP fragmented packets.

UDP/IPv4 GSO uses two chained MBUFs, one direct MBUF and one indirect
MBUF, to organize an output packet. The direct MBUF stores the packet
header, while the indirect mbuf simply points to a location within the
original packet's payload. Consequently, use of UDP GSO requires
multi-segment MBUF support in the TX functions of the NIC driver.

If a packet is GSO'd, UDP/IPv4 GSO reduces its MBUF refcnt by 1. As a
result, when all of its GSOed segments are freed, the packet is freed
automatically.

Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
Acked-by: Xiao Wang <xiao.w.wang@intel.com>
2018-07-11 23:45:20 +02:00

96 lines
2.6 KiB
C

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2017 Intel Corporation
*/
#include <errno.h>
#include <rte_log.h>
#include <rte_ethdev.h>
#include "rte_gso.h"
#include "gso_common.h"
#include "gso_tcp4.h"
#include "gso_tunnel_tcp4.h"
#include "gso_udp4.h"
/*
 * True when the context cannot perform UDP/IPv4 GSO: the UDP TSO
 * offload capability is not enabled, or the requested segment size is
 * below the minimum allowed for UDP segmentation.
 */
#define ILLEGAL_UDP_GSO_CTX(ctx) \
((((ctx)->gso_types & DEV_TX_OFFLOAD_UDP_TSO) == 0) || \
(ctx)->gso_size < RTE_GSO_UDP_SEG_SIZE_MIN)

/*
 * True when the context cannot perform any TCP/IPv4 GSO (plain,
 * VXLAN-tunneled, or GRE-tunneled): none of the corresponding TSO
 * offload capabilities is enabled, or the requested segment size is
 * below the minimum allowed for TCP segmentation.
 */
#define ILLEGAL_TCP_GSO_CTX(ctx) \
((((ctx)->gso_types & (DEV_TX_OFFLOAD_TCP_TSO | \
DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
(ctx)->gso_size < RTE_GSO_SEG_SIZE_MIN)
/*
 * Split an oversized packet into GSO segments.
 *
 * Dispatches to the TCP/IPv4, tunneled-TCP/IPv4 (VXLAN/GRE), or
 * UDP/IPv4 segmentation routine based on the packet's ol_flags and the
 * offload types enabled in the context. Output segments are written to
 * pkts_out; on success with more than one segment, one reference is
 * dropped from every mbuf of the input chain so the input packet is
 * freed automatically once all output segments are freed.
 *
 * Returns the number of packets stored in pkts_out (1 when the packet
 * was passed through unsegmented or its type is unsupported), or
 * -EINVAL on invalid arguments / a negative value on segmentation
 * failure, in which case the packet's ol_flags are restored.
 */
int
rte_gso_segment(struct rte_mbuf *pkt,
		const struct rte_gso_ctx *gso_ctx,
		struct rte_mbuf **pkts_out,
		uint16_t nb_pkts_out)
{
	struct rte_mempool *pool_direct, *pool_indirect;
	uint64_t saved_flags;
	uint16_t seg_size;
	uint8_t ipid_incr;
	int nb_segs = 1;

	/* Reject bad pointers, empty output arrays, and contexts that
	 * enable neither UDP nor TCP segmentation.
	 */
	if (pkt == NULL || pkts_out == NULL || gso_ctx == NULL ||
			nb_pkts_out < 1 ||
			(ILLEGAL_UDP_GSO_CTX(gso_ctx) &&
			 ILLEGAL_TCP_GSO_CTX(gso_ctx)))
		return -EINVAL;

	/* Packet already fits into one segment: hand it back as-is. */
	if (gso_ctx->gso_size >= pkt->pkt_len) {
		pkt->ol_flags &= ~(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG);
		pkts_out[0] = pkt;
		return 1;
	}

	pool_direct = gso_ctx->direct_pool;
	pool_indirect = gso_ctx->indirect_pool;
	seg_size = gso_ctx->gso_size;
	ipid_incr = (gso_ctx->flag != RTE_GSO_FLAG_IPID_FIXED);
	/* Keep a copy so ol_flags can be restored on failure. */
	saved_flags = pkt->ol_flags;

	if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) &&
			(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
			(IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
			(gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO))) {
		pkt->ol_flags &= ~PKT_TX_TCP_SEG;
		nb_segs = gso_tunnel_tcp4_segment(pkt, seg_size, ipid_incr,
				pool_direct, pool_indirect,
				pkts_out, nb_pkts_out);
	} else if (IS_IPV4_TCP(pkt->ol_flags) &&
			(gso_ctx->gso_types & DEV_TX_OFFLOAD_TCP_TSO)) {
		pkt->ol_flags &= ~PKT_TX_TCP_SEG;
		nb_segs = gso_tcp4_segment(pkt, seg_size, ipid_incr,
				pool_direct, pool_indirect,
				pkts_out, nb_pkts_out);
	} else if (IS_IPV4_UDP(pkt->ol_flags) &&
			(gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
		pkt->ol_flags &= ~PKT_TX_UDP_SEG;
		nb_segs = gso_udp4_segment(pkt, seg_size, pool_direct,
				pool_indirect, pkts_out, nb_pkts_out);
	} else {
		/* unsupported packet, skip */
		pkts_out[0] = pkt;
		RTE_LOG(DEBUG, GSO, "Unsupported packet type\n");
		return 1;
	}

	if (nb_segs > 1) {
		struct rte_mbuf *seg;

		/* Output segments reference the input data indirectly;
		 * drop one refcnt per input mbuf so the chain is freed
		 * when the last segment is freed.
		 */
		for (seg = pkt; seg != NULL; seg = seg->next)
			rte_mbuf_refcnt_update(seg, -1);
	} else if (nb_segs < 0) {
		/* Segmentation failed: undo the flag changes above. */
		pkt->ol_flags = saved_flags;
	}

	return nb_segs;
}