Jiayu Hu 0d2cbe59b7 lib/gro: support TCP/IPv4
In this patch, we introduce five APIs to support TCP/IPv4 GRO.
- gro_tcp4_reassemble: reassemble an input TCP/IPv4 packet.
- gro_tcp4_tbl_create: create a TCP/IPv4 reassembly table, which is used
    to merge packets.
- gro_tcp4_tbl_destroy: free the memory of a TCP/IPv4 reassembly table.
- gro_tcp4_tbl_pkt_count: return the number of packets in a TCP/IPv4
    reassembly table.
- gro_tcp4_tbl_timeout_flush: flush timed-out packets from a TCP/IPv4
    reassembly table.

The TCP/IPv4 GRO API assumes all input packets have correct IPv4 and
TCP checksums, and it doesn't update the IPv4 and TCP checksums of
merged packets. If input packets are IP fragmented, the TCP/IPv4 GRO
API assumes they are complete packets (i.e. with L4 headers).
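
For illustration, here is a rough sketch (not part of the patch) of how
the five functions fit together. The prototypes and return-value
conventions are inferred from how rte_gro.c below calls them, and the
surrounding variables (socket_id, pkt, out, timeout_cycles, ...) are
placeholders:

/* rough usage sketch; prototypes inferred from rte_gro.c below */
void *tbl = gro_tcp4_tbl_create(socket_id, max_flow_num, max_item_per_flow);
uint64_t now = rte_rdtsc();

/* >0: merged into an existing packet, <0: can't be processed by
 * TCP/IPv4 GRO (the caller keeps the packet unchanged); otherwise
 * the packet is held in the table until it is flushed */
int32_t ret = gro_tcp4_reassemble(pkt, tbl, now);

/* flush packets whose start time is no later than the given timestamp */
uint16_t nb_out = gro_tcp4_tbl_timeout_flush(tbl, now - timeout_cycles,
		out, max_nb_out);

/* number of packets still held in the table */
uint32_t held = gro_tcp4_tbl_pkt_count(tbl);

gro_tcp4_tbl_destroy(tbl);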

In TCP/IPv4 GRO, we use a table structure, called the TCP/IPv4 reassembly
table, to reassemble packets. A TCP/IPv4 reassembly table includes a key
array and an item array, where the key array keeps the criteria to merge
packets and the item array keeps packet information.

One key in the key array points to an item group, which consists of
packets that have the same criteria value. If two packets can be
merged, they must be in the same item group. Each key in the key array
includes two parts:
- criteria: the criteria for merging packets. If two packets can be
    merged, they must have the same criteria value.
- start_index: the index of the first incoming packet of the item group.
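
As a sketch (field names and types here are assumptions; the real
definition lives in gro_tcp4.h), a key could look like:

/* sketch only; see gro_tcp4.h for the real definition */
struct gro_tcp4_key {
	/* merge criteria, e.g. addresses and ports (assumed fields) */
	uint32_t ip_src_addr;
	uint32_t ip_dst_addr;
	uint16_t src_port;
	uint16_t dst_port;
	/* index of the first incoming packet of the item group */
	uint32_t start_index;
};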

Each element in the item array keeps the information of one packet. It
mainly includes three parts:
- firstseg: the address of the first segment of the packet
- lastseg: the address of the last segment of the packet
- next_pkt_index: the index of the next packet in the same item group.
    All packets in the same item group are chained by next_pkt_index.
    With next_pkt_index, we can locate all packets in the same item
    group one by one.
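
A rough sketch of an item, and of the table tying keys and items
together, using the field names from this description and from the
wrapper code in rte_gro.c below (the real definitions are in
gro_tcp4.h):

/* sketch only; see gro_tcp4.h for the real definitions */
struct gro_tcp4_item {
	struct rte_mbuf *firstseg;	/* first segment of the packet */
	struct rte_mbuf *lastseg;	/* last segment of the packet */
	uint32_t next_pkt_index;	/* next packet in the same item group */
	/* plus bookkeeping, e.g. the arrival timestamp used when flushing */
};

struct gro_tcp4_tbl {
	struct gro_tcp4_key *keys;	/* key array */
	struct gro_tcp4_item *items;	/* item array */
	uint32_t key_num, item_num;	/* current numbers of keys/items */
	uint32_t max_key_num, max_item_num;	/* capacities */
};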

Processing an incoming packet requires three steps:
a. check if the packet should be processed. Packets with one of the
    following properties won't be processed:
	- FIN, SYN, RST, URG, PSH, ECE or CWR bit is set;
	- packet payload length is 0.
b. traverse the key array to find a key which has the same criteria
    value as the incoming packet. If one is found, go to step c.
    Otherwise, insert a new key and insert the packet into the item
    array.
c. locate the first packet in the item group via the start_index of the
    key, then traverse all packets in the item group via next_pkt_index.
    If a packet that can be merged with the incoming one is found, merge
    them together; otherwise, insert the incoming packet into this item
    group.
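
In pseudo-C, steps a-c look roughly as follows. The helper names
(has_special_tcp_flags, payload_len, find_key, try_merge, ...) are
illustrative only, not the functions the patch actually defines:

/* illustrative pseudo-code for steps a-c; not the actual implementation */
static int32_t
process_one_packet(struct rte_mbuf *pkt, struct gro_tcp4_tbl *tbl)
{
	struct gro_tcp4_key *key;
	uint32_t i;

	/* step a: skip packets TCP/IPv4 GRO must not touch */
	if (has_special_tcp_flags(pkt) || payload_len(pkt) == 0)
		return -1;	/* caller keeps the packet unchanged */

	/* step b: look up a key with the same criteria value */
	key = find_key(tbl, pkt);
	if (key == NULL) {
		insert_new_key_and_item(tbl, pkt);
		return 0;
	}

	/* step c: walk the item group chained by next_pkt_index */
	for (i = key->start_index; i != INVALID_ARRAY_INDEX;
			i = tbl->items[i].next_pkt_index) {
		if (try_merge(&tbl->items[i], pkt))
			return 1;	/* merged into an existing packet */
	}
	insert_item_into_group(tbl, key, pkt);
	return 0;
}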

Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
Reviewed-by: Jianfeng Tan <jianfeng.tan@intel.com>
2017-07-09 18:14:54 +02:00

/*-
* BSD LICENSE
*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include "rte_gro.h"
#include "gro_tcp4.h"

typedef void *(*gro_tbl_create_fn)(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow);
typedef void (*gro_tbl_destroy_fn)(void *tbl);
typedef uint32_t (*gro_tbl_pkt_count_fn)(void *tbl);

static gro_tbl_create_fn tbl_create_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_create, NULL};
static gro_tbl_destroy_fn tbl_destroy_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_destroy, NULL};
static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_pkt_count, NULL};

/*
* GRO context structure, which is used to merge packets. It keeps
* many reassembly tables of desired GRO types. Applications need to
* create GRO context objects before using rte_gro_reassemble to
* perform GRO.
*/
struct gro_ctx {
	/* GRO types to perform */
	uint64_t gro_types;
	/* reassembly tables */
	void *tbls[RTE_GRO_TYPE_MAX_NUM];
};
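
/*
 * Illustrative usage sketch (not part of this file's logic): an
 * application would typically do something like the following before
 * feeding bursts of packets to GRO. Parameter values are examples only.
 *
 *	struct rte_gro_param param = {
 *		.gro_types = RTE_GRO_TCP_IPV4,
 *		.max_flow_num = 64,
 *		.max_item_per_flow = 32,
 *		.socket_id = rte_socket_id(),
 *	};
 *	void *ctx = rte_gro_ctx_create(&param);
 *
 *	nb_pkts = rte_gro_reassemble(pkts, nb_pkts, ctx);
 *	...
 *	nb_out = rte_gro_timeout_flush(ctx, timeout_cycles,
 *			RTE_GRO_TCP_IPV4, out, max_nb_out);
 *	rte_gro_ctx_destroy(ctx);
 */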

void *
rte_gro_ctx_create(const struct rte_gro_param *param)
{
	struct gro_ctx *gro_ctx;
	gro_tbl_create_fn create_tbl_fn;
	uint64_t gro_type_flag = 0;
	uint64_t gro_types = 0;
	uint8_t i;

	gro_ctx = rte_zmalloc_socket(__func__,
			sizeof(struct gro_ctx),
			RTE_CACHE_LINE_SIZE,
			param->socket_id);
	if (gro_ctx == NULL)
		return NULL;

	for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
		gro_type_flag = 1 << i;
		if ((param->gro_types & gro_type_flag) == 0)
			continue;

		create_tbl_fn = tbl_create_fn[i];
		if (create_tbl_fn == NULL)
			continue;

		gro_ctx->tbls[i] = create_tbl_fn(param->socket_id,
				param->max_flow_num,
				param->max_item_per_flow);
		if (gro_ctx->tbls[i] == NULL) {
			/* destroy all created tables */
			gro_ctx->gro_types = gro_types;
			rte_gro_ctx_destroy(gro_ctx);
			return NULL;
		}
		gro_types |= gro_type_flag;
	}
	gro_ctx->gro_types = param->gro_types;

	return gro_ctx;
}

void
rte_gro_ctx_destroy(void *ctx)
{
	gro_tbl_destroy_fn destroy_tbl_fn;
	struct gro_ctx *gro_ctx = ctx;
	uint64_t gro_type_flag;
	uint8_t i;

	if (gro_ctx == NULL)
		return;
	for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
		gro_type_flag = 1 << i;
		if ((gro_ctx->gro_types & gro_type_flag) == 0)
			continue;
		destroy_tbl_fn = tbl_destroy_fn[i];
		if (destroy_tbl_fn)
			destroy_tbl_fn(gro_ctx->tbls[i]);
	}
	rte_free(gro_ctx);
}

uint16_t
rte_gro_reassemble_burst(struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		const struct rte_gro_param *param)
{
	uint16_t i;
	uint16_t nb_after_gro = nb_pkts;
	uint32_t item_num;

	/* allocate a reassembly table for TCP/IPv4 GRO */
	struct gro_tcp4_tbl tcp_tbl;
	struct gro_tcp4_key tcp_keys[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_tcp4_item tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };

	struct rte_mbuf *unprocess_pkts[nb_pkts];
	uint16_t unprocess_num = 0;
	int32_t ret;
	uint64_t current_time;

	if ((param->gro_types & RTE_GRO_TCP_IPV4) == 0)
		return nb_pkts;

	/* get the actual number of packets */
	item_num = RTE_MIN(nb_pkts, (param->max_flow_num *
			param->max_item_per_flow));
	item_num = RTE_MIN(item_num, RTE_GRO_MAX_BURST_ITEM_NUM);

	for (i = 0; i < item_num; i++)
		tcp_keys[i].start_index = INVALID_ARRAY_INDEX;

	tcp_tbl.keys = tcp_keys;
	tcp_tbl.items = tcp_items;
	tcp_tbl.key_num = 0;
	tcp_tbl.item_num = 0;
	tcp_tbl.max_key_num = item_num;
	tcp_tbl.max_item_num = item_num;

	current_time = rte_rdtsc();

	for (i = 0; i < nb_pkts; i++) {
		if ((pkts[i]->packet_type & (RTE_PTYPE_L3_IPV4 |
					RTE_PTYPE_L4_TCP)) ==
				(RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP)) {
			ret = gro_tcp4_reassemble(pkts[i],
					&tcp_tbl,
					current_time);
			if (ret > 0)
				/* merge successfully */
				nb_after_gro--;
			else if (ret < 0) {
				unprocess_pkts[unprocess_num++] =
					pkts[i];
			}
		} else
			unprocess_pkts[unprocess_num++] = pkts[i];
	}

	/* re-arrange GROed packets */
	if (nb_after_gro < nb_pkts) {
		i = gro_tcp4_tbl_timeout_flush(&tcp_tbl, current_time,
				pkts, nb_pkts);
		if (unprocess_num > 0) {
			memcpy(&pkts[i], unprocess_pkts,
					sizeof(struct rte_mbuf *) *
					unprocess_num);
		}
	}

	return nb_after_gro;
}

uint16_t
rte_gro_reassemble(struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		void *ctx)
{
	uint16_t i, unprocess_num = 0;
	struct rte_mbuf *unprocess_pkts[nb_pkts];
	struct gro_ctx *gro_ctx = ctx;
	uint64_t current_time;

	if ((gro_ctx->gro_types & RTE_GRO_TCP_IPV4) == 0)
		return nb_pkts;

	current_time = rte_rdtsc();

	for (i = 0; i < nb_pkts; i++) {
		if ((pkts[i]->packet_type & (RTE_PTYPE_L3_IPV4 |
					RTE_PTYPE_L4_TCP)) ==
				(RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP)) {
			if (gro_tcp4_reassemble(pkts[i],
						gro_ctx->tbls
						[RTE_GRO_TCP_IPV4_INDEX],
						current_time) < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else
			unprocess_pkts[unprocess_num++] = pkts[i];
	}
	if (unprocess_num > 0) {
		memcpy(pkts, unprocess_pkts,
				sizeof(struct rte_mbuf *) *
				unprocess_num);
	}

	return unprocess_num;
}

uint16_t
rte_gro_timeout_flush(void *ctx,
		uint64_t timeout_cycles,
		uint64_t gro_types,
		struct rte_mbuf **out,
		uint16_t max_nb_out)
{
	struct gro_ctx *gro_ctx = ctx;
	uint64_t flush_timestamp;

	gro_types = gro_types & gro_ctx->gro_types;
	flush_timestamp = rte_rdtsc() - timeout_cycles;

	if (gro_types & RTE_GRO_TCP_IPV4) {
		return gro_tcp4_tbl_timeout_flush(
				gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX],
				flush_timestamp,
				out, max_nb_out);
	}
	return 0;
}

uint64_t
rte_gro_get_pkt_count(void *ctx)
{
	struct gro_ctx *gro_ctx = ctx;
	gro_tbl_pkt_count_fn pkt_count_fn;
	uint64_t item_num = 0;
	uint64_t gro_type_flag;
	uint8_t i;

	for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
		gro_type_flag = 1 << i;
		if ((gro_ctx->gro_types & gro_type_flag) == 0)
			continue;

		pkt_count_fn = tbl_pkt_count_fn[i];
		if (pkt_count_fn == NULL)
			continue;
		item_num += pkt_count_fn(gro_ctx->tbls[i]);
	}
	return item_num;
}