build: make GRO/GSO libraries optional

GRO and GSO integration in testpmd is relatively self-contained and easy
to extract.
These libraries can be made optional as they provide standalone
features.

Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
This commit is contained in:
David Marchand 2021-11-17 12:28:44 +01:00 committed by Thomas Monjalon
parent bd93fc6830
commit 6970401e97
7 changed files with 90 additions and 7 deletions

View File

@ -34,7 +34,9 @@
#include <rte_string_fns.h>
#include <rte_devargs.h>
#include <rte_flow.h>
#ifdef RTE_LIB_GRO
#include <rte_gro.h>
#endif
#include <rte_mbuf_dyn.h>
#include <cmdline_rdline.h>
@ -458,6 +460,7 @@ static void cmd_help_long_parsed(void *parsed_result,
"tso show (portid)"
" Display the status of TCP Segmentation Offload.\n\n"
#ifdef RTE_LIB_GRO
"set port (port_id) gro on|off\n"
" Enable or disable Generic Receive Offload in"
" csum forwarding engine.\n\n"
@ -468,7 +471,9 @@ static void cmd_help_long_parsed(void *parsed_result,
"set gro flush (cycles)\n"
" Set the cycle to flush GROed packets from"
" reassembly tables.\n\n"
#endif
#ifdef RTE_LIB_GSO
"set port (port_id) gso (on|off)"
" Enable or disable Generic Segmentation Offload in"
" csum forwarding engine.\n\n"
@ -479,6 +484,7 @@ static void cmd_help_long_parsed(void *parsed_result,
"show port (port_id) gso\n"
" Show GSO configuration.\n\n"
#endif
"set fwd (%s)\n"
" Set packet forwarding mode.\n\n"
@ -5149,6 +5155,7 @@ cmdline_parse_inst_t cmd_tunnel_tso_show = {
},
};
#ifdef RTE_LIB_GRO
/* *** SET GRO FOR A PORT *** */
struct cmd_gro_enable_result {
cmdline_fixed_string_t cmd_set;
@ -5292,7 +5299,9 @@ cmdline_parse_inst_t cmd_gro_flush = {
NULL,
},
};
#endif /* RTE_LIB_GRO */
#ifdef RTE_LIB_GSO
/* *** ENABLE/DISABLE GSO *** */
struct cmd_gso_enable_result {
cmdline_fixed_string_t cmd_set;
@ -5459,6 +5468,7 @@ cmdline_parse_inst_t cmd_gso_show = {
NULL,
},
};
#endif /* RTE_LIB_GSO */
/* *** ENABLE/DISABLE FLUSH ON RX STREAMS *** */
struct cmd_set_flush_rx {
@ -17660,12 +17670,16 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_tso_show,
(cmdline_parse_inst_t *)&cmd_tunnel_tso_set,
(cmdline_parse_inst_t *)&cmd_tunnel_tso_show,
#ifdef RTE_LIB_GRO
(cmdline_parse_inst_t *)&cmd_gro_enable,
(cmdline_parse_inst_t *)&cmd_gro_flush,
(cmdline_parse_inst_t *)&cmd_gro_show,
#endif
#ifdef RTE_LIB_GSO
(cmdline_parse_inst_t *)&cmd_gso_enable,
(cmdline_parse_inst_t *)&cmd_gso_size,
(cmdline_parse_inst_t *)&cmd_gso_show,
#endif
(cmdline_parse_inst_t *)&cmd_link_flow_control_set,
(cmdline_parse_inst_t *)&cmd_link_flow_control_set_rx,
(cmdline_parse_inst_t *)&cmd_link_flow_control_set_tx,

View File

@ -48,7 +48,9 @@
#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif
#ifdef RTE_LIB_GRO
#include <rte_gro.h>
#endif
#include <rte_hexdump.h>
#include "testpmd.h"
@ -4191,6 +4193,7 @@ set_tx_pkt_times(unsigned int *tx_times)
tx_pkt_times_intra = tx_times[1];
}
#ifdef RTE_LIB_GRO
void
setup_gro(const char *onoff, portid_t port_id)
{
@ -4272,7 +4275,9 @@ show_gro(portid_t port_id)
} else
printf("Port %u doesn't enable GRO.\n", port_id);
}
#endif /* RTE_LIB_GRO */
#ifdef RTE_LIB_GSO
void
setup_gso(const char *mode, portid_t port_id)
{
@ -4296,6 +4301,7 @@ setup_gso(const char *mode, portid_t port_id)
gso_ports[port_id].enable = 0;
}
}
#endif /* RTE_LIB_GSO */
char*
list_pkt_forwarding_modes(void)

View File

@ -40,8 +40,12 @@
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_flow.h>
#ifdef RTE_LIB_GRO
#include <rte_gro.h>
#endif
#ifdef RTE_LIB_GSO
#include <rte_gso.h>
#endif
#include <rte_geneve.h>
#include "testpmd.h"
@ -68,7 +72,9 @@ uint16_t geneve_udp_port = RTE_GENEVE_DEFAULT_PORT;
/* structure that caches offload info for the current packet */
struct testpmd_offload_info {
uint16_t ethertype;
#ifdef RTE_LIB_GSO
uint8_t gso_enable;
#endif
uint16_t l2_len;
uint16_t l3_len;
uint16_t l4_len;
@ -510,8 +516,10 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
info->ethertype);
}
}
#ifdef RTE_LIB_GSO
if (info->gso_enable)
ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
#endif
} else if (info->l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
if (tso_segsz)
@ -524,8 +532,10 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
get_udptcp_checksum(l3_hdr, tcp_hdr,
info->ethertype);
}
#ifdef RTE_LIB_GSO
if (info->gso_enable)
ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
#endif
} else if (info->l4_proto == IPPROTO_SCTP) {
sctp_hdr = (struct rte_sctp_hdr *)
((char *)l3_hdr + info->l3_len);
@ -794,16 +804,20 @@ static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
#ifdef RTE_LIB_GSO
struct rte_mbuf *gso_segments[GSO_MAX_PKT_BURST];
struct rte_gso_ctx *gso_ctx;
#endif
struct rte_mbuf **tx_pkts_burst;
struct rte_port *txp;
struct rte_mbuf *m, *p;
struct rte_ether_hdr *eth_hdr;
void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
#ifdef RTE_LIB_GRO
void **gro_ctx;
uint16_t gro_pkts_num;
uint8_t gro_enable;
#endif
uint16_t nb_rx;
uint16_t nb_tx;
uint16_t nb_prep;
@ -816,8 +830,6 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
uint32_t rx_bad_outer_l4_csum;
uint32_t rx_bad_outer_ip_csum;
struct testpmd_offload_info info;
uint16_t nb_segments = 0;
int ret;
uint64_t start_tsc = 0;
@ -835,15 +847,19 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
rx_bad_l4_csum = 0;
rx_bad_outer_l4_csum = 0;
rx_bad_outer_ip_csum = 0;
#ifdef RTE_LIB_GRO
gro_enable = gro_ports[fs->rx_port].enable;
#endif
txp = &ports[fs->tx_port];
tx_offloads = txp->dev_conf.txmode.offloads;
memset(&info, 0, sizeof(info));
info.tso_segsz = txp->tso_segsz;
info.tunnel_tso_segsz = txp->tunnel_tso_segsz;
#ifdef RTE_LIB_GSO
if (gso_ports[fs->tx_port].enable)
info.gso_enable = 1;
#endif
for (i = 0; i < nb_rx; i++) {
if (likely(i < nb_rx - 1))
@ -1052,6 +1068,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
}
}
#ifdef RTE_LIB_GRO
if (unlikely(gro_enable)) {
if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
nb_rx = rte_gro_reassemble_burst(pkts_burst, nb_rx,
@ -1073,13 +1090,17 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
}
}
}
#endif
#ifdef RTE_LIB_GSO
if (gso_ports[fs->tx_port].enable != 0) {
uint16_t nb_segments = 0;
if (gso_ports[fs->tx_port].enable == 0)
tx_pkts_burst = pkts_burst;
else {
gso_ctx = &(current_fwd_lcore()->gso_ctx);
gso_ctx->gso_size = gso_max_segment_size;
for (i = 0; i < nb_rx; i++) {
int ret;
ret = rte_gso_segment(pkts_burst[i], gso_ctx,
&gso_segments[nb_segments],
GSO_MAX_PKT_BURST - nb_segments);
@ -1101,7 +1122,9 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
tx_pkts_burst = gso_segments;
nb_rx = nb_segments;
}
} else
#endif
tx_pkts_burst = pkts_burst;
nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
tx_pkts_burst, nb_rx);

View File

@ -32,7 +32,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
ext_deps += jansson_dep
endif
deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'bus_pci']
deps += ['ethdev', 'cmdline', 'metrics', 'bus_pci']
if dpdk_conf.has('RTE_CRYPTO_SCHEDULER')
deps += 'crypto_scheduler'
endif
@ -43,6 +43,12 @@ if dpdk_conf.has('RTE_LIB_BPF')
sources += files('bpf_cmd.c')
deps += 'bpf'
endif
if dpdk_conf.has('RTE_LIB_GRO')
deps += 'gro'
endif
if dpdk_conf.has('RTE_LIB_GSO')
deps += 'gso'
endif
if dpdk_conf.has('RTE_LIB_LATENCYSTATS')
deps += 'latencystats'
endif

View File

@ -517,8 +517,10 @@ lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif
#ifdef RTE_LIB_GRO
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
#endif
/*
* hexadecimal bitmask of RX mq mode can be enabled.
@ -657,8 +659,10 @@ static void fill_xstats_display_info(void);
*/
static int all_ports_started(void);
#ifdef RTE_LIB_GSO
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
#endif
/* Holds the registered mbuf dynamic flags names. */
char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
@ -1632,8 +1636,12 @@ init_config(void)
struct rte_mempool *mbp;
unsigned int nb_mbuf_per_pool;
lcoreid_t lc_id;
#ifdef RTE_LIB_GRO
struct rte_gro_param gro_param;
#endif
#ifdef RTE_LIB_GSO
uint32_t gso_types;
#endif
/* Configuration of logical cores. */
fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
@ -1716,8 +1724,10 @@ init_config(void)
init_port_config();
#ifdef RTE_LIB_GSO
gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
#endif
/*
* Records which Mbuf pool to use by each logical core, if needed.
*/
@ -1728,6 +1738,7 @@ init_config(void)
if (mbp == NULL)
mbp = mbuf_pool_find(0, 0);
fwd_lcores[lc_id]->mbp = mbp;
#ifdef RTE_LIB_GSO
/* initialize GSO context */
fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
@ -1735,10 +1746,12 @@ init_config(void)
fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
RTE_ETHER_CRC_LEN;
fwd_lcores[lc_id]->gso_ctx.flag = 0;
#endif
}
fwd_config_setup();
#ifdef RTE_LIB_GRO
/* create a gro context for each lcore */
gro_param.gro_types = RTE_GRO_TCP_IPV4;
gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
@ -1752,6 +1765,7 @@ init_config(void)
"rte_gro_ctx_create() failed\n");
}
}
#endif
}

View File

@ -9,8 +9,12 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
#ifdef RTE_LIB_GRO
#include <rte_gro.h>
#endif
#ifdef RTE_LIB_GSO
#include <rte_gso.h>
#endif
#include <rte_os_shim.h>
#include <cmdline.h>
#include <sys/queue.h>
@ -143,7 +147,9 @@ struct fwd_stream {
/**< received packets has bad outer l4 checksum */
uint64_t rx_bad_outer_ip_csum;
/**< received packets having bad outer ip checksum */
#ifdef RTE_LIB_GRO
unsigned int gro_times; /**< GRO operation times */
#endif
uint64_t core_cycles; /**< used for RX and TX processing */
struct pkt_burst_stats rx_burst_stats;
struct pkt_burst_stats tx_burst_stats;
@ -264,9 +270,13 @@ struct rte_port {
* CPU id. configuration table.
*/
struct fwd_lcore {
#ifdef RTE_LIB_GSO
struct rte_gso_ctx gso_ctx; /**< GSO context */
#endif
struct rte_mempool *mbp; /**< The mbuf pool to use by this core */
#ifdef RTE_LIB_GRO
void *gro_ctx; /**< GRO context */
#endif
streamid_t stream_idx; /**< index of 1st stream in "fwd_streams" */
streamid_t stream_nb; /**< number of streams in "fwd_streams" */
lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */
@ -560,6 +570,7 @@ extern struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
extern uint32_t burst_tx_delay_time; /**< Burst tx delay time(us) for mac-retry. */
extern uint32_t burst_tx_retry_num; /**< Burst tx retry number for mac-retry. */
#ifdef RTE_LIB_GRO
#define GRO_DEFAULT_ITEM_NUM_PER_FLOW 32
#define GRO_DEFAULT_FLOW_NUM (RTE_GRO_MAX_BURST_ITEM_NUM / \
GRO_DEFAULT_ITEM_NUM_PER_FLOW)
@ -573,13 +584,16 @@ struct gro_status {
};
extern struct gro_status gro_ports[RTE_MAX_ETHPORTS];
extern uint8_t gro_flush_cycles;
#endif /* RTE_LIB_GRO */
#ifdef RTE_LIB_GSO
#define GSO_MAX_PKT_BURST 2048
struct gso_status {
uint8_t enable;
};
extern struct gso_status gso_ports[RTE_MAX_ETHPORTS];
extern uint16_t gso_max_segment_size;
#endif /* RTE_LIB_GSO */
/* VXLAN encap/decap parameters. */
struct vxlan_encap_conf {
@ -1006,10 +1020,14 @@ void port_rss_hash_key_update(portid_t port_id, char rss_type[],
uint8_t *hash_key, uint8_t hash_key_len);
int rx_queue_id_is_invalid(queueid_t rxq_id);
int tx_queue_id_is_invalid(queueid_t txq_id);
#ifdef RTE_LIB_GRO
void setup_gro(const char *onoff, portid_t port_id);
void setup_gro_flush_cycles(uint8_t cycles);
void show_gro(portid_t port_id);
#endif
#ifdef RTE_LIB_GSO
void setup_gso(const char *mode, portid_t port_id);
#endif
int eth_dev_info_get_print_err(uint16_t port_id,
struct rte_eth_dev_info *dev_info);
int eth_dev_conf_get_print_err(uint16_t port_id,

View File

@ -66,6 +66,8 @@ libraries = [
]
optional_libs = [
'gro',
'gso',
'kni',
'power',
'vhost',