numam-dpdk/app/test-pipeline/init.c
Commit 8d54b1ec4a by Xuan Ding: ethdev: remove Rx header split port offload
As announced in the deprecation note, remove the Rx offload flag
'RTE_ETH_RX_OFFLOAD_HEADER_SPLIT' and the 'split_hdr_size' field from
the 'rte_eth_rxmode' structure. Also remove the places where examples
and apps initialize 'split_hdr_size', and where drivers check that
'split_hdr_size' is 0.

Users can still use 'RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT' for per-queue packet
split offload, which is configured via 'rte_eth_rxseg_split'.

Signed-off-by: Xuan Ding <xuan.ding@intel.com>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2022-10-04 11:20:04 +02:00
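
For reference, below is a minimal sketch of the per-queue packet split path the
note points to, built on 'RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT' and
'rte_eth_rxseg_split'. The helper name 'setup_buffer_split_rxq', the two
mempools 'mp_hdr' and 'mp_payload', the 64-byte split point and the
512-descriptor queue are illustrative assumptions, not part of this commit;
the test-pipeline app below does not use packet split itself.

#include <string.h>

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Hedged sketch: split each received packet into two segments, the first up
 * to 64 bytes from a small "header" pool, the rest from a payload pool.
 * The PMD must advertise RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT for this to work.
 */
static int
setup_buffer_split_rxq(uint16_t port_id, struct rte_mempool *mp_hdr,
	struct rte_mempool *mp_payload)
{
	union rte_eth_rxseg rx_useg[2];
	struct rte_eth_rxconf rxq_conf;

	memset(rx_useg, 0, sizeof(rx_useg));
	memset(&rxq_conf, 0, sizeof(rxq_conf));

	/* First segment: up to 64 bytes, taken from the header pool. */
	rx_useg[0].split.mp = mp_hdr;
	rx_useg[0].split.length = 64;
	rx_useg[0].split.offset = 0;

	/* Second segment: remaining data from the payload pool;
	 * length 0 means "use the pool's full data room".
	 */
	rx_useg[1].split.mp = mp_payload;
	rx_useg[1].split.length = 0;
	rx_useg[1].split.offset = 0;

	rxq_conf.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
	rxq_conf.rx_seg = rx_useg;
	rxq_conf.rx_nseg = 2;

	/* With rx_seg/rx_nseg set, the single-mempool argument is NULL. */
	return rte_eth_rx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), &rxq_conf, NULL);
}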


/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>
#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include "main.h"

struct app_params app = {
	/* Ports */
	.n_ports = APP_MAX_PORTS,
	.port_rx_ring_size = 128,
	.port_tx_ring_size = 512,

	/* Rings */
	.ring_rx_size = 128,
	.ring_tx_size = 128,

	/* Buffer pool */
	.pool_buffer_size = 2048 + RTE_PKTMBUF_HEADROOM,
	.pool_size = 32 * 1024,
	.pool_cache_size = 256,

	/* Burst sizes */
	.burst_size_rx_read = 64,
	.burst_size_rx_write = 64,
	.burst_size_worker_read = 64,
	.burst_size_worker_write = 64,
	.burst_size_tx_read = 64,
	.burst_size_tx_write = 64,
};

static struct rte_eth_conf port_conf = {
	.rxmode = {
		.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = RTE_ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
	},
};

static struct rte_eth_rxconf rx_conf = {
	.rx_thresh = {
		.pthresh = 8,
		.hthresh = 8,
		.wthresh = 4,
	},
	.rx_free_thresh = 64,
	.rx_drop_en = 0,
};

static struct rte_eth_txconf tx_conf = {
	.tx_thresh = {
		.pthresh = 36,
		.hthresh = 0,
		.wthresh = 0,
	},
	.tx_free_thresh = 0,
	.tx_rs_thresh = 0,
};

static void
app_init_mbuf_pools(void)
{
	/* Init the buffer pool */
	RTE_LOG(INFO, USER1, "Creating the mbuf pool ...\n");
	app.pool = rte_pktmbuf_pool_create("mempool", app.pool_size,
		app.pool_cache_size, 0, app.pool_buffer_size, rte_socket_id());
	if (app.pool == NULL)
		rte_panic("Cannot create mbuf pool\n");
}

static void
app_init_rings(void)
{
	uint32_t i;

	for (i = 0; i < app.n_ports; i++) {
		char name[32];

		snprintf(name, sizeof(name), "app_ring_rx_%u", i);

		app.rings_rx[i] = rte_ring_create(
			name,
			app.ring_rx_size,
			rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);

		if (app.rings_rx[i] == NULL)
			rte_panic("Cannot create RX ring %u\n", i);
	}

	for (i = 0; i < app.n_ports; i++) {
		char name[32];

		snprintf(name, sizeof(name), "app_ring_tx_%u", i);

		app.rings_tx[i] = rte_ring_create(
			name,
			app.ring_tx_size,
			rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);

		if (app.rings_tx[i] == NULL)
			rte_panic("Cannot create TX ring %u\n", i);
	}
}

static void
app_ports_check_link(void)
{
	uint32_t all_ports_up, i;
	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];

	all_ports_up = 1;

	for (i = 0; i < app.n_ports; i++) {
		struct rte_eth_link link;
		uint16_t port;
		int ret;

		port = app.ports[i];
		memset(&link, 0, sizeof(link));
		ret = rte_eth_link_get_nowait(port, &link);
		if (ret < 0) {
			RTE_LOG(INFO, USER1,
				"Failed to get port %u link status: %s\n",
				port, rte_strerror(-ret));
			all_ports_up = 0;
			continue;
		}

		rte_eth_link_to_str(link_status_text,
			sizeof(link_status_text), &link);
		RTE_LOG(INFO, USER1, "Port %u %s\n",
			port,
			link_status_text);

		if (link.link_status == RTE_ETH_LINK_DOWN)
			all_ports_up = 0;
	}

	if (all_ports_up == 0)
		rte_panic("Some NIC ports are DOWN\n");
}

static void
app_init_ports(void)
{
	uint32_t i;

	/* Init NIC ports, then start the ports */
	for (i = 0; i < app.n_ports; i++) {
		uint16_t port;
		int ret;

		port = app.ports[i];
		RTE_LOG(INFO, USER1, "Initializing NIC port %u ...\n", port);

		/* Init port */
		ret = rte_eth_dev_configure(
			port,
			1,
			1,
			&port_conf);
		if (ret < 0)
			rte_panic("Cannot init NIC port %u (%d)\n", port, ret);

		ret = rte_eth_promiscuous_enable(port);
		if (ret != 0)
			rte_panic("Cannot enable promiscuous mode for port %u: %s\n",
				port, rte_strerror(-ret));

		/* Init RX queues */
		ret = rte_eth_rx_queue_setup(
			port,
			0,
			app.port_rx_ring_size,
			rte_eth_dev_socket_id(port),
			&rx_conf,
			app.pool);
		if (ret < 0)
			rte_panic("Cannot init RX for port %u (%d)\n",
				(uint32_t) port, ret);

		/* Init TX queues */
		ret = rte_eth_tx_queue_setup(
			port,
			0,
			app.port_tx_ring_size,
			rte_eth_dev_socket_id(port),
			&tx_conf);
		if (ret < 0)
			rte_panic("Cannot init TX for port %u (%d)\n",
				(uint32_t) port, ret);

		/* Start port */
		ret = rte_eth_dev_start(port);
		if (ret < 0)
			rte_panic("Cannot start port %u (%d)\n", port, ret);
	}

	app_ports_check_link();
}

void
app_init(void)
{
	app_init_mbuf_pools();
	app_init_rings();
	app_init_ports();

	RTE_LOG(INFO, USER1, "Initialization completed\n");
}