examples/pipeline: packet framework sample

This Packet Framework sample application illustrates the capabilities
of the Intel DPDK Packet Framework toolbox.

It creates the functional blocks used by a typical IPv4 framework:
flow classification, firewall, routing, etc.

CPU cores are connected together through standard interfaces built on SW rings,
with each CPU core running a separate pipeline instance.
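
A minimal example invocation (the masks and file name are illustrative, not
part of this commit; EAL options come first, application options follow the
"--" separator, per the usage string in config.c; 0x1f selects 5 lcores to
match the 5 default cores, and 0xf selects 4 ports, a power of 2):

    ./build/ip_pipeline -c 0x1f -n 4 -- -p 0xf -f ip_pipeline.cfg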

Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Tested-by: Waterman Cao <waterman.cao@intel.com>
Acked-by: Pablo de Lara Guarch <pablo.de.lara.guarch@intel.com>
Acked-by: Ivan Boule <ivan.boule@6wind.com>
Author: Cristian Dumitrescu, 2014-06-04 19:08:38 +01:00
Committed by: Thomas Monjalon
Parent: 48f31ca50c
Commit: 77a334675f
16 changed files with 5967 additions and 0 deletions

examples/ip_pipeline/Makefile (new file, 67 lines)
@@ -0,0 +1,67 @@
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-default-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
# binary name
APP = ip_pipeline
# all sources are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := main.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += init.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += cmdline.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_rx.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_tx.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_flow_classification.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_routing.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_passthrough.c
ifeq ($(CONFIG_RTE_MBUF_SCATTER_GATHER),y)
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_ipv4_frag.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_ipv4_ras.c
endif
ifeq ($(CONFIG_RTE_LIBRTE_ACL),y)
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_firewall.c
endif
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
include $(RTE_SDK)/mk/rte.extapp.mk

(File diff suppressed because it is too large.)

examples/ip_pipeline/config.c (new file, 420 lines)
@@ -0,0 +1,420 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>
#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include <rte_string_fns.h>
#include <rte_cfgfile.h>
#include "main.h"
struct app_params app;
static const char usage[] =
"Usage: %s EAL_OPTIONS-- -p PORT_MASK [-f CONFIG_FILE]\n";
void
app_print_usage(char *prgname)
{
printf(usage, prgname);
}
const char *
app_core_type_id_to_string(enum app_core_type id)
{
switch (id) {
case APP_CORE_NONE: return "NONE";
case APP_CORE_MASTER: return "MASTER";
case APP_CORE_RX: return "RX";
case APP_CORE_TX: return "TX";
case APP_CORE_PT: return "PT";
case APP_CORE_FC: return "FC";
case APP_CORE_FW: return "FW";
case APP_CORE_RT: return "RT";
case APP_CORE_TM: return "TM";
case APP_CORE_IPV4_FRAG: return "IPV4_FRAG";
case APP_CORE_IPV4_RAS: return "IPV4_RAS";
default: return NULL;
}
}
int
app_core_type_string_to_id(const char *string, enum app_core_type *id)
{
if (strcmp(string, "NONE") == 0) {
*id = APP_CORE_NONE;
return 0;
}
if (strcmp(string, "MASTER") == 0) {
*id = APP_CORE_MASTER;
return 0;
}
if (strcmp(string, "RX") == 0) {
*id = APP_CORE_RX;
return 0;
}
if (strcmp(string, "TX") == 0) {
*id = APP_CORE_TX;
return 0;
}
if (strcmp(string, "PT") == 0) {
*id = APP_CORE_PT;
return 0;
}
if (strcmp(string, "FC") == 0) {
*id = APP_CORE_FC;
return 0;
}
if (strcmp(string, "FW") == 0) {
*id = APP_CORE_FW;
return 0;
}
if (strcmp(string, "RT") == 0) {
*id = APP_CORE_RT;
return 0;
}
if (strcmp(string, "TM") == 0) {
*id = APP_CORE_TM;
return 0;
}
if (strcmp(string, "IPV4_FRAG") == 0) {
*id = APP_CORE_IPV4_FRAG;
return 0;
}
if (strcmp(string, "IPV4_RAS") == 0) {
*id = APP_CORE_IPV4_RAS;
return 0;
}
return -1;
}
static uint64_t
app_get_core_mask(void)
{
uint64_t core_mask = 0;
uint32_t i;
for (i = 0; i < RTE_MAX_LCORE; i++) {
if (rte_lcore_is_enabled(i) == 0)
continue;
core_mask |= 1LLU << i;
}
return core_mask;
}
static int
app_install_coremask(uint64_t core_mask)
{
uint32_t n_cores, i;
for (n_cores = 0, i = 0; i < RTE_MAX_LCORE; i++)
if (app.cores[i].core_type != APP_CORE_NONE)
n_cores++;
if (n_cores != app.n_cores) {
rte_panic("Number of cores in COREMASK should be %u instead "
"of %u\n", n_cores, app.n_cores);
return -1;
}
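/* Pop the enabled lcores from the mask in increasing lcore ID order and
assign them, one by one, to the configured application cores. */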
for (i = 0; i < RTE_MAX_LCORE; i++) {
uint32_t core_id;
if (app.cores[i].core_type == APP_CORE_NONE)
continue;
core_id = __builtin_ctzll(core_mask);
core_mask &= ~(1LLU << core_id);
app.cores[i].core_id = core_id;
}
return 0;
}
static int
app_install_cfgfile(const char *file_name)
{
struct rte_cfgfile *file;
uint32_t n_cores, i;
memset(app.cores, 0, sizeof(app.cores));
if (file_name[0] == '\0')
return -1;
file = rte_cfgfile_load(file_name, 0);
if (file == NULL) {
rte_panic("Config file %s not found\n", file_name);
return -1;
}
n_cores = (uint32_t) rte_cfgfile_num_sections(file, "core",
strnlen("core", 5));
if (n_cores < app.n_cores) {
rte_panic("Config file parse error: not enough cores specified "
"(%u cores missing)\n", app.n_cores - n_cores);
return -1;
}
if (n_cores > app.n_cores) {
rte_panic("Config file parse error: too many cores specified "
"(%u cores too many)\n", n_cores - app.n_cores);
return -1;
}
for (i = 0; i < n_cores; i++) {
struct app_core_params *p = &app.cores[i];
char section_name[16];
const char *entry;
uint32_t j;
/* [core X] */
rte_snprintf(section_name, sizeof(section_name), "core %u", i);
if (!rte_cfgfile_has_section(file, section_name)) {
rte_panic("Config file parse error: core IDs are not "
"sequential (core %u missing)\n", i);
return -1;
}
/* type */
entry = rte_cfgfile_get_entry(file, section_name, "type");
if (!entry) {
rte_panic("Config file parse error: core %u type not "
"defined\n", i);
return -1;
}
if ((app_core_type_string_to_id(entry, &p->core_type) != 0) ||
(p->core_type == APP_CORE_NONE)) {
rte_panic("Config file parse error: core %u type "
"error\n", i);
return -1;
}
/* queues in */
entry = rte_cfgfile_get_entry(file, section_name, "queues in");
if (!entry) {
rte_panic("Config file parse error: core %u queues in "
"not defined\n", i);
return -1;
}
for (j = 0; (j < APP_MAX_SWQ_PER_CORE) && (entry != NULL);
j++) {
char *next;
p->swq_in[j] = (uint32_t) strtol(entry, &next, 10);
if (next == entry)
break;
entry = next;
}
if ((j != APP_MAX_SWQ_PER_CORE) || (*entry != '\0')) {
rte_panic("Config file parse error: core %u queues in "
"error\n", i);
return -1;
}
/* queues out */
entry = rte_cfgfile_get_entry(file, section_name, "queues out");
if (!entry) {
rte_panic("Config file parse error: core %u queues out "
"not defined\n", i);
return -1;
}
for (j = 0; (j < APP_MAX_SWQ_PER_CORE) && (entry != NULL);
j++) {
char *next;
p->swq_out[j] = (uint32_t) strtol(entry, &next, 10);
if (next == entry)
break;
entry = next;
}
if ((j != APP_MAX_SWQ_PER_CORE) || (*entry != '\0')) {
rte_panic("Config file parse error: core %u queues out "
"error\n", i);
return -1;
}
}
rte_cfgfile_close(file);
return 0;
}
void app_cores_config_print(void)
{
uint32_t i;
for (i = 0; i < RTE_MAX_LCORE; i++) {
struct app_core_params *p = &app.cores[i];
uint32_t j;
if (app.cores[i].core_type == APP_CORE_NONE)
continue;
printf("---> core %u: id = %u type = %6s [", i, p->core_id,
app_core_type_id_to_string(p->core_type));
for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++)
printf("%2d ", (int) p->swq_in[j]);
printf("] [");
for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++)
printf("%2d ", (int) p->swq_out[j]);
printf("]\n");
}
}
static int
app_install_port_mask(const char *arg)
{
char *end = NULL;
uint64_t port_mask;
uint32_t i;
if (arg[0] == '\0')
return -1;
port_mask = strtoul(arg, &end, 16);
if ((end == NULL) || (*end != '\0'))
return -2;
if (port_mask == 0)
return -3;
app.n_ports = 0;
for (i = 0; i < 64; i++) {
if ((port_mask & (1LLU << i)) == 0)
continue;
if (app.n_ports >= APP_MAX_PORTS)
return -4;
app.ports[app.n_ports] = i;
app.n_ports++;
}
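/* The port count must be a power of 2: flows are spread across ports with
a bit mask (i & (n_ports - 1)) in the flow classification core. */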
if (!rte_is_power_of_2(app.n_ports))
return -5;
return 0;
}
int
app_parse_args(int argc, char **argv)
{
int opt, ret;
char **argvopt;
int option_index;
char *prgname = argv[0];
static struct option lgopts[] = {
{NULL, 0, 0, 0}
};
uint64_t core_mask = app_get_core_mask();
app.n_cores = __builtin_popcountll(core_mask);
argvopt = argv;
while ((opt = getopt_long(argc, argvopt, "p:f:", lgopts,
&option_index)) != EOF) {
switch (opt) {
case 'p':
if (app_install_port_mask(optarg) != 0)
rte_panic("PORT_MASK should specify a number "
"of ports that is power of 2 less or "
"equal to %u\n", APP_MAX_PORTS);
break;
case 'f':
app_install_cfgfile(optarg);
break;
default:
return -1;
}
}
app_install_coremask(core_mask);
app_cores_config_print();
if (optind >= 0)
argv[optind - 1] = prgname;
ret = optind - 1;
optind = 0; /* reset getopt lib */
return ret;
}

examples/ip_pipeline/init.c (new file, 614 lines)
@@ -0,0 +1,614 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>
#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include "main.h"
#define NA APP_SWQ_INVALID
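/*
 * Default core layout: RX -> FC -> RT -> TX, chained through SW queues
 * 0..11 (4 queues per hop). Queues 12..14 are the MASTER core's request
 * rings to RX/FC/RT and queues 15..17 the corresponding response rings.
 */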
struct app_params app = {
/* CPU cores */
.cores = {
{0, APP_CORE_MASTER, {15, 16, 17, NA, NA, NA, NA, NA},
{12, 13, 14, NA, NA, NA, NA, NA} },
{0, APP_CORE_RX, {NA, NA, NA, NA, NA, NA, NA, 12},
{ 0, 1, 2, 3, NA, NA, NA, 15} },
{0, APP_CORE_FC, { 0, 1, 2, 3, NA, NA, NA, 13},
{ 4, 5, 6, 7, NA, NA, NA, 16} },
{0, APP_CORE_RT, { 4, 5, 6, 7, NA, NA, NA, 14},
{ 8, 9, 10, 11, NA, NA, NA, 17} },
{0, APP_CORE_TX, { 8, 9, 10, 11, NA, NA, NA, NA},
{NA, NA, NA, NA, NA, NA, NA, NA} },
},
/* Ports */
.n_ports = APP_MAX_PORTS,
.rsz_hwq_rx = 128,
.rsz_hwq_tx = 512,
.bsz_hwq_rd = 64,
.bsz_hwq_wr = 64,
.port_conf = {
.rxmode = {
.split_hdr_size = 0,
.header_split = 0, /* Header Split disabled */
.hw_ip_checksum = 1, /* IP checksum offload enabled */
.hw_vlan_filter = 0, /* VLAN filtering disabled */
.jumbo_frame = 1, /* Jumbo Frame Support enabled */
.max_rx_pkt_len = 9000, /* Jumbo Frame MAC pkt length */
.hw_strip_crc = 0, /* CRC stripping by hardware disabled */
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6,
},
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
},
},
.rx_conf = {
.rx_thresh = {
.pthresh = 8,
.hthresh = 8,
.wthresh = 4,
},
.rx_free_thresh = 64,
.rx_drop_en = 0,
},
.tx_conf = {
.tx_thresh = {
.pthresh = 36,
.hthresh = 0,
.wthresh = 0,
},
.tx_free_thresh = 0,
.tx_rs_thresh = 0,
},
/* SWQs */
.rsz_swq = 128,
.bsz_swq_rd = 64,
.bsz_swq_wr = 64,
/* Buffer pool */
.pool_buffer_size = 2048 + sizeof(struct rte_mbuf) +
RTE_PKTMBUF_HEADROOM,
.pool_size = 32 * 1024,
.pool_cache_size = 256,
/* Message buffer pool */
.msg_pool_buffer_size = 256,
.msg_pool_size = 1024,
.msg_pool_cache_size = 64,
/* Rule tables */
.max_arp_rules = 1 << 10,
.max_firewall_rules = 1 << 5,
.max_routing_rules = 1 << 24,
.max_flow_rules = 1 << 24,
/* Application processing */
.ether_hdr_pop_push = 0,
};
struct app_core_params *
app_get_core_params(uint32_t core_id)
{
uint32_t i;
for (i = 0; i < RTE_MAX_LCORE; i++) {
struct app_core_params *p = &app.cores[i];
if (p->core_id != core_id)
continue;
return p;
}
return NULL;
}
static uint32_t
app_get_n_swq_in(void)
{
uint32_t max_swq_id = 0, i, j;
for (i = 0; i < RTE_MAX_LCORE; i++) {
struct app_core_params *p = &app.cores[i];
if (p->core_type == APP_CORE_NONE)
continue;
for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++) {
uint32_t swq_id = p->swq_in[j];
if ((swq_id != APP_SWQ_INVALID) &&
(swq_id > max_swq_id))
max_swq_id = swq_id;
}
}
return (1 + max_swq_id);
}
static uint32_t
app_get_n_swq_out(void)
{
uint32_t max_swq_id = 0, i, j;
for (i = 0; i < RTE_MAX_LCORE; i++) {
struct app_core_params *p = &app.cores[i];
if (p->core_type == APP_CORE_NONE)
continue;
for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++) {
uint32_t swq_id = p->swq_out[j];
if ((swq_id != APP_SWQ_INVALID) &&
(swq_id > max_swq_id))
max_swq_id = swq_id;
}
}
return (1 + max_swq_id);
}
static uint32_t
app_get_swq_in_count(uint32_t swq_id)
{
uint32_t n, i;
for (n = 0, i = 0; i < RTE_MAX_LCORE; i++) {
struct app_core_params *p = &app.cores[i];
uint32_t j;
if (p->core_type == APP_CORE_NONE)
continue;
for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++)
if (p->swq_in[j] == swq_id)
n++;
}
return n;
}
static uint32_t
app_get_swq_out_count(uint32_t swq_id)
{
uint32_t n, i;
for (n = 0, i = 0; i < RTE_MAX_LCORE; i++) {
struct app_core_params *p = &app.cores[i];
uint32_t j;
if (p->core_type == APP_CORE_NONE)
continue;
for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++)
if (p->swq_out[j] == swq_id)
n++;
}
return n;
}
void
app_check_core_params(void)
{
uint32_t n_swq_in = app_get_n_swq_in();
uint32_t n_swq_out = app_get_n_swq_out();
uint32_t i;
/* Check that range of SW queues is contiguous and each SW queue has
exactly one reader and one writer */
if (n_swq_in != n_swq_out)
rte_panic("Number of input SW queues is not equal to the "
"number of output SW queues\n");
for (i = 0; i < n_swq_in; i++) {
uint32_t n = app_get_swq_in_count(i);
if (n == 0)
rte_panic("SW queue %u has no reader\n", i);
if (n > 1)
rte_panic("SW queue %u has more than one reader\n", i);
}
for (i = 0; i < n_swq_out; i++) {
uint32_t n = app_get_swq_out_count(i);
if (n == 0)
rte_panic("SW queue %u has no writer\n", i);
if (n > 1)
rte_panic("SW queue %u has more than one writer\n", i);
}
/* Check the request and response queues are valid */
for (i = 0; i < RTE_MAX_LCORE; i++) {
struct app_core_params *p = &app.cores[i];
uint32_t ring_id_req, ring_id_resp;
if ((p->core_type != APP_CORE_FC) &&
(p->core_type != APP_CORE_FW) &&
(p->core_type != APP_CORE_RT)) {
continue;
}
ring_id_req = p->swq_in[APP_SWQ_IN_REQ];
if (ring_id_req == APP_SWQ_INVALID)
rte_panic("Core %u of type %u has invalid request "
"queue ID\n", p->core_id, p->core_type);
ring_id_resp = p->swq_out[APP_SWQ_OUT_RESP];
if (ring_id_resp == APP_SWQ_INVALID)
rte_panic("Core %u of type %u has invalid response "
"queue ID\n", p->core_id, p->core_type);
}
return;
}
uint32_t
app_get_first_core_id(enum app_core_type core_type)
{
uint32_t i;
for (i = 0; i < RTE_MAX_LCORE; i++) {
struct app_core_params *p = &app.cores[i];
if (p->core_type == core_type)
return p->core_id;
}
return RTE_MAX_LCORE;
}
struct rte_ring *
app_get_ring_req(uint32_t core_id)
{
struct app_core_params *p = app_get_core_params(core_id);
uint32_t ring_req_id = p->swq_in[APP_SWQ_IN_REQ];
return app.rings[ring_req_id];
}
struct rte_ring *
app_get_ring_resp(uint32_t core_id)
{
struct app_core_params *p = app_get_core_params(core_id);
uint32_t ring_resp_id = p->swq_out[APP_SWQ_OUT_RESP];
return app.rings[ring_resp_id];
}
static void
app_init_mbuf_pools(void)
{
/* Init the buffer pool */
RTE_LOG(INFO, USER1, "Creating the mbuf pool ...\n");
app.pool = rte_mempool_create(
"mempool",
app.pool_size,
app.pool_buffer_size,
app.pool_cache_size,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
rte_pktmbuf_init, NULL,
rte_socket_id(),
0);
if (app.pool == NULL)
rte_panic("Cannot create mbuf pool\n");
/* Init the indirect buffer pool */
RTE_LOG(INFO, USER1, "Creating the indirect mbuf pool ...\n");
app.indirect_pool = rte_mempool_create(
"indirect mempool",
app.pool_size,
sizeof(struct rte_mbuf) + sizeof(struct app_pkt_metadata),
app.pool_cache_size,
0,
NULL, NULL,
rte_pktmbuf_init, NULL,
rte_socket_id(),
0);
if (app.indirect_pool == NULL)
rte_panic("Cannot create mbuf pool\n");
/* Init the message buffer pool */
RTE_LOG(INFO, USER1, "Creating the message pool ...\n");
app.msg_pool = rte_mempool_create(
"mempool msg",
app.msg_pool_size,
app.msg_pool_buffer_size,
app.msg_pool_cache_size,
0,
NULL, NULL,
rte_ctrlmbuf_init, NULL,
rte_socket_id(),
0);
if (app.msg_pool == NULL)
rte_panic("Cannot create message pool\n");
}
static void
app_init_rings(void)
{
uint32_t n_swq, i;
n_swq = app_get_n_swq_in();
RTE_LOG(INFO, USER1, "Initializing %u SW rings ...\n", n_swq);
app.rings = rte_malloc_socket(NULL, n_swq * sizeof(struct rte_ring *),
CACHE_LINE_SIZE, rte_socket_id());
if (app.rings == NULL)
rte_panic("Cannot allocate memory to store ring pointers\n");
for (i = 0; i < n_swq; i++) {
struct rte_ring *ring;
char name[32];
rte_snprintf(name, sizeof(name), "app_ring_%u", i);
ring = rte_ring_create(
name,
app.rsz_swq,
rte_socket_id(),
RING_F_SP_ENQ | RING_F_SC_DEQ);
if (ring == NULL)
rte_panic("Cannot create ring %u\n", i);
app.rings[i] = ring;
}
}
static void
app_ports_check_link(void)
{
uint32_t all_ports_up, i;
all_ports_up = 1;
for (i = 0; i < app.n_ports; i++) {
struct rte_eth_link link;
uint32_t port;
port = app.ports[i];
memset(&link, 0, sizeof(link));
rte_eth_link_get_nowait(port, &link);
RTE_LOG(INFO, USER1, "Port %u (%u Gbps) %s\n",
port,
link.link_speed / 1000,
link.link_status ? "UP" : "DOWN");
if (link.link_status == 0)
all_ports_up = 0;
}
if (all_ports_up == 0)
rte_panic("Some NIC ports are DOWN\n");
}
static void
app_init_ports(void)
{
uint32_t i;
/* Init driver */
RTE_LOG(INFO, USER1, "Initializing the PMD driver ...\n");
if (rte_eal_pci_probe() < 0)
rte_panic("Cannot probe PCI\n");
/* Init NIC ports, then start the ports */
for (i = 0; i < app.n_ports; i++) {
uint32_t port;
int ret;
port = app.ports[i];
RTE_LOG(INFO, USER1, "Initializing NIC port %u ...\n", port);
/* Init port */
ret = rte_eth_dev_configure(
port,
1,
1,
&app.port_conf);
if (ret < 0)
rte_panic("Cannot init NIC port %u (%d)\n", port, ret);
rte_eth_promiscuous_enable(port);
/* Init RX queues */
ret = rte_eth_rx_queue_setup(
port,
0,
app.rsz_hwq_rx,
rte_eth_dev_socket_id(port),
&app.rx_conf,
app.pool);
if (ret < 0)
rte_panic("Cannot init RX for port %u (%d)\n",
(uint32_t) port, ret);
/* Init TX queues */
ret = rte_eth_tx_queue_setup(
port,
0,
app.rsz_hwq_tx,
rte_eth_dev_socket_id(port),
&app.tx_conf);
if (ret < 0)
rte_panic("Cannot init TX for port %u (%d)\n", port,
ret);
/* Start port */
ret = rte_eth_dev_start(port);
if (ret < 0)
rte_panic("Cannot start port %u (%d)\n", port, ret);
}
app_ports_check_link();
}
#define APP_PING_TIMEOUT_SEC 5
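/* Send a PING request to every worker core that owns request/response
rings and wait (up to 5 seconds per core) for its response, proving that
all pipeline cores are alive before the CLI starts. */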
void
app_ping(void)
{
unsigned i;
uint64_t timestamp, diff_tsc;
const uint64_t timeout = rte_get_tsc_hz() * APP_PING_TIMEOUT_SEC;
for (i = 0; i < RTE_MAX_LCORE; i++) {
struct app_core_params *p = &app.cores[i];
struct rte_ring *ring_req, *ring_resp;
void *msg;
struct app_msg_req *req;
int status;
if ((p->core_type != APP_CORE_FC) &&
(p->core_type != APP_CORE_FW) &&
(p->core_type != APP_CORE_RT) &&
(p->core_type != APP_CORE_RX))
continue;
ring_req = app_get_ring_req(p->core_id);
ring_resp = app_get_ring_resp(p->core_id);
/* Fill request message */
msg = (void *)rte_ctrlmbuf_alloc(app.msg_pool);
if (msg == NULL)
rte_panic("Unable to allocate new message\n");
req = (struct app_msg_req *)
((struct rte_mbuf *)msg)->ctrl.data;
req->type = APP_MSG_REQ_PING;
/* Send request */
do {
status = rte_ring_sp_enqueue(ring_req, msg);
} while (status == -ENOBUFS);
/* Wait for response */
timestamp = rte_rdtsc();
do {
status = rte_ring_sc_dequeue(ring_resp, &msg);
diff_tsc = rte_rdtsc() - timestamp;
if (unlikely(diff_tsc > timeout))
rte_panic("Core %u of type %d does not respond "
"to requests\n", p->core_id,
p->core_type);
} while (status != 0);
/* Free message buffer */
rte_ctrlmbuf_free(msg);
}
}
static void
app_init_etc(void)
{
if ((app_get_first_core_id(APP_CORE_IPV4_FRAG) != RTE_MAX_LCORE) ||
(app_get_first_core_id(APP_CORE_IPV4_RAS) != RTE_MAX_LCORE)) {
RTE_LOG(INFO, USER1,
"Activating the Ethernet header pop/push ...\n");
app.ether_hdr_pop_push = 1;
}
}
void
app_init(void)
{
if ((sizeof(struct app_pkt_metadata) % CACHE_LINE_SIZE) != 0)
rte_panic("Application pkt meta-data size mismatch\n");
app_check_core_params();
app_init_mbuf_pools();
app_init_rings();
app_init_ports();
app_init_etc();
RTE_LOG(INFO, USER1, "Initialization completed\n");
}

examples/ip_pipeline/ip_pipeline.cfg (new file, 56 lines)
@@ -0,0 +1,56 @@
; BSD LICENSE
;
; Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
;
; * Redistributions of source code must retain the above copyright
; notice, this list of conditions and the following disclaimer.
; * Redistributions in binary form must reproduce the above copyright
; notice, this list of conditions and the following disclaimer in
; the documentation and/or other materials provided with the
; distribution.
; * Neither the name of Intel Corporation nor the names of its
; contributors may be used to endorse or promote products derived
; from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
; Core configuration
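; Each core defines 8 input and 8 output queue slots; -1 marks an unused
; slot, and for the worker cores the last slot carries the request (in)
; and response (out) ring shared with the MASTER core.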
[core 0]
type = MASTER
queues in = 15 16 17 -1 -1 -1 -1 -1
queues out = 12 13 14 -1 -1 -1 -1 -1
[core 1]
type = RX
queues in = -1 -1 -1 -1 -1 -1 -1 12
queues out = 0 1 2 3 -1 -1 -1 15
[core 2]
type = FC
queues in = 0 1 2 3 -1 -1 -1 13
queues out = 4 5 6 7 -1 -1 -1 16
[core 3]
type = RT
queues in = 4 5 6 7 -1 -1 -1 14
queues out = 8 9 10 11 -1 -1 -1 17
[core 4]
type = TX
queues in = 8 9 10 11 -1 -1 -1 -1
queues out = -1 -1 -1 -1 -1 -1 -1 -1

examples/ip_pipeline/ip_pipeline.sh (new file, 18 lines)
@@ -0,0 +1,18 @@
#Address Resolution Protocol (ARP) Table
#arp add iface ipaddr macaddr
arp add 0 0.0.0.1 0a:0b:0c:0d:0e:0f
arp add 1 0.128.0.1 1a:1b:1c:1d:1e:1f
#Routing Table
#route add ipaddr prefixlen iface gateway
route add 0.0.0.0 9 0 0.0.0.1
route add 0.128.0.0 9 1 0.128.0.1
#Flow Table
flow add all
#flow add 0.0.0.0 1.2.3.4 0 0 6 0
#flow add 10.11.12.13 0.0.0.0 0 0 6 1
#Firewall
#firewall add 1 0.0.0.0 0 0.0.0.0 9 0 65535 0 65535 6 0xf 0
#firewall add 1 0.0.0.0 0 0.128.0.0 9 0 65535 0 65535 6 0xf 1

examples/ip_pipeline/main.c (new file, 171 lines)
@@ -0,0 +1,171 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>
#include <unistd.h>
#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
#include "main.h"
int
MAIN(int argc, char **argv)
{
int ret;
/* Init EAL */
ret = rte_eal_init(argc, argv);
if (ret < 0)
return -1;
argc -= ret;
argv += ret;
/* Parse application arguments (after the EAL ones) */
ret = app_parse_args(argc, argv);
if (ret < 0) {
app_print_usage(argv[0]);
return -1;
}
/* Init */
app_init();
/* Launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(app_lcore_main_loop, NULL, CALL_MASTER);
return 0;
}
int
app_lcore_main_loop(__attribute__((unused)) void *arg)
{
uint32_t core_id, i;
core_id = rte_lcore_id();
for (i = 0; i < app.n_cores; i++) {
struct app_core_params *p = &app.cores[i];
if (p->core_id != core_id)
continue;
switch (p->core_type) {
case APP_CORE_MASTER:
app_ping();
app_main_loop_cmdline();
return 0;
case APP_CORE_RX:
app_main_loop_pipeline_rx();
/* app_main_loop_rx(); */
return 0;
case APP_CORE_TX:
app_main_loop_pipeline_tx();
/* app_main_loop_tx(); */
return 0;
case APP_CORE_PT:
/* app_main_loop_pipeline_passthrough(); */
app_main_loop_passthrough();
return 0;
case APP_CORE_FC:
app_main_loop_pipeline_flow_classification();
return 0;
case APP_CORE_FW:
#ifdef RTE_LIBRTE_ACL
app_main_loop_pipeline_firewall();
return 0;
#else
rte_exit(EXIT_FAILURE, "ACL not present in build\n");
#endif
case APP_CORE_RT:
app_main_loop_pipeline_routing();
return 0;
#ifdef RTE_MBUF_SCATTER_GATHER
case APP_CORE_IPV4_FRAG:
app_main_loop_pipeline_ipv4_frag();
return 0;
case APP_CORE_IPV4_RAS:
app_main_loop_pipeline_ipv4_ras();
return 0;
#else
case APP_CORE_IPV4_FRAG:
case APP_CORE_IPV4_RAS:
rte_exit(EXIT_FAILURE,
"mbuf chaining not present in build\n");
#endif
default:
rte_panic("%s: Invalid core type for core %u\n",
__func__, i);
}
}
rte_panic("%s: Algorithmic error\n", __func__);
return -1;
}

examples/ip_pipeline/main.h (new file, 306 lines)
@@ -0,0 +1,306 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _MAIN_H_
#define _MAIN_H_
#include <stdint.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ring.h>
#include <rte_ethdev.h>
#ifdef RTE_LIBRTE_ACL
#include <rte_table_acl.h>
#endif
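/*
 * 16-byte flow key: an IPv4 5-tuple padded to 16 bytes, with the ttl and
 * header checksum fields zeroed so the key can be hashed as two 64-bit
 * slabs.
 */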
struct app_flow_key {
union {
struct {
uint8_t ttl; /* needs to be set to 0 */
uint8_t proto;
uint16_t header_checksum; /* needs to be set to 0 */
uint32_t ip_src;
};
uint64_t slab0;
};
union {
struct {
uint32_t ip_dst;
uint16_t port_src;
uint16_t port_dst;
};
uint64_t slab1;
};
} __attribute__((__packed__));
struct app_arp_key {
uint32_t nh_ip;
uint32_t nh_iface;
} __attribute__((__packed__));
struct app_pkt_metadata {
uint32_t signature;
uint8_t reserved1[28];
struct app_flow_key flow_key;
struct app_arp_key arp_key;
struct ether_addr nh_arp;
uint8_t reserved3[2];
} __attribute__((__packed__));
#ifndef APP_MBUF_ARRAY_SIZE
#define APP_MBUF_ARRAY_SIZE 256
#endif
struct app_mbuf_array {
struct rte_mbuf *array[APP_MBUF_ARRAY_SIZE];
uint32_t n_mbufs;
};
#ifndef APP_MAX_PORTS
#define APP_MAX_PORTS 4
#endif
#ifndef APP_MAX_SWQ_PER_CORE
#define APP_MAX_SWQ_PER_CORE 8
#endif
#define APP_SWQ_INVALID ((uint32_t)(-1))
#define APP_SWQ_IN_REQ (APP_MAX_SWQ_PER_CORE - 1)
#define APP_SWQ_OUT_RESP (APP_MAX_SWQ_PER_CORE - 1)
enum app_core_type {
APP_CORE_NONE = 0, /* Unused */
APP_CORE_MASTER, /* Management */
APP_CORE_RX, /* Reception */
APP_CORE_TX, /* Transmission */
APP_CORE_PT, /* Pass-through */
APP_CORE_FC, /* Flow Classification */
APP_CORE_FW, /* Firewall */
APP_CORE_RT, /* Routing */
APP_CORE_TM, /* Traffic Management */
APP_CORE_IPV4_FRAG,/* IPv4 Fragmentation */
APP_CORE_IPV4_RAS, /* IPv4 Reassembly */
};
struct app_core_params {
uint32_t core_id;
enum app_core_type core_type;
/* SWQ map */
uint32_t swq_in[APP_MAX_SWQ_PER_CORE];
uint32_t swq_out[APP_MAX_SWQ_PER_CORE];
} __rte_cache_aligned;
struct app_params {
/* CPU cores */
struct app_core_params cores[RTE_MAX_LCORE];
uint32_t n_cores;
/* Ports */
uint32_t ports[APP_MAX_PORTS];
uint32_t n_ports;
uint32_t rsz_hwq_rx;
uint32_t rsz_hwq_tx;
uint32_t bsz_hwq_rd;
uint32_t bsz_hwq_wr;
struct rte_eth_conf port_conf;
struct rte_eth_rxconf rx_conf;
struct rte_eth_txconf tx_conf;
/* SW Queues (SWQs) */
struct rte_ring **rings;
uint32_t rsz_swq;
uint32_t bsz_swq_rd;
uint32_t bsz_swq_wr;
/* Buffer pool */
struct rte_mempool *pool;
struct rte_mempool *indirect_pool;
uint32_t pool_buffer_size;
uint32_t pool_size;
uint32_t pool_cache_size;
/* Message buffer pool */
struct rte_mempool *msg_pool;
uint32_t msg_pool_buffer_size;
uint32_t msg_pool_size;
uint32_t msg_pool_cache_size;
/* Rule tables */
uint32_t max_arp_rules;
uint32_t max_routing_rules;
uint32_t max_firewall_rules;
uint32_t max_flow_rules;
/* Processing */
uint32_t ether_hdr_pop_push;
} __rte_cache_aligned;
extern struct app_params app;
const char *app_core_type_id_to_string(enum app_core_type id);
int app_core_type_string_to_id(const char *string, enum app_core_type *id);
void app_cores_config_print(void);
void app_check_core_params(void);
struct app_core_params *app_get_core_params(uint32_t core_id);
uint32_t app_get_first_core_id(enum app_core_type core_type);
struct rte_ring *app_get_ring_req(uint32_t core_id);
struct rte_ring *app_get_ring_resp(uint32_t core_id);
int app_parse_args(int argc, char **argv);
void app_print_usage(char *prgname);
void app_init(void);
void app_ping(void);
int app_lcore_main_loop(void *arg);
/* Hash functions */
uint64_t test_hash(void *key, uint32_t key_size, uint64_t seed);
uint32_t rte_jhash2_16(uint32_t *k, uint32_t initval);
#if defined(__x86_64__)
uint32_t rte_aeshash_16(uint64_t *k, uint64_t seed);
uint32_t rte_crchash_16(uint64_t *k, uint64_t seed);
#endif
/* I/O with no pipeline */
void app_main_loop_rx(void);
void app_main_loop_tx(void);
void app_main_loop_passthrough(void);
/* Pipeline */
void app_main_loop_pipeline_rx(void);
void app_main_loop_pipeline_rx_frag(void);
void app_main_loop_pipeline_tx(void);
void app_main_loop_pipeline_tx_ras(void);
void app_main_loop_pipeline_flow_classification(void);
void app_main_loop_pipeline_firewall(void);
void app_main_loop_pipeline_routing(void);
void app_main_loop_pipeline_passthrough(void);
void app_main_loop_pipeline_ipv4_frag(void);
void app_main_loop_pipeline_ipv4_ras(void);
/* Command Line Interface (CLI) */
void app_main_loop_cmdline(void);
/* Messages */
enum app_msg_req_type {
APP_MSG_REQ_PING,
APP_MSG_REQ_FC_ADD,
APP_MSG_REQ_FC_DEL,
APP_MSG_REQ_FC_ADD_ALL,
APP_MSG_REQ_FW_ADD,
APP_MSG_REQ_FW_DEL,
APP_MSG_REQ_RT_ADD,
APP_MSG_REQ_RT_DEL,
APP_MSG_REQ_ARP_ADD,
APP_MSG_REQ_ARP_DEL,
APP_MSG_REQ_RX_PORT_ENABLE,
APP_MSG_REQ_RX_PORT_DISABLE,
};
struct app_msg_req {
enum app_msg_req_type type;
union {
struct {
uint32_t ip;
uint8_t depth;
uint8_t port;
uint32_t nh_ip;
} routing_add;
struct {
uint32_t ip;
uint8_t depth;
} routing_del;
struct {
uint8_t out_iface;
uint32_t nh_ip;
struct ether_addr nh_arp;
} arp_add;
struct {
uint8_t out_iface;
uint32_t nh_ip;
} arp_del;
struct {
union {
uint8_t key_raw[16];
struct app_flow_key key;
};
uint8_t port;
} flow_classif_add;
struct {
union {
uint8_t key_raw[16];
struct app_flow_key key;
};
} flow_classif_del;
#ifdef RTE_LIBRTE_ACL
struct {
struct rte_table_acl_rule_add_params add_params;
uint8_t port;
} firewall_add;
struct {
struct rte_table_acl_rule_delete_params delete_params;
} firewall_del;
#endif
struct {
uint8_t port;
} rx_up;
struct {
uint8_t port;
} rx_down;
};
};
struct app_msg_resp {
int result;
};
#define APP_FLUSH 0xFF
#ifdef RTE_EXEC_ENV_BAREMETAL
#define MAIN _main
#else
#define MAIN main
#endif
int MAIN(int argc, char **argv);
#endif /* _MAIN_H_ */

examples/ip_pipeline/pipeline_firewall.c (new file, 313 lines)
@@ -0,0 +1,313 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_byteorder.h>
#include <rte_port_ring.h>
#include <rte_table_acl.h>
#include <rte_pipeline.h>
#include "main.h"
struct app_core_firewall_message_handle_params {
struct rte_ring *ring_req;
struct rte_ring *ring_resp;
struct rte_pipeline *p;
uint32_t *port_out_id;
uint32_t table_id;
};
static void
app_message_handle(struct app_core_firewall_message_handle_params *params);
enum {
PROTO_FIELD_IPV4,
SRC_FIELD_IPV4,
DST_FIELD_IPV4,
SRCP_FIELD_IPV4,
DSTP_FIELD_IPV4,
NUM_FIELDS_IPV4
};
struct rte_acl_field_def ipv4_field_formats[NUM_FIELDS_IPV4] = {
{
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint8_t),
.field_index = PROTO_FIELD_IPV4,
.input_index = PROTO_FIELD_IPV4,
.offset = sizeof(struct ether_hdr) +
offsetof(struct ipv4_hdr, next_proto_id),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = SRC_FIELD_IPV4,
.input_index = SRC_FIELD_IPV4,
.offset = sizeof(struct ether_hdr) +
offsetof(struct ipv4_hdr, src_addr),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = DST_FIELD_IPV4,
.input_index = DST_FIELD_IPV4,
.offset = sizeof(struct ether_hdr) +
offsetof(struct ipv4_hdr, dst_addr),
},
{
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = SRCP_FIELD_IPV4,
.input_index = SRCP_FIELD_IPV4,
.offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr),
},
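/* Note: the two 16-bit port fields below share the same input_index, as
librte_acl groups input fields into 4-byte words. */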
{
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = DSTP_FIELD_IPV4,
.input_index = SRCP_FIELD_IPV4,
.offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
sizeof(uint16_t),
},
};
void
app_main_loop_pipeline_firewall(void) {
struct rte_pipeline_params pipeline_params = {
.name = "pipeline",
.socket_id = rte_socket_id(),
};
struct rte_pipeline *p;
uint32_t port_in_id[APP_MAX_PORTS];
uint32_t port_out_id[APP_MAX_PORTS];
uint32_t table_id;
uint32_t i;
uint32_t core_id = rte_lcore_id();
struct app_core_params *core_params = app_get_core_params(core_id);
struct app_core_firewall_message_handle_params mh_params;
if ((core_params == NULL) || (core_params->core_type != APP_CORE_FW))
rte_panic("Core %u misconfiguration\n", core_id);
RTE_LOG(INFO, USER1, "Core %u is doing firewall\n", core_id);
/* Pipeline configuration */
p = rte_pipeline_create(&pipeline_params);
if (p == NULL)
rte_panic("Unable to configure the pipeline\n");
/* Input port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_reader_params port_ring_params = {
.ring = app.rings[core_params->swq_in[i]],
};
struct rte_pipeline_port_in_params port_params = {
.ops = &rte_port_ring_reader_ops,
.arg_create = (void *) &port_ring_params,
.f_action = NULL,
.arg_ah = NULL,
.burst_size = app.bsz_swq_rd,
};
if (rte_pipeline_port_in_create(p, &port_params,
&port_in_id[i]))
rte_panic("Unable to configure input port for "
"ring %d\n", i);
}
/* Output port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_writer_params port_ring_params = {
.ring = app.rings[core_params->swq_out[i]],
.tx_burst_sz = app.bsz_swq_wr,
};
struct rte_pipeline_port_out_params port_params = {
.ops = &rte_port_ring_writer_ops,
.arg_create = (void *) &port_ring_params,
.f_action = NULL,
.f_action_bulk = NULL,
.arg_ah = NULL,
};
if (rte_pipeline_port_out_create(p, &port_params,
&port_out_id[i]))
rte_panic("Unable to configure output port for "
"ring %d\n", i);
}
/* Table configuration */
{
struct rte_table_acl_params table_acl_params = {
.name = "test", /* unique identifier for acl contexts */
.n_rules = app.max_firewall_rules,
.n_rule_fields = DIM(ipv4_field_formats),
};
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_acl_ops,
.arg_create = &table_acl_params,
.f_action_hit = NULL,
.f_action_miss = NULL,
.arg_ah = NULL,
.action_data_size = 0,
};
memcpy(table_acl_params.field_format, ipv4_field_formats,
sizeof(ipv4_field_formats));
if (rte_pipeline_table_create(p, &table_params, &table_id))
rte_panic("Unable to configure the ACL table\n");
}
/* Interconnecting ports and tables */
for (i = 0; i < app.n_ports; i++)
if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
table_id))
rte_panic("Unable to connect input port %u to "
"table %u\n", port_in_id[i], table_id);
/* Enable input ports */
for (i = 0; i < app.n_ports; i++)
if (rte_pipeline_port_in_enable(p, port_in_id[i]))
rte_panic("Unable to enable input port %u\n",
port_in_id[i]);
/* Check pipeline consistency */
if (rte_pipeline_check(p) < 0)
rte_panic("Pipeline consistency check failed\n");
/* Message handling */
mh_params.ring_req = app_get_ring_req(
app_get_first_core_id(APP_CORE_FW));
mh_params.ring_resp = app_get_ring_resp(
app_get_first_core_id(APP_CORE_FW));
mh_params.p = p;
mh_params.port_out_id = port_out_id;
mh_params.table_id = table_id;
/* Run-time */
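/* Flush the output ports and poll the request ring once every 256
iterations (APP_FLUSH = 0xFF). */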
for (i = 0; ; i++) {
rte_pipeline_run(p);
if ((i & APP_FLUSH) == 0) {
rte_pipeline_flush(p);
app_message_handle(&mh_params);
}
}
}
void
app_message_handle(struct app_core_firewall_message_handle_params *params)
{
struct rte_ring *ring_req = params->ring_req;
struct rte_ring *ring_resp;
struct rte_mbuf *msg;
struct app_msg_req *req;
struct app_msg_resp *resp;
struct rte_pipeline *p;
uint32_t *port_out_id;
uint32_t table_id;
int result;
/* Read request message */
result = rte_ring_sc_dequeue(ring_req, (void **) &msg);
if (result != 0)
return;
ring_resp = params->ring_resp;
p = params->p;
port_out_id = params->port_out_id;
table_id = params->table_id;
/* Handle request */
req = (struct app_msg_req *) msg->ctrl.data;
switch (req->type) {
case APP_MSG_REQ_PING:
{
result = 0;
break;
}
case APP_MSG_REQ_FW_ADD:
{
struct rte_pipeline_table_entry entry = {
.action = RTE_PIPELINE_ACTION_PORT,
{.port_id = port_out_id[req->firewall_add.port]},
};
struct rte_pipeline_table_entry *entry_ptr;
int key_found;
result = rte_pipeline_table_entry_add(p, table_id,
&req->firewall_add.add_params, &entry, &key_found,
&entry_ptr);
break;
}
case APP_MSG_REQ_FW_DEL:
{
int key_found;
result = rte_pipeline_table_entry_delete(p, table_id,
&req->firewall_del.delete_params, &key_found, NULL);
break;
}
default:
rte_panic("FW unrecognized message type (%u)\n", req->type);
}
/* Fill in response message */
resp = (struct app_msg_resp *) msg->ctrl.data;
resp->result = result;
/* Send response */
do {
result = rte_ring_sp_enqueue(ring_resp, (void *) msg);
} while (result == -ENOBUFS);
}

examples/ip_pipeline/pipeline_flow_classification.c (new file, 306 lines)
@@ -0,0 +1,306 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_byteorder.h>
#include <rte_port_ring.h>
#include <rte_table_hash.h>
#include <rte_pipeline.h>
#include "main.h"
struct app_core_fc_message_handle_params {
struct rte_ring *ring_req;
struct rte_ring *ring_resp;
struct rte_pipeline *p;
uint32_t *port_out_id;
uint32_t table_id;
};
static void
app_message_handle(struct app_core_fc_message_handle_params *params);
static int app_flow_classification_table_init(
struct rte_pipeline *p,
uint32_t *port_out_id,
uint32_t table_id)
{
struct app_flow_key flow_key;
uint32_t i;
/* Add entries to tables */
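/* 16M entries: one flow per destination IP i (stored in network byte
order), spread round-robin across the output ports. */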
for (i = 0; i < (1 << 24); i++) {
struct rte_pipeline_table_entry entry = {
.action = RTE_PIPELINE_ACTION_PORT,
{.port_id = port_out_id[i & (app.n_ports - 1)]},
};
struct rte_pipeline_table_entry *entry_ptr;
int key_found, status;
flow_key.ttl = 0;
flow_key.proto = 6; /* TCP */
flow_key.header_checksum = 0;
flow_key.ip_src = 0;
flow_key.ip_dst = rte_bswap32(i);
flow_key.port_src = 0;
flow_key.port_dst = 0;
status = rte_pipeline_table_entry_add(p, table_id,
(void *) &flow_key, &entry, &key_found, &entry_ptr);
if (status < 0)
rte_panic("Unable to add entry to table %u (%d)\n",
table_id, status);
}
return 0;
}
void
app_main_loop_pipeline_flow_classification(void) {
struct rte_pipeline_params pipeline_params = {
.name = "pipeline",
.socket_id = rte_socket_id(),
};
struct rte_pipeline *p;
uint32_t port_in_id[APP_MAX_PORTS];
uint32_t port_out_id[APP_MAX_PORTS];
uint32_t table_id;
uint32_t i;
uint32_t core_id = rte_lcore_id();
struct app_core_params *core_params = app_get_core_params(core_id);
struct app_core_fc_message_handle_params mh_params;
if ((core_params == NULL) || (core_params->core_type != APP_CORE_FC))
rte_panic("Core %u misconfiguration\n", core_id);
RTE_LOG(INFO, USER1, "Core %u is doing flow classification "
"(pipeline with hash table, 16-byte key, LRU)\n", core_id);
/* Pipeline configuration */
p = rte_pipeline_create(&pipeline_params);
if (p == NULL)
rte_panic("Unable to configure the pipeline\n");
/* Input port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_reader_params port_ring_params = {
.ring = app.rings[core_params->swq_in[i]],
};
struct rte_pipeline_port_in_params port_params = {
.ops = &rte_port_ring_reader_ops,
.arg_create = (void *) &port_ring_params,
.f_action = NULL,
.arg_ah = NULL,
.burst_size = app.bsz_swq_rd,
};
if (rte_pipeline_port_in_create(p, &port_params,
&port_in_id[i]))
rte_panic("Unable to configure input port for "
"ring %d\n", i);
}
/* Output port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_writer_params port_ring_params = {
.ring = app.rings[core_params->swq_out[i]],
.tx_burst_sz = app.bsz_swq_wr,
};
struct rte_pipeline_port_out_params port_params = {
.ops = &rte_port_ring_writer_ops,
.arg_create = (void *) &port_ring_params,
.f_action = NULL,
.f_action_bulk = NULL,
.arg_ah = NULL,
};
if (rte_pipeline_port_out_create(p, &port_params,
&port_out_id[i]))
rte_panic("Unable to configure output port for "
"ring %d\n", i);
}
/* Table configuration */
{
struct rte_table_hash_key16_lru_params table_hash_params = {
.n_entries = 1 << 24,
.signature_offset = __builtin_offsetof(
struct app_pkt_metadata, signature),
.key_offset = __builtin_offsetof(
struct app_pkt_metadata, flow_key),
.f_hash = test_hash,
.seed = 0,
};
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_hash_key16_lru_ops,
.arg_create = &table_hash_params,
.f_action_hit = NULL,
.f_action_miss = NULL,
.arg_ah = NULL,
.action_data_size = 0,
};
if (rte_pipeline_table_create(p, &table_params, &table_id))
rte_panic("Unable to configure the hash table\n");
}
/* Interconnecting ports and tables */
for (i = 0; i < app.n_ports; i++)
if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
table_id))
rte_panic("Unable to connect input port %u to "
"table %u\n", port_in_id[i], table_id);
/* Enable input ports */
for (i = 0; i < app.n_ports; i++)
if (rte_pipeline_port_in_enable(p, port_in_id[i]))
rte_panic("Unable to enable input port %u\n",
port_in_id[i]);
/* Check pipeline consistency */
if (rte_pipeline_check(p) < 0)
rte_panic("Pipeline consistency check failed\n");
/* Message handling */
mh_params.ring_req = app_get_ring_req(
app_get_first_core_id(APP_CORE_FC));
mh_params.ring_resp = app_get_ring_resp(
app_get_first_core_id(APP_CORE_FC));
mh_params.p = p;
mh_params.port_out_id = port_out_id;
mh_params.table_id = table_id;
/* Run-time */
for (i = 0; ; i++) {
rte_pipeline_run(p);
if ((i & APP_FLUSH) == 0) {
rte_pipeline_flush(p);
app_message_handle(&mh_params);
}
}
}
void
app_message_handle(struct app_core_fc_message_handle_params *params)
{
struct rte_ring *ring_req = params->ring_req;
struct rte_ring *ring_resp;
void *msg;
struct app_msg_req *req;
struct app_msg_resp *resp;
struct rte_pipeline *p;
uint32_t *port_out_id;
uint32_t table_id;
int result;
/* Read request message */
result = rte_ring_sc_dequeue(ring_req, &msg);
if (result != 0)
return;
ring_resp = params->ring_resp;
p = params->p;
port_out_id = params->port_out_id;
table_id = params->table_id;
/* Handle request */
req = (struct app_msg_req *) ((struct rte_mbuf *)msg)->ctrl.data;
switch (req->type) {
case APP_MSG_REQ_PING:
{
result = 0;
break;
}
case APP_MSG_REQ_FC_ADD_ALL:
{
result = app_flow_classification_table_init(p, port_out_id,
table_id);
break;
}
case APP_MSG_REQ_FC_ADD:
{
struct rte_pipeline_table_entry entry = {
.action = RTE_PIPELINE_ACTION_PORT,
{.port_id = port_out_id[req->flow_classif_add.port]},
};
struct rte_pipeline_table_entry *entry_ptr;
int key_found;
result = rte_pipeline_table_entry_add(p, table_id,
req->flow_classif_add.key_raw, &entry, &key_found,
&entry_ptr);
break;
}
case APP_MSG_REQ_FC_DEL:
{
int key_found;
result = rte_pipeline_table_entry_delete(p, table_id,
req->flow_classif_del.key_raw, &key_found, NULL);
break;
}
default:
rte_panic("FC Unrecognized message type (%u)\n", req->type);
}
/* Fill in response message */
resp = (struct app_msg_resp *) ((struct rte_mbuf *)msg)->ctrl.data;
resp->result = result;
/* Send response */
do {
result = rte_ring_sp_enqueue(ring_resp, msg);
} while (result == -ENOBUFS);
}

examples/ip_pipeline/pipeline_ipv4_frag.c (new file, 184 lines)
@@ -0,0 +1,184 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_port_ethdev.h>
#include <rte_port_ring.h>
#include <rte_port_frag.h>
#include <rte_table_stub.h>
#include <rte_pipeline.h>
#include "main.h"
void
app_main_loop_pipeline_ipv4_frag(void) {
struct rte_pipeline *p;
uint32_t port_in_id[APP_MAX_PORTS];
uint32_t port_out_id[APP_MAX_PORTS];
uint32_t table_id[APP_MAX_PORTS];
uint32_t i;
uint32_t core_id = rte_lcore_id();
struct app_core_params *core_params = app_get_core_params(core_id);
if ((core_params == NULL) ||
(core_params->core_type != APP_CORE_IPV4_FRAG))
rte_panic("Core %u misconfiguration\n", core_id);
RTE_LOG(INFO, USER1, "Core %u is doing IPv4 fragmentation\n", core_id);
/* Pipeline configuration */
struct rte_pipeline_params pipeline_params = {
.name = "pipeline",
.socket_id = rte_socket_id(),
};
p = rte_pipeline_create(&pipeline_params);
if (p == NULL)
rte_panic("%s: Unable to configure the pipeline\n", __func__);
/* Input port configuration */
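/*
 * Input ports are IPv4 fragmentation readers: packets dequeued from the
 * ring that exceed the configured MTU (1500 bytes here) are split into
 * fragments, with fragment bodies carried by indirect mbufs from
 * app.indirect_pool that reference the original packet data.
 */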
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_reader_ipv4_frag_params
port_frag_params = {
.ring = app.rings[core_params->swq_in[i]],
.mtu = 1500,
.metadata_size = sizeof(struct app_pkt_metadata),
.pool_direct = app.pool,
.pool_indirect = app.indirect_pool,
};
struct rte_pipeline_port_in_params port_params = {
.ops = &rte_port_ring_reader_ipv4_frag_ops,
.arg_create = (void *) &port_frag_params,
.f_action = NULL,
.arg_ah = NULL,
.burst_size = app.bsz_swq_rd,
};
if (rte_pipeline_port_in_create(p, &port_params,
&port_in_id[i]))
rte_panic("%s: Unable to configure input port %i\n",
__func__, i);
}
/* Output port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_writer_params port_ring_params = {
.ring = app.rings[core_params->swq_out[i]],
.tx_burst_sz = app.bsz_swq_wr,
};
struct rte_pipeline_port_out_params port_params = {
.ops = &rte_port_ring_writer_ops,
.arg_create = (void *) &port_ring_params,
.f_action = NULL,
.f_action_bulk = NULL,
.arg_ah = NULL,
};
if (rte_pipeline_port_out_create(p, &port_params,
&port_out_id[i]))
rte_panic("%s: Unable to configure output port %i\n",
__func__, i);
}
/* Table configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_stub_ops,
.arg_create = NULL,
.f_action_hit = NULL,
.f_action_miss = NULL,
.arg_ah = NULL,
.action_data_size = 0,
};
if (rte_pipeline_table_create(p, &table_params, &table_id[i]))
rte_panic("%s: Unable to configure table %u\n",
__func__, table_id[i]);
}
/* Interconnecting ports and tables */
for (i = 0; i < app.n_ports; i++)
if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
table_id[i]))
rte_panic("%s: Unable to connect input port %u to "
"table %u\n", __func__, port_in_id[i],
table_id[i]);
/* Add entries to tables */
for (i = 0; i < app.n_ports; i++) {
struct rte_pipeline_table_entry default_entry = {
.action = RTE_PIPELINE_ACTION_PORT,
{.port_id = port_out_id[i]},
};
struct rte_pipeline_table_entry *default_entry_ptr;
if (rte_pipeline_table_default_entry_add(p, table_id[i],
&default_entry, &default_entry_ptr))
rte_panic("%s: Unable to add default entry to "
"table %u\n", __func__, table_id[i]);
}
/* Enable input ports */
for (i = 0; i < app.n_ports; i++)
if (rte_pipeline_port_in_enable(p, port_in_id[i]))
rte_panic("Unable to enable input port %u\n",
port_in_id[i]);
/* Check pipeline consistency */
if (rte_pipeline_check(p) < 0)
rte_panic("%s: Pipeline consistency check failed\n", __func__);
/* Run-time */
for (i = 0; ; i++) {
rte_pipeline_run(p);
if ((i & APP_FLUSH) == 0)
rte_pipeline_flush(p);
}
}

@ -0,0 +1,181 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_port_ethdev.h>
#include <rte_port_ring.h>
#include <rte_port_ras.h>
#include <rte_table_stub.h>
#include <rte_pipeline.h>
#include "main.h"
void
app_main_loop_pipeline_ipv4_ras(void) {
struct rte_pipeline *p;
uint32_t port_in_id[APP_MAX_PORTS];
uint32_t port_out_id[APP_MAX_PORTS];
uint32_t table_id[APP_MAX_PORTS];
uint32_t i;
uint32_t core_id = rte_lcore_id();
struct app_core_params *core_params = app_get_core_params(core_id);
if ((core_params == NULL) ||
(core_params->core_type != APP_CORE_IPV4_RAS)) {
rte_panic("Core %u misconfiguration\n", core_id);
}
RTE_LOG(INFO, USER1, "Core %u is doing IPv4 reassembly\n", core_id);
/* Pipeline configuration */
struct rte_pipeline_params pipeline_params = {
.name = "pipeline",
.socket_id = rte_socket_id(),
};
p = rte_pipeline_create(&pipeline_params);
if (p == NULL)
rte_panic("%s: Unable to configure the pipeline\n", __func__);
/* Input port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_reader_params port_ring_params = {
.ring = app.rings[core_params->swq_in[i]],
};
struct rte_pipeline_port_in_params port_params = {
.ops = &rte_port_ring_reader_ops,
.arg_create = (void *) &port_ring_params,
.f_action = NULL,
.arg_ah = NULL,
.burst_size = app.bsz_swq_rd,
};
if (rte_pipeline_port_in_create(p, &port_params,
&port_in_id[i]))
rte_panic("%s: Unable to configure input port %i\n",
__func__, i);
}
/* Output port configuration */
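/*
 * Reassembly happens on the output side: the IPv4 RAS ring writer collects
 * fragments and enqueues only fully reassembled datagrams onto the
 * destination ring.
 */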
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_writer_params port_ring_ipv4_ras_params = {
.ring = app.rings[core_params->swq_out[i]],
.tx_burst_sz = app.bsz_swq_wr,
};
struct rte_pipeline_port_out_params port_params = {
.ops = &rte_port_ring_writer_ipv4_ras_ops,
.arg_create = (void *) &port_ring_ipv4_ras_params,
.f_action = NULL,
.f_action_bulk = NULL,
.arg_ah = NULL,
};
if (rte_pipeline_port_out_create(p, &port_params,
&port_out_id[i]))
rte_panic("%s: Unable to configure output port %i\n",
__func__, i);
}
/* Table configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_stub_ops,
.arg_create = NULL,
.f_action_hit = NULL,
.f_action_miss = NULL,
.arg_ah = NULL,
.action_data_size = 0,
};
if (rte_pipeline_table_create(p, &table_params, &table_id[i]))
rte_panic("%s: Unable to configure table %u\n",
__func__, table_id[i]);
}
/* Interconnecting ports and tables */
for (i = 0; i < app.n_ports; i++)
if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
table_id[i]))
rte_panic("%s: Unable to connect input port %u to "
"table %u\n", __func__, port_in_id[i],
table_id[i]);
/* Add entries to tables */
for (i = 0; i < app.n_ports; i++) {
struct rte_pipeline_table_entry default_entry = {
.action = RTE_PIPELINE_ACTION_PORT,
{.port_id = port_out_id[i]},
};
struct rte_pipeline_table_entry *default_entry_ptr;
if (rte_pipeline_table_default_entry_add(p, table_id[i],
&default_entry,
&default_entry_ptr))
rte_panic("%s: Unable to add default entry to "
"table %u\n", __func__, table_id[i]);
}
/* Enable input ports */
for (i = 0; i < app.n_ports; i++)
if (rte_pipeline_port_in_enable(p, port_in_id[i]))
rte_panic("Unable to enable input port %u\n",
port_in_id[i]);
/* Check pipeline consistency */
if (rte_pipeline_check(p) < 0)
rte_panic("%s: Pipeline consistency check failed\n", __func__);
/* Run-time */
for (i = 0; ; i++) {
rte_pipeline_run(p);
if ((i & APP_FLUSH) == 0)
rte_pipeline_flush(p);
}
}

@ -0,0 +1,213 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_port_ring.h>
#include <rte_table_stub.h>
#include <rte_pipeline.h>
#include "main.h"
void
app_main_loop_pipeline_passthrough(void) {
struct rte_pipeline_params pipeline_params = {
.name = "pipeline",
.socket_id = rte_socket_id(),
};
struct rte_pipeline *p;
uint32_t port_in_id[APP_MAX_PORTS];
uint32_t port_out_id[APP_MAX_PORTS];
uint32_t table_id[APP_MAX_PORTS];
uint32_t i;
uint32_t core_id = rte_lcore_id();
struct app_core_params *core_params = app_get_core_params(core_id);
if ((core_params == NULL) || (core_params->core_type != APP_CORE_PT))
rte_panic("Core %u misconfiguration\n", core_id);
RTE_LOG(INFO, USER1, "Core %u is doing pass-through\n", core_id);
/* Pipeline configuration */
p = rte_pipeline_create(&pipeline_params);
if (p == NULL)
rte_panic("%s: Unable to configure the pipeline\n", __func__);
/* Input port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_reader_params port_ring_params = {
.ring = app.rings[core_params->swq_in[i]],
};
struct rte_pipeline_port_in_params port_params = {
.ops = &rte_port_ring_reader_ops,
.arg_create = (void *) &port_ring_params,
.f_action = NULL,
.arg_ah = NULL,
.burst_size = app.bsz_swq_rd,
};
if (rte_pipeline_port_in_create(p, &port_params,
&port_in_id[i])) {
rte_panic("%s: Unable to configure input port for "
"ring %d\n", __func__, i);
}
}
/* Output port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_writer_params port_ring_params = {
.ring = app.rings[core_params->swq_out[i]],
.tx_burst_sz = app.bsz_swq_wr,
};
struct rte_pipeline_port_out_params port_params = {
.ops = &rte_port_ring_writer_ops,
.arg_create = (void *) &port_ring_params,
.f_action = NULL,
.f_action_bulk = NULL,
.arg_ah = NULL,
};
if (rte_pipeline_port_out_create(p, &port_params,
&port_out_id[i])) {
rte_panic("%s: Unable to configure output port for "
"ring %d\n", __func__, i);
}
}
/* Table configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_stub_ops,
.arg_create = NULL,
.f_action_hit = NULL,
.f_action_miss = NULL,
.arg_ah = NULL,
.action_data_size = 0,
};
if (rte_pipeline_table_create(p, &table_params, &table_id[i]))
rte_panic("%s: Unable to configure table %u\n",
__func__, i);
}
/* Interconnecting ports and tables */
for (i = 0; i < app.n_ports; i++) {
if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
table_id[i])) {
rte_panic("%s: Unable to connect input port %u to "
"table %u\n", __func__, port_in_id[i],
table_id[i]);
}
}
/* Add entries to tables */
for (i = 0; i < app.n_ports; i++) {
struct rte_pipeline_table_entry default_entry = {
.action = RTE_PIPELINE_ACTION_PORT,
{.port_id = port_out_id[i]},
};
struct rte_pipeline_table_entry *default_entry_ptr;
if (rte_pipeline_table_default_entry_add(p, table_id[i],
&default_entry, &default_entry_ptr))
rte_panic("%s: Unable to add default entry to "
"table %u\n", __func__, table_id[i]);
}
/* Enable input ports */
for (i = 0; i < app.n_ports; i++)
if (rte_pipeline_port_in_enable(p, port_in_id[i]))
rte_panic("Unable to enable input port %u\n",
port_in_id[i]);
/* Check pipeline consistency */
if (rte_pipeline_check(p) < 0)
rte_panic("%s: Pipeline consistency check failed\n", __func__);
/* Run-time */
for (i = 0; ; i++) {
rte_pipeline_run(p);
if ((i & APP_FLUSH) == 0)
rte_pipeline_flush(p);
}
}
void
app_main_loop_passthrough(void) {
struct app_mbuf_array *m;
uint32_t i;
uint32_t core_id = rte_lcore_id();
struct app_core_params *core_params = app_get_core_params(core_id);
if ((core_params == NULL) || (core_params->core_type != APP_CORE_PT))
rte_panic("Core %u misconfiguration\n", core_id);
RTE_LOG(INFO, USER1, "Core %u is doing pass-through (no pipeline)\n",
core_id);
m = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
CACHE_LINE_SIZE, rte_socket_id());
if (m == NULL)
rte_panic("%s: cannot allocate buffer space\n", __func__);
for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
int ret;
ret = rte_ring_sc_dequeue_bulk(
app.rings[core_params->swq_in[i]],
(void **) m->array,
app.bsz_swq_rd);
if (ret == -ENOENT)
continue;
do {
ret = rte_ring_sp_enqueue_bulk(
app.rings[core_params->swq_out[i]],
(void **) m->array,
app.bsz_swq_wr);
} while (ret < 0);
}
}

@ -0,0 +1,474 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_byteorder.h>
#include <rte_port_ring.h>
#include <rte_table_lpm.h>
#include <rte_table_hash.h>
#include <rte_pipeline.h>
#include "main.h"
#include <unistd.h>
struct app_routing_table_entry {
struct rte_pipeline_table_entry head;
uint32_t nh_ip;
uint32_t nh_iface;
};
struct app_arp_table_entry {
struct rte_pipeline_table_entry head;
struct ether_addr nh_arp;
};
static inline void
app_routing_table_write_metadata(
struct rte_mbuf *pkt,
struct app_routing_table_entry *entry)
{
struct app_pkt_metadata *c =
(struct app_pkt_metadata *) RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);
c->arp_key.nh_ip = entry->nh_ip;
c->arp_key.nh_iface = entry->nh_iface;
}
static int
app_routing_table_ah(
struct rte_mbuf **pkts,
uint64_t *pkts_mask,
struct rte_pipeline_table_entry **entries,
__attribute__((unused)) void *arg)
{
uint64_t pkts_in_mask = *pkts_mask;
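/*
 * (m & (m + 1)) == 0 tests that the mask is a dense run of low bits with
 * no holes; in that case all slots up to popcount(m) hold valid packets
 * and a simple counted loop suffices. Otherwise only the set bits are
 * visited, using ctzll to find each packet index.
 */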
if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) {
uint64_t n_pkts = __builtin_popcountll(pkts_in_mask);
uint32_t i;
for (i = 0; i < n_pkts; i++) {
struct rte_mbuf *m = pkts[i];
struct app_routing_table_entry *a =
(struct app_routing_table_entry *) entries[i];
app_routing_table_write_metadata(m, a);
}
} else
for ( ; pkts_in_mask; ) {
struct rte_mbuf *m;
struct app_routing_table_entry *a;
uint64_t pkt_mask;
uint32_t packet_index;
packet_index = __builtin_ctzll(pkts_in_mask);
pkt_mask = 1LLU << packet_index;
pkts_in_mask &= ~pkt_mask;
m = pkts[packet_index];
a = (struct app_routing_table_entry *)
entries[packet_index];
app_routing_table_write_metadata(m, a);
}
return 0;
}
static inline void
app_arp_table_write_metadata(
struct rte_mbuf *pkt,
struct app_arp_table_entry *entry)
{
struct app_pkt_metadata *c =
(struct app_pkt_metadata *) RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);
ether_addr_copy(&entry->nh_arp, &c->nh_arp);
}
static int
app_arp_table_ah(
struct rte_mbuf **pkts,
uint64_t *pkts_mask,
struct rte_pipeline_table_entry **entries,
__attribute__((unused)) void *arg)
{
uint64_t pkts_in_mask = *pkts_mask;
if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) {
uint64_t n_pkts = __builtin_popcountll(pkts_in_mask);
uint32_t i;
for (i = 0; i < n_pkts; i++) {
struct rte_mbuf *m = pkts[i];
struct app_arp_table_entry *a =
(struct app_arp_table_entry *) entries[i];
app_arp_table_write_metadata(m, a);
}
} else {
for ( ; pkts_in_mask; ) {
struct rte_mbuf *m;
struct app_arp_table_entry *a;
uint64_t pkt_mask;
uint32_t packet_index;
packet_index = __builtin_ctzll(pkts_in_mask);
pkt_mask = 1LLU << packet_index;
pkts_in_mask &= ~pkt_mask;
m = pkts[packet_index];
a = (struct app_arp_table_entry *)
entries[packet_index];
app_arp_table_write_metadata(m, a);
}
}
return 0;
}
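/*
 * Trivial hash for the 8-byte ARP key: the second 32-bit word of the key
 * (assumed to be the next-hop IP address) is returned directly as the
 * signature.
 */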
static uint64_t app_arp_table_hash(
void *key,
__attribute__((unused)) uint32_t key_size,
__attribute__((unused)) uint64_t seed)
{
uint32_t *k = (uint32_t *) key;
return k[1];
}
struct app_core_routing_message_handle_params {
struct rte_ring *ring_req;
struct rte_ring *ring_resp;
struct rte_pipeline *p;
uint32_t *port_out_id;
uint32_t routing_table_id;
uint32_t arp_table_id;
};
static void
app_message_handle(struct app_core_routing_message_handle_params *params);
void
app_main_loop_pipeline_routing(void) {
struct rte_pipeline_params pipeline_params = {
.name = "pipeline",
.socket_id = rte_socket_id(),
};
struct rte_pipeline *p;
uint32_t port_in_id[APP_MAX_PORTS];
uint32_t port_out_id[APP_MAX_PORTS];
uint32_t routing_table_id, arp_table_id;
uint32_t i;
uint32_t core_id = rte_lcore_id();
struct app_core_params *core_params = app_get_core_params(core_id);
struct app_core_routing_message_handle_params mh_params;
if ((core_params == NULL) || (core_params->core_type != APP_CORE_RT))
rte_panic("Core %u misconfiguration\n", core_id);
RTE_LOG(INFO, USER1, "Core %u is doing routing\n", core_id);
/* Pipeline configuration */
p = rte_pipeline_create(&pipeline_params);
if (p == NULL)
rte_panic("Unable to configure the pipeline\n");
/* Input port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_reader_params port_ring_params = {
.ring = app.rings[core_params->swq_in[i]],
};
struct rte_pipeline_port_in_params port_params = {
.ops = &rte_port_ring_reader_ops,
.arg_create = (void *) &port_ring_params,
.f_action = NULL,
.arg_ah = NULL,
.burst_size = app.bsz_swq_rd,
};
if (rte_pipeline_port_in_create(p, &port_params,
&port_in_id[i]))
rte_panic("Unable to configure input port for "
"ring %d\n", i);
}
/* Output port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_writer_params port_ring_params = {
.ring = app.rings[core_params->swq_out[i]],
.tx_burst_sz = app.bsz_swq_wr,
};
struct rte_pipeline_port_out_params port_params = {
.ops = &rte_port_ring_writer_ops,
.arg_create = (void *) &port_ring_params,
.f_action = NULL,
.f_action_bulk = NULL,
.arg_ah = NULL,
};
if (rte_pipeline_port_out_create(p, &port_params,
&port_out_id[i]))
rte_panic("Unable to configure output port for "
"ring %d\n", i);
}
/* Routing table configuration */
{
struct rte_table_lpm_params table_lpm_params = {
.n_rules = app.max_routing_rules,
.entry_unique_size =
sizeof(struct app_routing_table_entry),
.offset = __builtin_offsetof(struct app_pkt_metadata,
flow_key.ip_dst),
};
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_lpm_ops,
.arg_create = &table_lpm_params,
.f_action_hit = app_routing_table_ah,
.f_action_miss = NULL,
.arg_ah = NULL,
.action_data_size =
sizeof(struct app_routing_table_entry) -
sizeof(struct rte_pipeline_table_entry),
};
if (rte_pipeline_table_create(p, &table_params,
&routing_table_id))
rte_panic("Unable to configure the LPM table\n");
}
/* ARP table configuration */
{
struct rte_table_hash_key8_lru_params table_arp_params = {
.n_entries = app.max_arp_rules,
.f_hash = app_arp_table_hash,
.seed = 0,
.signature_offset = 0, /* Unused */
.key_offset = __builtin_offsetof(
struct app_pkt_metadata, arp_key),
};
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_hash_key8_lru_dosig_ops,
.arg_create = &table_arp_params,
.f_action_hit = app_arp_table_ah,
.f_action_miss = NULL,
.arg_ah = NULL,
.action_data_size = sizeof(struct app_arp_table_entry) -
sizeof(struct rte_pipeline_table_entry),
};
if (rte_pipeline_table_create(p, &table_params, &arp_table_id))
rte_panic("Unable to configure the ARP table\n");
}
/* Interconnecting ports and tables */
for (i = 0; i < app.n_ports; i++) {
if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
routing_table_id))
rte_panic("Unable to connect input port %u to "
"table %u\n", port_in_id[i], routing_table_id);
}
/* Enable input ports */
for (i = 0; i < app.n_ports; i++)
if (rte_pipeline_port_in_enable(p, port_in_id[i]))
rte_panic("Unable to enable input port %u\n",
port_in_id[i]);
/* Check pipeline consistency */
if (rte_pipeline_check(p) < 0)
rte_panic("Pipeline consistency check failed\n");
/* Message handling */
mh_params.ring_req =
app_get_ring_req(app_get_first_core_id(APP_CORE_RT));
mh_params.ring_resp =
app_get_ring_resp(app_get_first_core_id(APP_CORE_RT));
mh_params.p = p;
mh_params.port_out_id = port_out_id;
mh_params.routing_table_id = routing_table_id;
mh_params.arp_table_id = arp_table_id;
/* Run-time */
for (i = 0; ; i++) {
rte_pipeline_run(p);
if ((i & APP_FLUSH) == 0) {
rte_pipeline_flush(p);
app_message_handle(&mh_params);
}
}
}
static void
app_message_handle(struct app_core_routing_message_handle_params *params)
{
struct rte_ring *ring_req = params->ring_req;
struct rte_ring *ring_resp;
void *msg;
struct app_msg_req *req;
struct app_msg_resp *resp;
struct rte_pipeline *p;
uint32_t *port_out_id;
uint32_t routing_table_id, arp_table_id;
int result;
/* Read request message */
result = rte_ring_sc_dequeue(ring_req, &msg);
if (result != 0)
return;
ring_resp = params->ring_resp;
p = params->p;
port_out_id = params->port_out_id;
routing_table_id = params->routing_table_id;
arp_table_id = params->arp_table_id;
/* Handle request */
req = (struct app_msg_req *) ((struct rte_mbuf *)msg)->ctrl.data;
switch (req->type) {
case APP_MSG_REQ_PING:
{
result = 0;
break;
}
case APP_MSG_REQ_RT_ADD:
{
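/*
 * A route hit does not send the packet out directly: the entry action
 * chains to the ARP table, and the routing table's action handler
 * (app_routing_table_ah) writes the next-hop ARP key (nh_ip, nh_iface)
 * into the packet metadata before the packet reaches the ARP table.
 */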
struct app_routing_table_entry entry = {
.head = {
.action = RTE_PIPELINE_ACTION_TABLE,
{.table_id = arp_table_id},
},
.nh_ip = req->routing_add.nh_ip,
.nh_iface = port_out_id[req->routing_add.port],
};
struct rte_table_lpm_key key = {
.ip = req->routing_add.ip,
.depth = req->routing_add.depth,
};
struct rte_pipeline_table_entry *entry_ptr;
int key_found;
result = rte_pipeline_table_entry_add(p, routing_table_id, &key,
(struct rte_pipeline_table_entry *) &entry, &key_found,
&entry_ptr);
break;
}
case APP_MSG_REQ_RT_DEL:
{
struct rte_table_lpm_key key = {
.ip = req->routing_del.ip,
.depth = req->routing_del.depth,
};
int key_found;
result = rte_pipeline_table_entry_delete(p, routing_table_id,
&key, &key_found, NULL);
break;
}
case APP_MSG_REQ_ARP_ADD:
{
struct app_arp_table_entry entry = {
.head = {
.action = RTE_PIPELINE_ACTION_PORT,
{.port_id =
port_out_id[req->arp_add.out_iface]},
},
.nh_arp = req->arp_add.nh_arp,
};
struct app_arp_key arp_key = {
.nh_ip = req->arp_add.nh_ip,
.nh_iface = port_out_id[req->arp_add.out_iface],
};
struct rte_pipeline_table_entry *entry_ptr;
int key_found;
result = rte_pipeline_table_entry_add(p, arp_table_id, &arp_key,
(struct rte_pipeline_table_entry *) &entry, &key_found,
&entry_ptr);
break;
}
case APP_MSG_REQ_ARP_DEL:
{
struct app_arp_key arp_key = {
.nh_ip = req->arp_del.nh_ip,
.nh_iface = port_out_id[req->arp_del.out_iface],
};
int key_found;
result = rte_pipeline_table_entry_delete(p, arp_table_id,
&arp_key, &key_found, NULL);
break;
}
default:
rte_panic("RT Unrecognized message type (%u)\n", req->type);
}
/* Fill in response message */
resp = (struct app_msg_resp *) ((struct rte_mbuf *)msg)->ctrl.data;
resp->result = result;
/* Send response */
do {
result = rte_ring_sp_enqueue(ring_resp, msg);
} while (result == -ENOBUFS);
}

@ -0,0 +1,385 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_jhash.h>
#include <rte_port_ethdev.h>
#include <rte_port_ring.h>
#include <rte_table_stub.h>
#include <rte_pipeline.h>
#include "main.h"
struct app_core_rx_message_handle_params {
struct rte_ring *ring_req;
struct rte_ring *ring_resp;
struct rte_pipeline *p;
uint32_t *port_in_id;
};
static void
app_message_handle(struct app_core_rx_message_handle_params *params);
static int
app_pipeline_rx_port_in_action_handler(struct rte_mbuf **pkts, uint32_t n,
uint64_t *pkts_mask, void *arg);
void
app_main_loop_pipeline_rx(void) {
struct rte_pipeline *p;
uint32_t port_in_id[APP_MAX_PORTS];
uint32_t port_out_id[APP_MAX_PORTS];
uint32_t table_id[APP_MAX_PORTS];
uint32_t i;
uint32_t core_id = rte_lcore_id();
struct app_core_params *core_params = app_get_core_params(core_id);
struct app_core_rx_message_handle_params mh_params;
if ((core_params == NULL) || (core_params->core_type != APP_CORE_RX))
rte_panic("Core %u misconfiguration\n", core_id);
RTE_LOG(INFO, USER1, "Core %u is doing RX\n", core_id);
/* Pipeline configuration */
struct rte_pipeline_params pipeline_params = {
.name = "pipeline",
.socket_id = rte_socket_id(),
};
p = rte_pipeline_create(&pipeline_params);
if (p == NULL)
rte_panic("%s: Unable to configure the pipeline\n", __func__);
/* Input port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ethdev_reader_params port_ethdev_params = {
.port_id = app.ports[i],
.queue_id = 0,
};
struct rte_pipeline_port_in_params port_params = {
.ops = &rte_port_ethdev_reader_ops,
.arg_create = (void *) &port_ethdev_params,
.f_action = app_pipeline_rx_port_in_action_handler,
.arg_ah = NULL,
.burst_size = app.bsz_hwq_rd,
};
if (rte_pipeline_port_in_create(p, &port_params,
&port_in_id[i]))
rte_panic("%s: Unable to configure input port for "
"port %d\n", __func__, app.ports[i]);
}
/* Output port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_writer_params port_ring_params = {
.ring = app.rings[core_params->swq_out[i]],
.tx_burst_sz = app.bsz_swq_wr,
};
struct rte_pipeline_port_out_params port_params = {
.ops = &rte_port_ring_writer_ops,
.arg_create = (void *) &port_ring_params,
.f_action = NULL,
.f_action_bulk = NULL,
.arg_ah = NULL,
};
if (rte_pipeline_port_out_create(p, &port_params,
&port_out_id[i]))
rte_panic("%s: Unable to configure output port for "
"ring RX %i\n", __func__, i);
}
/* Table configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_stub_ops,
.arg_create = NULL,
.f_action_hit = NULL,
.f_action_miss = NULL,
.arg_ah = NULL,
.action_data_size = 0,
};
if (rte_pipeline_table_create(p, &table_params, &table_id[i]))
rte_panic("%s: Unable to configure table %u\n",
__func__, table_id[i]);
}
/* Interconnecting ports and tables */
for (i = 0; i < app.n_ports; i++)
if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
table_id[i]))
rte_panic("%s: Unable to connect input port %u to "
"table %u\n", __func__, port_in_id[i],
table_id[i]);
/* Add entries to tables */
for (i = 0; i < app.n_ports; i++) {
struct rte_pipeline_table_entry default_entry = {
.action = RTE_PIPELINE_ACTION_PORT,
{.port_id = port_out_id[i]},
};
struct rte_pipeline_table_entry *default_entry_ptr;
if (rte_pipeline_table_default_entry_add(p, table_id[i],
&default_entry, &default_entry_ptr))
rte_panic("%s: Unable to add default entry to "
"table %u\n", __func__, table_id[i]);
}
/* Enable input ports */
for (i = 0; i < app.n_ports; i++)
if (rte_pipeline_port_in_enable(p, port_in_id[i]))
rte_panic("Unable to enable input port %u\n",
port_in_id[i]);
/* Check pipeline consistency */
if (rte_pipeline_check(p) < 0)
rte_panic("%s: Pipeline consistency check failed\n", __func__);
/* Message handling */
mh_params.ring_req =
app_get_ring_req(app_get_first_core_id(APP_CORE_RX));
mh_params.ring_resp =
app_get_ring_resp(app_get_first_core_id(APP_CORE_RX));
mh_params.p = p;
mh_params.port_in_id = port_in_id;
/* Run-time */
for (i = 0; ; i++) {
rte_pipeline_run(p);
if ((i & APP_FLUSH) == 0) {
rte_pipeline_flush(p);
app_message_handle(&mh_params);
}
}
}
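/*
 * Flow signature: derived from the low 24 bits of the destination IP
 * address, shifted right by two. This is a sample-specific signature for
 * the flow classification hash table, not a general-purpose hash.
 */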
uint64_t test_hash(
void *key,
__attribute__((unused)) uint32_t key_size,
__attribute__((unused)) uint64_t seed)
{
struct app_flow_key *flow_key = (struct app_flow_key *) key;
uint32_t ip_dst = rte_be_to_cpu_32(flow_key->ip_dst);
uint64_t signature = (ip_dst & 0x00FFFFFFLLU) >> 2;
return signature;
}
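/*
 * Jenkins hash unrolled for fixed 16-byte keys (four 32-bit words), e.g.
 * the 16-byte flow key built in app_pkt_metadata_fill() below.
 */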
uint32_t
rte_jhash2_16(uint32_t *k, uint32_t initval)
{
uint32_t a, b, c;
a = b = RTE_JHASH_GOLDEN_RATIO;
c = initval;
a += k[0];
b += k[1];
c += k[2];
__rte_jhash_mix(a, b, c);
c += 16; /* length in bytes */
a += k[3]; /* Remaining word */
__rte_jhash_mix(a, b, c);
return c;
}
static inline void
app_pkt_metadata_fill(struct rte_mbuf *m)
{
uint8_t *m_data = rte_pktmbuf_mtod(m, uint8_t *);
struct app_pkt_metadata *c =
(struct app_pkt_metadata *) RTE_MBUF_METADATA_UINT8_PTR(m, 0);
struct ipv4_hdr *ip_hdr =
(struct ipv4_hdr *) &m_data[sizeof(struct ether_hdr)];
uint64_t *ipv4_hdr_slab = (uint64_t *) ip_hdr;
/*
 * slab0 covers bytes 8..15 of the IPv4 header (TTL, protocol, header
 * checksum, source address); the mask zeroes the TTL and checksum bytes,
 * assuming little-endian byte order. slab1 covers bytes 16..23, i.e. the
 * destination address plus the first four bytes of the L4 header (the
 * TCP/UDP ports), so the flow key is effectively a masked 5-tuple.
 */
c->flow_key.slab0 = ipv4_hdr_slab[1] & 0xFFFFFFFF0000FF00LLU;
c->flow_key.slab1 = ipv4_hdr_slab[2];
c->signature = test_hash((void *) &c->flow_key, 0, 0);
/* Pop Ethernet header */
if (app.ether_hdr_pop_push) {
rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
m->pkt.vlan_macip.f.l2_len = 0;
m->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
}
}
int
app_pipeline_rx_port_in_action_handler(
struct rte_mbuf **pkts,
uint32_t n,
uint64_t *pkts_mask,
__rte_unused void *arg)
{
uint32_t i;
for (i = 0; i < n; i++) {
struct rte_mbuf *m = pkts[i];
app_pkt_metadata_fill(m);
}
*pkts_mask = (~0LLU) >> (64 - n); /* mark all n packets as valid */
return 0;
}
void
app_main_loop_rx(void) {
struct app_mbuf_array *ma;
uint32_t i, j;
int ret;
uint32_t core_id = rte_lcore_id();
struct app_core_params *core_params = app_get_core_params(core_id);
if ((core_params == NULL) || (core_params->core_type != APP_CORE_RX))
rte_panic("Core %u misconfiguration\n", core_id);
RTE_LOG(INFO, USER1, "Core %u is doing RX (no pipeline)\n", core_id);
ma = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
CACHE_LINE_SIZE, rte_socket_id());
if (ma == NULL)
rte_panic("%s: cannot allocate buffer space\n", __func__);
for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
uint32_t n_mbufs;
n_mbufs = rte_eth_rx_burst(
app.ports[i],
0,
ma->array,
app.bsz_hwq_rd);
if (n_mbufs == 0)
continue;
for (j = 0; j < n_mbufs; j++) {
struct rte_mbuf *m = ma->array[j];
app_pkt_metadata_fill(m);
}
do {
ret = rte_ring_sp_enqueue_bulk(
app.rings[core_params->swq_out[i]],
(void **) ma->array,
n_mbufs);
} while (ret < 0);
}
}
static void
app_message_handle(struct app_core_rx_message_handle_params *params)
{
struct rte_ring *ring_req = params->ring_req;
struct rte_ring *ring_resp;
void *msg;
struct app_msg_req *req;
struct app_msg_resp *resp;
struct rte_pipeline *p;
uint32_t *port_in_id;
int result;
/* Read request message */
result = rte_ring_sc_dequeue(ring_req, &msg);
if (result != 0)
return;
ring_resp = params->ring_resp;
p = params->p;
port_in_id = params->port_in_id;
/* Handle request */
req = (struct app_msg_req *) ((struct rte_mbuf *)msg)->ctrl.data;
switch (req->type) {
case APP_MSG_REQ_PING:
{
result = 0;
break;
}
case APP_MSG_REQ_RX_PORT_ENABLE:
{
result = rte_pipeline_port_in_enable(p,
port_in_id[req->rx_up.port]);
break;
}
case APP_MSG_REQ_RX_PORT_DISABLE:
{
result = rte_pipeline_port_in_disable(p,
port_in_id[req->rx_down.port]);
break;
}
default:
rte_panic("RX Unrecognized message type (%u)\n", req->type);
}
/* Fill in response message */
resp = (struct app_msg_resp *) ((struct rte_mbuf *)msg)->ctrl.data;
resp->result = result;
/* Send response */
do {
result = rte_ring_sp_enqueue(ring_resp, msg);
} while (result == -ENOBUFS);
}

@ -0,0 +1,283 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_port_ethdev.h>
#include <rte_port_ring.h>
#include <rte_table_stub.h>
#include <rte_pipeline.h>
#include "main.h"
static struct ether_addr local_ether_addr = {
.addr_bytes = {0, 1, 2, 3, 4, 5},
};
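/*
 * Rebuild the Ethernet header on TX from the packet metadata: the
 * destination MAC is the next hop resolved by the ARP table, while the
 * source MAC is the hard-coded local_ether_addr above (a sample
 * placeholder rather than the real port address).
 */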
static inline void
app_pkt_metadata_flush(struct rte_mbuf *pkt)
{
struct app_pkt_metadata *pkt_meta = (struct app_pkt_metadata *)
RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);
struct ether_hdr *ether_hdr = (struct ether_hdr *)
rte_pktmbuf_prepend(pkt, (uint16_t) sizeof(struct ether_hdr));
ether_addr_copy(&pkt_meta->nh_arp, &ether_hdr->d_addr);
ether_addr_copy(&local_ether_addr, &ether_hdr->s_addr);
ether_hdr->ether_type = rte_bswap16(ETHER_TYPE_IPv4);
pkt->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
}
static int
app_pipeline_tx_port_in_action_handler(
struct rte_mbuf **pkts,
uint32_t n,
uint64_t *pkts_mask,
__rte_unused void *arg)
{
uint32_t i;
for (i = 0; i < n; i++) {
struct rte_mbuf *m = pkts[i];
app_pkt_metadata_flush(m);
}
*pkts_mask = (~0LLU) >> (64 - n);
return 0;
}
void
app_main_loop_pipeline_tx(void) {
struct rte_pipeline *p;
uint32_t port_in_id[APP_MAX_PORTS];
uint32_t port_out_id[APP_MAX_PORTS];
uint32_t table_id[APP_MAX_PORTS];
uint32_t i;
uint32_t core_id = rte_lcore_id();
struct app_core_params *core_params = app_get_core_params(core_id);
if ((core_params == NULL) || (core_params->core_type != APP_CORE_TX))
rte_panic("Core %u misconfiguration\n", core_id);
RTE_LOG(INFO, USER1, "Core %u is doing TX\n", core_id);
/* Pipeline configuration */
struct rte_pipeline_params pipeline_params = {
.name = "pipeline",
.socket_id = rte_socket_id(),
};
p = rte_pipeline_create(&pipeline_params);
if (p == NULL)
rte_panic("%s: Unable to configure the pipeline\n", __func__);
/* Input port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_reader_params port_ring_params = {
.ring = app.rings[core_params->swq_in[i]],
};
struct rte_pipeline_port_in_params port_params = {
.ops = &rte_port_ring_reader_ops,
.arg_create = (void *) &port_ring_params,
.f_action = (app.ether_hdr_pop_push) ?
app_pipeline_tx_port_in_action_handler : NULL,
.arg_ah = NULL,
.burst_size = app.bsz_swq_rd,
};
if (rte_pipeline_port_in_create(p, &port_params,
&port_in_id[i])) {
rte_panic("%s: Unable to configure input port for "
"ring TX %i\n", __func__, i);
}
}
/* Output port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ethdev_writer_params port_ethdev_params = {
.port_id = app.ports[i],
.queue_id = 0,
.tx_burst_sz = app.bsz_hwq_wr,
};
struct rte_pipeline_port_out_params port_params = {
.ops = &rte_port_ethdev_writer_ops,
.arg_create = (void *) &port_ethdev_params,
.f_action = NULL,
.f_action_bulk = NULL,
.arg_ah = NULL,
};
if (rte_pipeline_port_out_create(p, &port_params,
&port_out_id[i])) {
rte_panic("%s: Unable to configure output port for "
"port %d\n", __func__, app.ports[i]);
}
}
/* Table configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_stub_ops,
.arg_create = NULL,
.f_action_hit = NULL,
.f_action_miss = NULL,
.arg_ah = NULL,
.action_data_size = 0,
};
if (rte_pipeline_table_create(p, &table_params, &table_id[i])) {
rte_panic("%s: Unable to configure table %u\n",
__func__, table_id[i]);
}
}
/* Interconnecting ports and tables */
for (i = 0; i < app.n_ports; i++)
if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
table_id[i]))
rte_panic("%s: Unable to connect input port %u to "
"table %u\n", __func__, port_in_id[i],
table_id[i]);
/* Add entries to tables */
for (i = 0; i < app.n_ports; i++) {
struct rte_pipeline_table_entry default_entry = {
.action = RTE_PIPELINE_ACTION_PORT,
{.port_id = port_out_id[i]},
};
struct rte_pipeline_table_entry *default_entry_ptr;
if (rte_pipeline_table_default_entry_add(p, table_id[i],
&default_entry, &default_entry_ptr))
rte_panic("%s: Unable to add default entry to "
"table %u\n", __func__, table_id[i]);
}
/* Enable input ports */
for (i = 0; i < app.n_ports; i++)
if (rte_pipeline_port_in_enable(p, port_in_id[i]))
rte_panic("Unable to enable input port %u\n",
port_in_id[i]);
/* Check pipeline consistency */
if (rte_pipeline_check(p) < 0)
rte_panic("%s: Pipeline consistency check failed\n", __func__);
/* Run-time */
for (i = 0; ; i++) {
rte_pipeline_run(p);
if ((i & APP_FLUSH) == 0)
rte_pipeline_flush(p);
}
}
void
app_main_loop_tx(void) {
struct app_mbuf_array *m[APP_MAX_PORTS];
uint32_t i;
uint32_t core_id = rte_lcore_id();
struct app_core_params *core_params = app_get_core_params(core_id);
if ((core_params == NULL) || (core_params->core_type != APP_CORE_TX))
rte_panic("Core %u misconfiguration\n", core_id);
RTE_LOG(INFO, USER1, "Core %u is doing TX (no pipeline)\n", core_id);
for (i = 0; i < APP_MAX_PORTS; i++) {
m[i] = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
CACHE_LINE_SIZE, rte_socket_id());
if (m[i] == NULL)
rte_panic("%s: Cannot allocate buffer space\n",
__func__);
m[i]->n_mbufs = 0; /* rte_malloc_socket() does not zero the memory */
}
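/*
 * Batch mbufs per port: each iteration appends exactly bsz_swq_rd mbufs
 * (the bulk dequeue is all-or-nothing) until at least bsz_hwq_wr are
 * buffered, then sends them in one TX burst and frees whatever the NIC
 * did not accept.
 */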
for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
uint32_t n_mbufs, n_pkts;
int ret;
n_mbufs = m[i]->n_mbufs;
ret = rte_ring_sc_dequeue_bulk(
app.rings[core_params->swq_in[i]],
(void **) &m[i]->array[n_mbufs],
app.bsz_swq_rd);
if (ret == -ENOENT)
continue;
n_mbufs += app.bsz_swq_rd;
if (n_mbufs < app.bsz_hwq_wr) {
m[i]->n_mbufs = n_mbufs;
continue;
}
n_pkts = rte_eth_tx_burst(
app.ports[i],
0,
m[i]->array,
n_mbufs);
if (n_pkts < n_mbufs) {
uint32_t k;
for (k = n_pkts; k < n_mbufs; k++) {
struct rte_mbuf *pkt_to_free;
pkt_to_free = m[i]->array[k];
rte_pktmbuf_free(pkt_to_free);
}
}
m[i]->n_mbufs = 0;
}
}