examples/ip_pipeline: rework passthrough pipeline

The passthrough pipeline implementation is split into two files:
pipeline_passthrough.c handles the front-end functions (CLI command
parsing), while pipeline_passthrough_be.c contains the implementation
of the functions performed by the pipeline itself (back-end).

Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Authored by Jasvinder Singh on 2015-07-07 10:09:32 +02:00, committed by Thomas Monjalon
parent 147fc74f44
commit 947024a26d
8 changed files with 1341 additions and 179 deletions
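
For orientation before the diffs, the sketch below condenses how the two halves introduced by this patch fit together; it simply restates definitions that appear in the new files (the front-end registers no CLI commands yet, so its ops are all NULL).

/*
 * Orientation sketch (condensed from the new files, not an extra change):
 * the front-end and back-end halves are tied together by one pipeline type.
 */

/* Front-end (pipeline_passthrough.c): CLI handling; nothing registered yet. */
static struct pipeline_fe_ops pipeline_passthrough_fe_ops = {
	.f_init = NULL,
	.f_free = NULL,
	.cmds = NULL,
};

/* Back-end (pipeline_passthrough_be.c): pipeline build and run-time hooks. */
struct pipeline_be_ops pipeline_passthrough_be_ops = {
	.f_init = pipeline_passthrough_init,
	.f_free = pipeline_passthrough_free,
	.f_run = NULL,			/* default run loop */
	.f_timer = pipeline_passthrough_timer,
	.f_track = pipeline_passthrough_track,
};

/* Glue: one pipeline type, registered with app_pipeline_type_register(). */
struct pipeline_type pipeline_passthrough = {
	.name = "PASS-THROUGH",
	.be_ops = &pipeline_passthrough_be_ops,
	.fe_ops = &pipeline_passthrough_fe_ops,
};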


@@ -60,6 +60,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_common_be.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_common_fe.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_master_be.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_master.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_passthrough_be.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_passthrough.c
CFLAGS += -I$(SRCDIR) -I$(SRCDIR)/pipeline
CFLAGS += -O3


@@ -46,6 +46,7 @@
#include "pipeline.h"
#include "pipeline_common_fe.h"
#include "pipeline_master.h"
#include "pipeline_passthrough.h"
#define APP_NAME_SIZE 32
@@ -1285,6 +1286,7 @@ int app_init(struct app_params *app)
app_pipeline_common_cmd_push(app);
app_pipeline_type_register(app, &pipeline_master);
app_pipeline_type_register(app, &pipeline_passthrough);
app_init_pipelines(app);
app_init_threads(app);


@@ -0,0 +1,351 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __INCLUDE_HASH_FUNC_H__
#define __INCLUDE_HASH_FUNC_H__
static inline uint64_t
hash_xor_key8(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t xor0;
xor0 = seed ^ k[0];
return (xor0 >> 32) ^ xor0;
}
static inline uint64_t
hash_xor_key16(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t xor0;
xor0 = (k[0] ^ seed) ^ k[1];
return (xor0 >> 32) ^ xor0;
}
static inline uint64_t
hash_xor_key24(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t xor0;
xor0 = (k[0] ^ seed) ^ k[1];
xor0 ^= k[2];
return (xor0 >> 32) ^ xor0;
}
static inline uint64_t
hash_xor_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t xor0, xor1;
xor0 = (k[0] ^ seed) ^ k[1];
xor1 = k[2] ^ k[3];
xor0 ^= xor1;
return (xor0 >> 32) ^ xor0;
}
static inline uint64_t
hash_xor_key40(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t xor0, xor1;
xor0 = (k[0] ^ seed) ^ k[1];
xor1 = k[2] ^ k[3];
xor0 ^= xor1;
xor0 ^= k[4];
return (xor0 >> 32) ^ xor0;
}
static inline uint64_t
hash_xor_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t xor0, xor1, xor2;
xor0 = (k[0] ^ seed) ^ k[1];
xor1 = k[2] ^ k[3];
xor2 = k[4] ^ k[5];
xor0 ^= xor1;
xor0 ^= xor2;
return (xor0 >> 32) ^ xor0;
}
static inline uint64_t
hash_xor_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t xor0, xor1, xor2;
xor0 = (k[0] ^ seed) ^ k[1];
xor1 = k[2] ^ k[3];
xor2 = k[4] ^ k[5];
xor0 ^= xor1;
xor2 ^= k[6];
xor0 ^= xor2;
return (xor0 >> 32) ^ xor0;
}
static inline uint64_t
hash_xor_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t xor0, xor1, xor2, xor3;
xor0 = (k[0] ^ seed) ^ k[1];
xor1 = k[2] ^ k[3];
xor2 = k[4] ^ k[5];
xor3 = k[6] ^ k[7];
xor0 ^= xor1;
xor2 ^= xor3;
xor0 ^= xor2;
return (xor0 >> 32) ^ xor0;
}
#if defined(__x86_64__)
#include <x86intrin.h>
static inline uint64_t
hash_crc_key8(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t crc0;
crc0 = _mm_crc32_u64(seed, k[0]);
return crc0;
}
static inline uint64_t
hash_crc_key16(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t k0, crc0, crc1;
k0 = k[0];
crc0 = _mm_crc32_u64(k0, seed);
crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
crc0 ^= crc1;
return crc0;
}
static inline uint64_t
hash_crc_key24(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t k0, k2, crc0, crc1;
k0 = k[0];
k2 = k[2];
crc0 = _mm_crc32_u64(k0, seed);
crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
crc0 = _mm_crc32_u64(crc0, k2);
crc0 ^= crc1;
return crc0;
}
static inline uint64_t
hash_crc_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t k0, k2, crc0, crc1, crc2, crc3;
k0 = k[0];
k2 = k[2];
crc0 = _mm_crc32_u64(k0, seed);
crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
crc2 = _mm_crc32_u64(k2, k[3]);
crc3 = k2 >> 32;
crc0 = _mm_crc32_u64(crc0, crc1);
crc1 = _mm_crc32_u64(crc2, crc3);
crc0 ^= crc1;
return crc0;
}
static inline uint64_t
hash_crc_key40(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t k0, k2, crc0, crc1, crc2, crc3;
k0 = k[0];
k2 = k[2];
crc0 = _mm_crc32_u64(k0, seed);
crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
crc2 = _mm_crc32_u64(k2, k[3]);
crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
crc0 = _mm_crc32_u64(crc0, crc1);
crc1 = _mm_crc32_u64(crc2, crc3);
crc0 ^= crc1;
return crc0;
}
static inline uint64_t
hash_crc_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t k0, k2, k5, crc0, crc1, crc2, crc3;
k0 = k[0];
k2 = k[2];
k5 = k[5];
crc0 = _mm_crc32_u64(k0, seed);
crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
crc2 = _mm_crc32_u64(k2, k[3]);
crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
crc1 = _mm_crc32_u64(crc3, k5);
crc0 ^= crc1;
return crc0;
}
static inline uint64_t
hash_crc_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
k0 = k[0];
k2 = k[2];
k5 = k[5];
crc0 = _mm_crc32_u64(k0, seed);
crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
crc2 = _mm_crc32_u64(k2, k[3]);
crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
crc4 = _mm_crc32_u64(k5, k[6]);
crc5 = k5 >> 32;
crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
crc0 ^= crc1;
return crc0;
}
static inline uint64_t
hash_crc_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
uint64_t *k = key;
uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
k0 = k[0];
k2 = k[2];
k5 = k[5];
crc0 = _mm_crc32_u64(k0, seed);
crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
crc2 = _mm_crc32_u64(k2, k[3]);
crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
crc4 = _mm_crc32_u64(k5, k[6]);
crc5 = _mm_crc32_u64(k5 >> 32, k[7]);
crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
crc0 ^= crc1;
return crc0;
}
#define hash_default_key8 hash_crc_key8
#define hash_default_key16 hash_crc_key16
#define hash_default_key24 hash_crc_key24
#define hash_default_key32 hash_crc_key32
#define hash_default_key40 hash_crc_key40
#define hash_default_key48 hash_crc_key48
#define hash_default_key56 hash_crc_key56
#define hash_default_key64 hash_crc_key64
#else
#define hash_default_key8 hash_xor_key8
#define hash_default_key16 hash_xor_key16
#define hash_default_key24 hash_xor_key24
#define hash_default_key32 hash_xor_key32
#define hash_default_key40 hash_xor_key40
#define hash_default_key48 hash_xor_key48
#define hash_default_key56 hash_xor_key56
#define hash_default_key64 hash_xor_key64
#endif
#endif
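
A minimal usage sketch for this header follows (a hypothetical test program, not part of the patch); it assumes a DPDK build environment so that <rte_common.h> provides __rte_unused, and hashes a single 8-byte key through the hash_default_key8 alias (CRC32-based on x86_64, XOR fallback elsewhere).

/* Hypothetical example, not part of the patch: hash one 8-byte key. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_common.h>		/* __rte_unused, used by hash_func.h */
#include "hash_func.h"

int main(void)
{
	uint64_t key = 0x1122334455667788ULL;		/* one 8-byte key */
	uint64_t sig = hash_default_key8(&key, 8, 0);	/* key, size, seed */

	/* Resolves to hash_crc_key8() on x86_64, hash_xor_key8() otherwise. */
	printf("signature = 0x%" PRIx64 "\n", sig);
	return 0;
}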


@@ -0,0 +1,119 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __INCLUDE_PIPELINE_ACTIONS_COMMON_H__
#define __INCLUDE_PIPELINE_ACTIONS_COMMON_H__
#define PIPELINE_PORT_IN_AH(f_ah, f_pkt_work, f_pkt4_work) \
static int \
f_ah( \
struct rte_mbuf **pkts, \
uint32_t n_pkts, \
uint64_t *pkts_mask, \
void *arg) \
{ \
uint32_t i; \
\
for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
f_pkt4_work(&pkts[i], arg); \
\
for ( ; i < n_pkts; i++) \
f_pkt_work(pkts[i], arg); \
\
*pkts_mask = (~0LLU) >> (64 - n_pkts); \
\
return 0; \
}
#define PIPELINE_TABLE_AH_HIT(f_ah, f_pkt_work, f_pkt4_work) \
static int \
f_ah( \
struct rte_mbuf **pkts, \
uint64_t *pkts_mask, \
struct rte_pipeline_table_entry **entries, \
void *arg) \
{ \
uint64_t pkts_in_mask = *pkts_mask; \
\
if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
uint32_t i; \
\
for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
f_pkt4_work(&pkts[i], &entries[i], arg); \
\
for ( ; i < n_pkts; i++) \
f_pkt_work(pkts[i], entries[i], arg); \
} else \
for ( ; pkts_in_mask; ) { \
uint32_t pos = __builtin_ctzll(pkts_in_mask); \
uint64_t pkt_mask = 1LLU << pos; \
\
pkts_in_mask &= ~pkt_mask; \
f_pkt_work(pkts[pos], entries[pos], arg); \
} \
\
return 0; \
}
#define PIPELINE_TABLE_AH_MISS(f_ah, f_pkt_work, f_pkt4_work) \
static int \
f_ah( \
struct rte_mbuf **pkts, \
uint64_t *pkts_mask, \
struct rte_pipeline_table_entry *entry, \
void *arg) \
{ \
uint64_t pkts_in_mask = *pkts_mask; \
\
if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
uint32_t i; \
\
for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
f_pkt4_work(&pkts[i], entry, arg); \
\
for ( ; i < n_pkts; i++) \
f_pkt_work(pkts[i], entry, arg); \
} else \
for ( ; pkts_in_mask; ) { \
uint32_t pos = __builtin_ctzll(pkts_in_mask); \
uint64_t pkt_mask = 1LLU << pos; \
\
pkts_in_mask &= ~pkt_mask; \
f_pkt_work(pkts[pos], entry, arg); \
} \
\
return 0; \
}
#endif
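
A short sketch of how PIPELINE_PORT_IN_AH is meant to be used follows (the passthrough back-end later in this patch does exactly this with its QinQ/IPv4/IPv6 work functions); the no-op work functions here are purely illustrative.

/* Illustrative only: build a port-in action handler from two work functions. */
#include <rte_mbuf.h>
#include "pipeline_actions_common.h"

static inline void
pkt_work_noop(struct rte_mbuf *pkt, void *arg)
{
	(void) pkt;	/* per-packet work: nothing to do in this sketch */
	(void) arg;
}

static inline void
pkt4_work_noop(struct rte_mbuf **pkts, void *arg)
{
	(void) pkts;	/* optimized 4-packet work: nothing to do either */
	(void) arg;
}

/*
 * Expands to a static function port_in_ah_noop() that calls the 4-packet
 * work function on groups of four, the single-packet one on the remainder,
 * and then builds a mask that keeps all n_pkts packets.
 */
PIPELINE_PORT_IN_AH(port_in_ah_noop, pkt_work_noop, pkt4_work_noop);

/* port_in_ah_noop can then be plugged into
 * rte_pipeline_port_in_params.f_action, as the back-end does below. */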


@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,183 +31,17 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "pipeline_passthrough.h"
#include "pipeline_passthrough_be.h"
#include <rte_malloc.h>
#include <rte_log.h>
static struct pipeline_fe_ops pipeline_passthrough_fe_ops = {
.f_init = NULL,
.f_free = NULL,
.cmds = NULL,
};
#include <rte_port_ring.h>
#include <rte_table_stub.h>
#include <rte_pipeline.h>
#include "main.h"
void
app_main_loop_pipeline_passthrough(void) {
struct rte_pipeline_params pipeline_params = {
.name = "pipeline",
.socket_id = rte_socket_id(),
};
struct rte_pipeline *p;
uint32_t port_in_id[APP_MAX_PORTS];
uint32_t port_out_id[APP_MAX_PORTS];
uint32_t table_id[APP_MAX_PORTS];
uint32_t i;
uint32_t core_id = rte_lcore_id();
struct app_core_params *core_params = app_get_core_params(core_id);
if ((core_params == NULL) || (core_params->core_type != APP_CORE_PT))
rte_panic("Core %u misconfiguration\n", core_id);
RTE_LOG(INFO, USER1, "Core %u is doing pass-through\n", core_id);
/* Pipeline configuration */
p = rte_pipeline_create(&pipeline_params);
if (p == NULL)
rte_panic("%s: Unable to configure the pipeline\n", __func__);
/* Input port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_reader_params port_ring_params = {
.ring = app.rings[core_params->swq_in[i]],
};
struct rte_pipeline_port_in_params port_params = {
.ops = &rte_port_ring_reader_ops,
.arg_create = (void *) &port_ring_params,
.f_action = NULL,
.arg_ah = NULL,
.burst_size = app.bsz_swq_rd,
};
if (rte_pipeline_port_in_create(p, &port_params,
&port_in_id[i])) {
rte_panic("%s: Unable to configure input port for "
"ring %d\n", __func__, i);
}
}
/* Output port configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_port_ring_writer_params port_ring_params = {
.ring = app.rings[core_params->swq_out[i]],
.tx_burst_sz = app.bsz_swq_wr,
};
struct rte_pipeline_port_out_params port_params = {
.ops = &rte_port_ring_writer_ops,
.arg_create = (void *) &port_ring_params,
.f_action = NULL,
.f_action_bulk = NULL,
.arg_ah = NULL,
};
if (rte_pipeline_port_out_create(p, &port_params,
&port_out_id[i])) {
rte_panic("%s: Unable to configure output port for "
"ring %d\n", __func__, i);
}
}
/* Table configuration */
for (i = 0; i < app.n_ports; i++) {
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_stub_ops,
.arg_create = NULL,
.f_action_hit = NULL,
.f_action_miss = NULL,
.arg_ah = NULL,
.action_data_size = 0,
};
if (rte_pipeline_table_create(p, &table_params, &table_id[i]))
rte_panic("%s: Unable to configure table %u\n",
__func__, i);
}
/* Interconnecting ports and tables */
for (i = 0; i < app.n_ports; i++) {
if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
table_id[i])) {
rte_panic("%s: Unable to connect input port %u to "
"table %u\n", __func__, port_in_id[i],
table_id[i]);
}
}
/* Add entries to tables */
for (i = 0; i < app.n_ports; i++) {
struct rte_pipeline_table_entry default_entry = {
.action = RTE_PIPELINE_ACTION_PORT,
{.port_id = port_out_id[i]},
};
struct rte_pipeline_table_entry *default_entry_ptr;
if (rte_pipeline_table_default_entry_add(p, table_id[i],
&default_entry, &default_entry_ptr))
rte_panic("%s: Unable to add default entry to "
"table %u\n", __func__, table_id[i]);
}
/* Enable input ports */
for (i = 0; i < app.n_ports; i++)
if (rte_pipeline_port_in_enable(p, port_in_id[i]))
rte_panic("Unable to enable input port %u\n",
port_in_id[i]);
/* Check pipeline consistency */
if (rte_pipeline_check(p) < 0)
rte_panic("%s: Pipeline consistency check failed\n", __func__);
/* Run-time */
for (i = 0; ; i++) {
rte_pipeline_run(p);
if ((i & APP_FLUSH) == 0)
rte_pipeline_flush(p);
}
}
void
app_main_loop_passthrough(void) {
struct app_mbuf_array *m;
uint32_t i;
uint32_t core_id = rte_lcore_id();
struct app_core_params *core_params = app_get_core_params(core_id);
if ((core_params == NULL) || (core_params->core_type != APP_CORE_PT))
rte_panic("Core %u misconfiguration\n", core_id);
RTE_LOG(INFO, USER1, "Core %u is doing pass-through (no pipeline)\n",
core_id);
m = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (m == NULL)
rte_panic("%s: cannot allocate buffer space\n", __func__);
for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) {
int ret;
ret = rte_ring_sc_dequeue_bulk(
app.rings[core_params->swq_in[i]],
(void **) m->array,
app.bsz_swq_rd);
if (ret == -ENOENT)
continue;
do {
ret = rte_ring_sp_enqueue_bulk(
app.rings[core_params->swq_out[i]],
(void **) m->array,
app.bsz_swq_wr);
} while (ret < 0);
}
}
struct pipeline_type pipeline_passthrough = {
.name = "PASS-THROUGH",
.be_ops = &pipeline_passthrough_be_ops,
.fe_ops = &pipeline_passthrough_fe_ops,
};


@@ -0,0 +1,41 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __INCLUDE_PIPELINE_PASSTHROUGH_H__
#define __INCLUDE_PIPELINE_PASSTHROUGH_H__
#include "pipeline.h"
extern struct pipeline_type pipeline_passthrough;
#endif


@@ -0,0 +1,772 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_byteorder.h>
#include <rte_table_stub.h>
#include <rte_table_hash.h>
#include <rte_pipeline.h>
#include "pipeline_passthrough_be.h"
#include "pipeline_actions_common.h"
#include "hash_func.h"
enum flow_key_type {
FLOW_KEY_QINQ,
FLOW_KEY_IPV4_5TUPLE,
FLOW_KEY_IPV6_5TUPLE,
};
struct pipeline_passthrough {
struct pipeline p;
uint32_t key_type_valid;
enum flow_key_type key_type;
uint32_t key_offset_rd;
uint32_t key_offset_wr;
uint32_t hash_offset;
rte_table_hash_op_hash f_hash;
rte_pipeline_port_in_action_handler f_port_in_ah;
} __rte_cache_aligned;
static pipeline_msg_req_handler handlers[] = {
[PIPELINE_MSG_REQ_PING] =
pipeline_msg_req_ping_handler,
[PIPELINE_MSG_REQ_STATS_PORT_IN] =
pipeline_msg_req_stats_port_in_handler,
[PIPELINE_MSG_REQ_STATS_PORT_OUT] =
pipeline_msg_req_stats_port_out_handler,
[PIPELINE_MSG_REQ_STATS_TABLE] =
pipeline_msg_req_stats_table_handler,
[PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
pipeline_msg_req_port_in_enable_handler,
[PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
pipeline_msg_req_port_in_disable_handler,
[PIPELINE_MSG_REQ_CUSTOM] =
pipeline_msg_req_invalid_handler,
};
static inline void
pkt_work_key_qinq(
struct rte_mbuf *pkt,
void *arg)
{
struct pipeline_passthrough *p_pt = arg;
uint32_t key_offset_rd = p_pt->key_offset_rd;
uint32_t key_offset_wr = p_pt->key_offset_wr;
uint32_t hash_offset = p_pt->hash_offset;
uint64_t *key_rd = RTE_MBUF_METADATA_UINT64_PTR(pkt, key_offset_rd);
uint64_t *key_wr = RTE_MBUF_METADATA_UINT64_PTR(pkt, key_offset_wr);
uint32_t *hash = RTE_MBUF_METADATA_UINT32_PTR(pkt, hash_offset);
/* Read */
uint64_t key_qinq = *key_rd & rte_bswap64(0x00000FFF00000FFFLLU);
/* Compute */
uint32_t hash_qinq = p_pt->f_hash(&key_qinq, 8, 0);
/* Write */
*key_wr = key_qinq;
*hash = hash_qinq;
}
static inline void
pkt4_work_key_qinq(
struct rte_mbuf **pkt,
void *arg)
{
struct pipeline_passthrough *p_pt = arg;
uint32_t key_offset_rd = p_pt->key_offset_rd;
uint32_t key_offset_wr = p_pt->key_offset_wr;
uint32_t hash_offset = p_pt->hash_offset;
uint64_t *key_rd0 = RTE_MBUF_METADATA_UINT64_PTR(pkt[0], key_offset_rd);
uint64_t *key_wr0 = RTE_MBUF_METADATA_UINT64_PTR(pkt[0], key_offset_wr);
uint32_t *hash0 = RTE_MBUF_METADATA_UINT32_PTR(pkt[0], hash_offset);
uint64_t *key_rd1 = RTE_MBUF_METADATA_UINT64_PTR(pkt[1], key_offset_rd);
uint64_t *key_wr1 = RTE_MBUF_METADATA_UINT64_PTR(pkt[1], key_offset_wr);
uint32_t *hash1 = RTE_MBUF_METADATA_UINT32_PTR(pkt[1], hash_offset);
uint64_t *key_rd2 = RTE_MBUF_METADATA_UINT64_PTR(pkt[2], key_offset_rd);
uint64_t *key_wr2 = RTE_MBUF_METADATA_UINT64_PTR(pkt[2], key_offset_wr);
uint32_t *hash2 = RTE_MBUF_METADATA_UINT32_PTR(pkt[2], hash_offset);
uint64_t *key_rd3 = RTE_MBUF_METADATA_UINT64_PTR(pkt[3], key_offset_rd);
uint64_t *key_wr3 = RTE_MBUF_METADATA_UINT64_PTR(pkt[3], key_offset_wr);
uint32_t *hash3 = RTE_MBUF_METADATA_UINT32_PTR(pkt[3], hash_offset);
/* Read */
uint64_t key_qinq0 = *key_rd0 & rte_bswap64(0x00000FFF00000FFFLLU);
uint64_t key_qinq1 = *key_rd1 & rte_bswap64(0x00000FFF00000FFFLLU);
uint64_t key_qinq2 = *key_rd2 & rte_bswap64(0x00000FFF00000FFFLLU);
uint64_t key_qinq3 = *key_rd3 & rte_bswap64(0x00000FFF00000FFFLLU);
/* Compute */
uint32_t hash_qinq0 = p_pt->f_hash(&key_qinq0, 8, 0);
uint32_t hash_qinq1 = p_pt->f_hash(&key_qinq1, 8, 0);
uint32_t hash_qinq2 = p_pt->f_hash(&key_qinq2, 8, 0);
uint32_t hash_qinq3 = p_pt->f_hash(&key_qinq3, 8, 0);
/* Write */
*key_wr0 = key_qinq0;
*key_wr1 = key_qinq1;
*key_wr2 = key_qinq2;
*key_wr3 = key_qinq3;
*hash0 = hash_qinq0;
*hash1 = hash_qinq1;
*hash2 = hash_qinq2;
*hash3 = hash_qinq3;
}
PIPELINE_PORT_IN_AH(port_in_ah_key_qinq, pkt_work_key_qinq, pkt4_work_key_qinq);
static inline void
pkt_work_key_ipv4(
struct rte_mbuf *pkt,
void *arg)
{
struct pipeline_passthrough *p_pt = arg;
uint32_t key_offset_rd = p_pt->key_offset_rd;
uint32_t key_offset_wr = p_pt->key_offset_wr;
uint32_t hash_offset = p_pt->hash_offset;
uint64_t *key_rd = RTE_MBUF_METADATA_UINT64_PTR(pkt, key_offset_rd);
uint64_t *key_wr = RTE_MBUF_METADATA_UINT64_PTR(pkt, key_offset_wr);
uint32_t *hash = RTE_MBUF_METADATA_UINT32_PTR(pkt, hash_offset);
uint64_t key_ipv4[2];
uint32_t hash_ipv4;
/* Read */
key_ipv4[0] = key_rd[0] & rte_bswap64(0x00FF0000FFFFFFFFLLU);
key_ipv4[1] = key_rd[1];
/* Compute */
hash_ipv4 = p_pt->f_hash(key_ipv4, 16, 0);
/* Write */
key_wr[0] = key_ipv4[0];
key_wr[1] = key_ipv4[1];
*hash = hash_ipv4;
}
static inline void
pkt4_work_key_ipv4(
struct rte_mbuf **pkt,
void *arg)
{
struct pipeline_passthrough *p_pt = arg;
uint32_t key_offset_rd = p_pt->key_offset_rd;
uint32_t key_offset_wr = p_pt->key_offset_wr;
uint32_t hash_offset = p_pt->hash_offset;
uint64_t *key_rd0 = RTE_MBUF_METADATA_UINT64_PTR(pkt[0], key_offset_rd);
uint64_t *key_wr0 = RTE_MBUF_METADATA_UINT64_PTR(pkt[0], key_offset_wr);
uint32_t *hash0 = RTE_MBUF_METADATA_UINT32_PTR(pkt[0], hash_offset);
uint64_t *key_rd1 = RTE_MBUF_METADATA_UINT64_PTR(pkt[1], key_offset_rd);
uint64_t *key_wr1 = RTE_MBUF_METADATA_UINT64_PTR(pkt[1], key_offset_wr);
uint32_t *hash1 = RTE_MBUF_METADATA_UINT32_PTR(pkt[1], hash_offset);
uint64_t *key_rd2 = RTE_MBUF_METADATA_UINT64_PTR(pkt[2], key_offset_rd);
uint64_t *key_wr2 = RTE_MBUF_METADATA_UINT64_PTR(pkt[2], key_offset_wr);
uint32_t *hash2 = RTE_MBUF_METADATA_UINT32_PTR(pkt[2], hash_offset);
uint64_t *key_rd3 = RTE_MBUF_METADATA_UINT64_PTR(pkt[3], key_offset_rd);
uint64_t *key_wr3 = RTE_MBUF_METADATA_UINT64_PTR(pkt[3], key_offset_wr);
uint32_t *hash3 = RTE_MBUF_METADATA_UINT32_PTR(pkt[3], hash_offset);
uint64_t key_ipv4_0[2];
uint64_t key_ipv4_1[2];
uint64_t key_ipv4_2[2];
uint64_t key_ipv4_3[2];
uint32_t hash_ipv4_0;
uint32_t hash_ipv4_1;
uint32_t hash_ipv4_2;
uint32_t hash_ipv4_3;
/* Read */
key_ipv4_0[0] = key_rd0[0] & rte_bswap64(0x00FF0000FFFFFFFFLLU);
key_ipv4_1[0] = key_rd1[0] & rte_bswap64(0x00FF0000FFFFFFFFLLU);
key_ipv4_2[0] = key_rd2[0] & rte_bswap64(0x00FF0000FFFFFFFFLLU);
key_ipv4_3[0] = key_rd3[0] & rte_bswap64(0x00FF0000FFFFFFFFLLU);
key_ipv4_0[1] = key_rd0[1];
key_ipv4_1[1] = key_rd1[1];
key_ipv4_2[1] = key_rd2[1];
key_ipv4_3[1] = key_rd3[1];
/* Compute */
hash_ipv4_0 = p_pt->f_hash(key_ipv4_0, 16, 0);
hash_ipv4_1 = p_pt->f_hash(key_ipv4_1, 16, 0);
hash_ipv4_2 = p_pt->f_hash(key_ipv4_2, 16, 0);
hash_ipv4_3 = p_pt->f_hash(key_ipv4_3, 16, 0);
/* Write */
key_wr0[0] = key_ipv4_0[0];
key_wr1[0] = key_ipv4_1[0];
key_wr2[0] = key_ipv4_2[0];
key_wr3[0] = key_ipv4_3[0];
key_wr0[1] = key_ipv4_0[1];
key_wr1[1] = key_ipv4_1[1];
key_wr2[1] = key_ipv4_2[1];
key_wr3[1] = key_ipv4_3[1];
*hash0 = hash_ipv4_0;
*hash1 = hash_ipv4_1;
*hash2 = hash_ipv4_2;
*hash3 = hash_ipv4_3;
}
PIPELINE_PORT_IN_AH(port_in_ah_key_ipv4, pkt_work_key_ipv4, pkt4_work_key_ipv4);
static inline void
pkt_work_key_ipv6(
struct rte_mbuf *pkt,
void *arg)
{
struct pipeline_passthrough *p_pt = arg;
uint32_t key_offset_rd = p_pt->key_offset_rd;
uint32_t key_offset_wr = p_pt->key_offset_wr;
uint32_t hash_offset = p_pt->hash_offset;
uint64_t *key_rd = RTE_MBUF_METADATA_UINT64_PTR(pkt, key_offset_rd);
uint64_t *key_wr = RTE_MBUF_METADATA_UINT64_PTR(pkt, key_offset_wr);
uint32_t *hash = RTE_MBUF_METADATA_UINT32_PTR(pkt, hash_offset);
uint64_t key_ipv6[8];
uint32_t hash_ipv6;
/* Read */
key_ipv6[0] = key_rd[0] & rte_bswap64(0x0000FF00FFFFFFFFLLU);
key_ipv6[1] = key_rd[1];
key_ipv6[2] = key_rd[2];
key_ipv6[3] = key_rd[3];
key_ipv6[4] = key_rd[4];
key_ipv6[5] = 0;
key_ipv6[6] = 0;
key_ipv6[7] = 0;
/* Compute */
hash_ipv6 = p_pt->f_hash(key_ipv6, 64, 0);
/* Write */
key_wr[0] = key_ipv6[0];
key_wr[1] = key_ipv6[1];
key_wr[2] = key_ipv6[2];
key_wr[3] = key_ipv6[3];
key_wr[4] = key_ipv6[4];
key_wr[5] = 0;
key_wr[6] = 0;
key_wr[7] = 0;
*hash = hash_ipv6;
}
static inline void
pkt4_work_key_ipv6(
struct rte_mbuf **pkt,
void *arg)
{
struct pipeline_passthrough *p_pt = arg;
uint32_t key_offset_rd = p_pt->key_offset_rd;
uint32_t key_offset_wr = p_pt->key_offset_wr;
uint32_t hash_offset = p_pt->hash_offset;
uint64_t *key_rd0 = RTE_MBUF_METADATA_UINT64_PTR(pkt[0], key_offset_rd);
uint64_t *key_wr0 = RTE_MBUF_METADATA_UINT64_PTR(pkt[0], key_offset_wr);
uint32_t *hash0 = RTE_MBUF_METADATA_UINT32_PTR(pkt[0], hash_offset);
uint64_t *key_rd1 = RTE_MBUF_METADATA_UINT64_PTR(pkt[1], key_offset_rd);
uint64_t *key_wr1 = RTE_MBUF_METADATA_UINT64_PTR(pkt[1], key_offset_wr);
uint32_t *hash1 = RTE_MBUF_METADATA_UINT32_PTR(pkt[1], hash_offset);
uint64_t *key_rd2 = RTE_MBUF_METADATA_UINT64_PTR(pkt[2], key_offset_rd);
uint64_t *key_wr2 = RTE_MBUF_METADATA_UINT64_PTR(pkt[2], key_offset_wr);
uint32_t *hash2 = RTE_MBUF_METADATA_UINT32_PTR(pkt[2], hash_offset);
uint64_t *key_rd3 = RTE_MBUF_METADATA_UINT64_PTR(pkt[3], key_offset_rd);
uint64_t *key_wr3 = RTE_MBUF_METADATA_UINT64_PTR(pkt[3], key_offset_wr);
uint32_t *hash3 = RTE_MBUF_METADATA_UINT32_PTR(pkt[3], hash_offset);
uint64_t key_ipv6_0[8];
uint64_t key_ipv6_1[8];
uint64_t key_ipv6_2[8];
uint64_t key_ipv6_3[8];
uint32_t hash_ipv6_0;
uint32_t hash_ipv6_1;
uint32_t hash_ipv6_2;
uint32_t hash_ipv6_3;
/* Read */
key_ipv6_0[0] = key_rd0[0] & rte_bswap64(0x0000FF00FFFFFFFFLLU);
key_ipv6_1[0] = key_rd1[0] & rte_bswap64(0x0000FF00FFFFFFFFLLU);
key_ipv6_2[0] = key_rd2[0] & rte_bswap64(0x0000FF00FFFFFFFFLLU);
key_ipv6_3[0] = key_rd3[0] & rte_bswap64(0x0000FF00FFFFFFFFLLU);
key_ipv6_0[1] = key_rd0[1];
key_ipv6_1[1] = key_rd1[1];
key_ipv6_2[1] = key_rd2[1];
key_ipv6_3[1] = key_rd3[1];
key_ipv6_0[2] = key_rd0[2];
key_ipv6_1[2] = key_rd1[2];
key_ipv6_2[2] = key_rd2[2];
key_ipv6_3[2] = key_rd3[2];
key_ipv6_0[3] = key_rd0[3];
key_ipv6_1[3] = key_rd1[3];
key_ipv6_2[3] = key_rd2[3];
key_ipv6_3[3] = key_rd3[3];
key_ipv6_0[4] = key_rd0[4];
key_ipv6_1[4] = key_rd1[4];
key_ipv6_2[4] = key_rd2[4];
key_ipv6_3[4] = key_rd3[4];
key_ipv6_0[5] = 0;
key_ipv6_1[5] = 0;
key_ipv6_2[5] = 0;
key_ipv6_3[5] = 0;
key_ipv6_0[6] = 0;
key_ipv6_1[6] = 0;
key_ipv6_2[6] = 0;
key_ipv6_3[6] = 0;
key_ipv6_0[7] = 0;
key_ipv6_1[7] = 0;
key_ipv6_2[7] = 0;
key_ipv6_3[7] = 0;
/* Compute */
hash_ipv6_0 = p_pt->f_hash(key_ipv6_0, 64, 0);
hash_ipv6_1 = p_pt->f_hash(key_ipv6_1, 64, 0);
hash_ipv6_2 = p_pt->f_hash(key_ipv6_2, 64, 0);
hash_ipv6_3 = p_pt->f_hash(key_ipv6_3, 64, 0);
/* Write */
key_wr0[0] = key_ipv6_0[0];
key_wr1[0] = key_ipv6_1[0];
key_wr2[0] = key_ipv6_2[0];
key_wr3[0] = key_ipv6_3[0];
key_wr0[1] = key_ipv6_0[1];
key_wr1[1] = key_ipv6_1[1];
key_wr2[1] = key_ipv6_2[1];
key_wr3[1] = key_ipv6_3[1];
key_wr0[2] = key_ipv6_0[2];
key_wr1[2] = key_ipv6_1[2];
key_wr2[2] = key_ipv6_2[2];
key_wr3[2] = key_ipv6_3[2];
key_wr0[3] = key_ipv6_0[3];
key_wr1[3] = key_ipv6_1[3];
key_wr2[3] = key_ipv6_2[3];
key_wr3[3] = key_ipv6_3[3];
key_wr0[4] = key_ipv6_0[4];
key_wr1[4] = key_ipv6_1[4];
key_wr2[4] = key_ipv6_2[4];
key_wr3[4] = key_ipv6_3[4];
key_wr0[5] = 0;
key_wr1[5] = 0;
key_wr2[5] = 0;
key_wr3[5] = 0;
key_wr0[6] = 0;
key_wr1[6] = 0;
key_wr2[6] = 0;
key_wr3[6] = 0;
key_wr0[7] = 0;
key_wr1[7] = 0;
key_wr2[7] = 0;
key_wr3[7] = 0;
*hash0 = hash_ipv6_0;
*hash1 = hash_ipv6_1;
*hash2 = hash_ipv6_2;
*hash3 = hash_ipv6_3;
}
PIPELINE_PORT_IN_AH(port_in_ah_key_ipv6, pkt_work_key_ipv6, pkt4_work_key_ipv6);
static int
pipeline_passthrough_parse_args(struct pipeline_passthrough *p,
struct pipeline_params *params)
{
uint32_t key_type_present = 0;
uint32_t key_offset_rd_present = 0;
uint32_t key_offset_wr_present = 0;
uint32_t hash_offset_present = 0;
uint32_t i;
for (i = 0; i < params->n_args; i++) {
char *arg_name = params->args_name[i];
char *arg_value = params->args_value[i];
/* key_type */
if (strcmp(arg_name, "key_type") == 0) {
if (key_type_present)
return -1;
key_type_present = 1;
if ((strcmp(arg_value, "q-in-q") == 0) ||
(strcmp(arg_value, "qinq") == 0))
p->key_type = FLOW_KEY_QINQ;
else if (strcmp(arg_value, "ipv4_5tuple") == 0)
p->key_type = FLOW_KEY_IPV4_5TUPLE;
else if (strcmp(arg_value, "ipv6_5tuple") == 0)
p->key_type = FLOW_KEY_IPV6_5TUPLE;
else
return -1;
p->key_type_valid = 1;
continue;
}
/* key_offset_rd */
if (strcmp(arg_name, "key_offset_rd") == 0) {
if (key_offset_rd_present)
return -1;
key_offset_rd_present = 1;
p->key_offset_rd = atoi(arg_value);
continue;
}
/* key_offset_wr */
if (strcmp(arg_name, "key_offset_wr") == 0) {
if (key_offset_wr_present)
return -1;
key_offset_wr_present = 1;
p->key_offset_wr = atoi(arg_value);
continue;
}
/* hash_offset */
if (strcmp(arg_name, "hash_offset") == 0) {
if (hash_offset_present)
return -1;
hash_offset_present = 1;
p->hash_offset = atoi(arg_value);
continue;
}
/* any other */
return -1;
}
/* Check that mandatory arguments are present */
if ((key_offset_rd_present != key_type_present) ||
(key_offset_wr_present != key_type_present) ||
(hash_offset_present != key_type_present))
return -1;
return 0;
}
static void*
pipeline_passthrough_init(struct pipeline_params *params,
__rte_unused void *arg)
{
struct pipeline *p;
struct pipeline_passthrough *p_pt;
uint32_t size, i;
/* Check input arguments */
if ((params == NULL) ||
(params->n_ports_in == 0) ||
(params->n_ports_out == 0) ||
(params->n_ports_in < params->n_ports_out) ||
(params->n_ports_in % params->n_ports_out))
return NULL;
/* Memory allocation */
size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_passthrough));
p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
p_pt = (struct pipeline_passthrough *) p;
if (p == NULL)
return NULL;
strcpy(p->name, params->name);
p->log_level = params->log_level;
PLOG(p, HIGH, "Pass-through");
/* Parse arguments */
if (pipeline_passthrough_parse_args(p_pt, params)) {
rte_free(p);
return NULL;
}
if (p_pt->key_type_valid == 0) {
p_pt->f_hash = NULL;
p_pt->f_port_in_ah = NULL;
} else
switch (p_pt->key_type) {
case FLOW_KEY_QINQ:
p_pt->f_hash = hash_default_key8;
p_pt->f_port_in_ah = port_in_ah_key_qinq;
break;
case FLOW_KEY_IPV4_5TUPLE:
p_pt->f_hash = hash_default_key16;
p_pt->f_port_in_ah = port_in_ah_key_ipv4;
break;
case FLOW_KEY_IPV6_5TUPLE:
p_pt->f_hash = hash_default_key64;
p_pt->f_port_in_ah = port_in_ah_key_ipv6;
break;
default:
p_pt->f_hash = NULL;
p_pt->f_port_in_ah = NULL;
}
/* Pipeline */
{
struct rte_pipeline_params pipeline_params = {
.name = "PASS-THROUGH",
.socket_id = params->socket_id,
.offset_port_id = 0,
};
p->p = rte_pipeline_create(&pipeline_params);
if (p->p == NULL) {
rte_free(p);
return NULL;
}
}
/* Input ports */
p->n_ports_in = params->n_ports_in;
for (i = 0; i < p->n_ports_in; i++) {
struct rte_pipeline_port_in_params port_params = {
.ops = pipeline_port_in_params_get_ops(
&params->port_in[i]),
.arg_create = pipeline_port_in_params_convert(
&params->port_in[i]),
.f_action = p_pt->f_port_in_ah,
.arg_ah = p_pt,
.burst_size = params->port_in[i].burst_size,
};
int status = rte_pipeline_port_in_create(p->p,
&port_params,
&p->port_in_id[i]);
if (status) {
rte_pipeline_free(p->p);
rte_free(p);
return NULL;
}
}
/* Output ports */
p->n_ports_out = params->n_ports_out;
for (i = 0; i < p->n_ports_out; i++) {
struct rte_pipeline_port_out_params port_params = {
.ops = pipeline_port_out_params_get_ops(
&params->port_out[i]),
.arg_create = pipeline_port_out_params_convert(
&params->port_out[i]),
.f_action = NULL,
.f_action_bulk = NULL,
.arg_ah = NULL,
};
int status = rte_pipeline_port_out_create(p->p,
&port_params,
&p->port_out_id[i]);
if (status) {
rte_pipeline_free(p->p);
rte_free(p);
return NULL;
}
}
/* Tables */
p->n_tables = p->n_ports_in;
for (i = 0; i < p->n_ports_in; i++) {
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_stub_ops,
.arg_create = NULL,
.f_action_hit = NULL,
.f_action_miss = NULL,
.arg_ah = NULL,
.action_data_size = 0,
};
int status = rte_pipeline_table_create(p->p,
&table_params,
&p->table_id[i]);
if (status) {
rte_pipeline_free(p->p);
rte_free(p);
return NULL;
}
}
/* Connecting input ports to tables */
for (i = 0; i < p->n_ports_in; i++) {
int status = rte_pipeline_port_in_connect_to_table(p->p,
p->port_in_id[i],
p->table_id[i]);
if (status) {
rte_pipeline_free(p->p);
rte_free(p);
return NULL;
}
}
/* Add entries to tables */
for (i = 0; i < p->n_ports_in; i++) {
struct rte_pipeline_table_entry default_entry = {
.action = RTE_PIPELINE_ACTION_PORT,
{.port_id = p->port_out_id[
i / (p->n_ports_in / p->n_ports_out)]},
};
struct rte_pipeline_table_entry *default_entry_ptr;
int status = rte_pipeline_table_default_entry_add(p->p,
p->table_id[i],
&default_entry,
&default_entry_ptr);
if (status) {
rte_pipeline_free(p->p);
rte_free(p);
return NULL;
}
}
/* Enable input ports */
for (i = 0; i < p->n_ports_in; i++) {
int status = rte_pipeline_port_in_enable(p->p,
p->port_in_id[i]);
if (status) {
rte_pipeline_free(p->p);
rte_free(p);
return NULL;
}
}
/* Check pipeline consistency */
if (rte_pipeline_check(p->p) < 0) {
rte_pipeline_free(p->p);
rte_free(p);
return NULL;
}
/* Message queues */
p->n_msgq = params->n_msgq;
for (i = 0; i < p->n_msgq; i++)
p->msgq_in[i] = params->msgq_in[i];
for (i = 0; i < p->n_msgq; i++)
p->msgq_out[i] = params->msgq_out[i];
/* Message handlers */
memcpy(p->handlers, handlers, sizeof(p->handlers));
return p;
}
static int
pipeline_passthrough_free(void *pipeline)
{
struct pipeline *p = (struct pipeline *) pipeline;
/* Check input arguments */
if (p == NULL)
return -1;
/* Free resources */
rte_pipeline_free(p->p);
rte_free(p);
return 0;
}
static int
pipeline_passthrough_timer(void *pipeline)
{
struct pipeline *p = (struct pipeline *) pipeline;
pipeline_msg_req_handle(p);
rte_pipeline_flush(p->p);
return 0;
}
static int
pipeline_passthrough_track(void *pipeline, uint32_t port_in, uint32_t *port_out)
{
struct pipeline *p = (struct pipeline *) pipeline;
/* Check input arguments */
if ((p == NULL) ||
(port_in >= p->n_ports_in) ||
(port_out == NULL))
return -1;
*port_out = port_in / (p->n_ports_in / p->n_ports_out);
return 0;
}
struct pipeline_be_ops pipeline_passthrough_be_ops = {
.f_init = pipeline_passthrough_init,
.f_free = pipeline_passthrough_free,
.f_run = NULL,
.f_timer = pipeline_passthrough_timer,
.f_track = pipeline_passthrough_track,
};
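
For reference, pipeline_passthrough_parse_args() accepts the four arguments below, which must either all be present or all be absent; the sketch lists them with illustrative values (the names and accepted key types come from the parser above, while the offsets are placeholders that would normally arrive through the application's pipeline configuration).

/* Illustrative only: an argument set the parser above would accept. */
static const char *passthrough_arg_names[] = {
	"key_type",		/* "qinq" / "q-in-q", "ipv4_5tuple" or "ipv6_5tuple" */
	"key_offset_rd",	/* mbuf offset the flow key fields are read from */
	"key_offset_wr",	/* mbuf offset the masked key is written back to */
	"hash_offset",		/* mbuf offset the 32-bit signature is stored at */
};

static const char *passthrough_arg_values[] = {
	"ipv4_5tuple", "64", "128", "192",	/* offsets are placeholder values */
};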


@@ -0,0 +1,41 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __INCLUDE_PIPELINE_PASSTHROUGH_BE_H__
#define __INCLUDE_PIPELINE_PASSTHROUGH_BE_H__
#include "pipeline_common_be.h"
extern struct pipeline_be_ops pipeline_passthrough_be_ops;
#endif