# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2015 Intel Corporation

include $(RTE_SDK)/mk/rte.vars.mk
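# rte.vars.mk pulls in the build configuration, including the CONFIG_RTE_*
# options tested below; RTE_SDK is expected to point at the root of the
# DPDK tree.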

ifeq ($(CONFIG_RTE_TEST_PMD),y)

#
# library name
#
APP = testpmd

CFLAGS += -DALLOW_EXPERIMENTAL_API
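# testpmd uses APIs still marked experimental; the define above opts in so
# that their __rte_experimental markers do not break the build under
# $(WERROR_FLAGS).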
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -Wno-deprecated-declarations
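# Deprecation warnings are silenced here, presumably because testpmd still
# exercises some deprecated APIs; otherwise $(WERROR_FLAGS) would turn
# those warnings into errors.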

#
# all sources are stored in SRCS-y
#
SRCS-y := testpmd.c
SRCS-y += parameters.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_mtr.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_tm.c
SRCS-y += config.c
SRCS-y += iofwd.c
SRCS-y += macfwd.c
SRCS-y += macswap.c
SRCS-y += flowgen.c
SRCS-y += rxonly.c
SRCS-y += txonly.c
SRCS-y += csumonly.c
SRCS-y += icmpecho.c

#
# noisy_vnf.c implements the "noisy neighbour" forwarding mode, which
# simulates the more realistic behaviour of a guest machine engaged in
# receiving and sending packets as a Virtual Network Function (VNF).
# The goal is a simple way of measuring the performance impact on cache
# and memory footprint utilization from various VNFs co-located on the
# same host machine. To that end it:
#
# * Buffers packets in a FIFO. A FIFO is created per TX queue to buffer
#   received packets; once it overflows, the overflowing packets are put
#   into the actual TX queue (after all other packet processing).
#     --noisy-tx-sw-buffer-size [packets]
#       Size of the per-TX-queue FIFO, in packets.
#     --noisy-tx-sw-buffer-flushtime [delay]
#       Flush the FIFO if no packets have been seen for [delay]
#       milliseconds; as long as packets are seen, the timer is reset.
#
# * Simulates route lookups (random memory reads) in tables that can be
#   quite large, as well as route-hit statistics updates. This mimics a
#   whole-stack traversal and trashes the cache; memory access is random.
#   A buffer is allocated and read/written as specified by:
#     --noisy-lkup-memory [size]
#       Size of the VNF-internal memory (MB), allocated via rte_malloc
#       (hugepages), in which the random reads/writes are done.
#     --noisy-lkup-num-writes [num]
#       Number of random writes per packet, simulating hit-flag updates:
#       64 bits per write, all writes in different cache lines.
#     --noisy-lkup-num-reads [num]
#       Number of random reads per packet, simulating FIB/table lookups:
#       64 bits per read, all reads in different cache lines.
#     --noisy-lkup-num-reads-writes [num]
#       Number of random read-writes per packet, simulating stats
#       updates: 64 bits per read-write, all in different cache lines.
#
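# As an illustrative sketch only, a run exercising this mode might look as
# follows; the "noisy" forward-mode name and all parameter values here are
# assumptions, not taken from this Makefile:
#   ./testpmd -c 0x3 -n 4 -- -i --forward-mode=noisy \
#       --noisy-tx-sw-buffer-size=512 --noisy-tx-sw-buffer-flushtime=10 \
#       --noisy-lkup-memory=128 --noisy-lkup-num-writes=16 \
#       --noisy-lkup-num-reads=16 --noisy-lkup-num-reads-writes=8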
SRCS-y += noisy_vnf.c
SRCS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ieee1588fwd.c
SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_cmd.c
SRCS-y += util.c
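# Note: the SRCS-$(CONFIG_...) pattern above is the usual DPDK make idiom;
# when the option is "y" the file is appended to SRCS-y and compiled,
# otherwise it lands in SRCS-n, which the build system ignores.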

ifeq ($(CONFIG_RTE_LIBRTE_PMD_SOFTNIC),y)
#
# softnicfwd.c implements the traffic management forwarding mode, a
# forwarding engine that demonstrates the use of the ethdev traffic
# management APIs and the softnic PMD for QoS traffic management.
#
# In this mode, a 5-level hierarchical tree of the QoS scheduler is built
# with ethdev TM APIs such as shaper profile add/delete, shared shaper
# add/update, node add/delete, hierarchy commit, etc. The tree has the
# following nodes: root node (x1, level 0), subport node (x1, level 1),
# pipe node (x4096, level 2), tc node (x16384, level 3) and queue node
# (x65536, level 4).
#
# At runtime, each received packet is first classified by mapping its
# header fields to a 5-tuple (HQoS subport, pipe, traffic class, queue
# within traffic class, and color), which is stored in the mbuf sched
# field. After classification, each packet is sent to the softnic port,
# which prioritizes the transmission of the received packets and sends
# them on to the output interface accordingly.
#
# Traffic management mode is enabled with a testpmd invocation such as:
#   ./testpmd -c c -n 4 --vdev \
#       'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i \
#       --forward-mode=tm
#
SRCS-y += softnicfwd.c
endif

ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
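# A likely rationale for the block below: testpmd calls driver-specific
# APIs directly, and in a shared build those PMD symbols are not pulled in
# automatically, so the libraries must be named explicitly.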

ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y)
LDLIBS += -lrte_pmd_bond
endif

ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS)$(CONFIG_RTE_LIBRTE_DPAA_PMD),yy)
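# Concatenating the two options and comparing against "yy" requires both
# the DPAA bus and the DPAA PMD to be enabled at the same time.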
LDLIBS += -lrte_pmd_dpaa
LDLIBS += -lrte_bus_dpaa
LDLIBS += -lrte_mempool_dpaa
endif

ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
LDLIBS += -lrte_pmd_ixgbe
endif

ifeq ($(CONFIG_RTE_LIBRTE_I40E_PMD),y)
LDLIBS += -lrte_pmd_i40e
endif

ifeq ($(CONFIG_RTE_LIBRTE_BNXT_PMD),y)
LDLIBS += -lrte_pmd_bnxt
endif

ifeq ($(CONFIG_RTE_LIBRTE_PMD_SOFTNIC),y)
LDLIBS += -lrte_pmd_softnic
endif

endif

include $(RTE_SDK)/mk/rte.app.mk
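# rte.app.mk supplies the generic application build and link rules that
# consume the APP, SRCS-y, CFLAGS and LDLIBS values set above.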

endif