/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
|
|
|
|
|
|
|
|
#ifndef _TESTPMD_H_
|
|
|
|
#define _TESTPMD_H_
|
|
|
|
|
2018-10-25 15:11:17 +00:00
|
|
|
#include <stdbool.h>
|
|
|
|
|
2017-07-07 00:04:24 +00:00
|
|
|
#include <rte_pci.h>
|
2017-10-26 10:06:08 +00:00
|
|
|
#include <rte_bus_pci.h>
|
2017-07-09 05:46:46 +00:00
|
|
|
#include <rte_gro.h>
|
2017-10-07 14:56:43 +00:00
|
|
|
#include <rte_gso.h>
|
2017-07-07 00:04:24 +00:00
|
|
|
|
2012-12-19 23:00:00 +00:00
|
|
|
/* Wildcard meaning "every probed port" in port iteration helpers. */
#define RTE_PORT_ALL            (~(portid_t)0x0)

/* Upper bounds for per-queue descriptor ring sizes configurable at runtime. */
#define RTE_TEST_RX_DESC_MAX    2048
#define RTE_TEST_TX_DESC_MAX    2048

/*
 * States of the per-port state machine kept in rte_port.port_status.
 * RTE_PORT_HANDLING marks a port in transition between two stable states.
 */
#define RTE_PORT_STOPPED        (uint16_t)0
#define RTE_PORT_STARTED        (uint16_t)1
#define RTE_PORT_CLOSED         (uint16_t)2
#define RTE_PORT_HANDLING       (uint16_t)3

/*
 * It is used to allocate the memory for hash key.
 * The hash key size is NIC dependent.
 */
#define RSS_HASH_KEY_LENGTH 64
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/*
 * Default size of the mbuf data buffer to receive standard 1518-byte
 * Ethernet frames in a mono-segment memory buffer.
 */
#define DEFAULT_MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
/**< Default size of mbuf data buffer. */

/*
 * The maximum number of segments per packet is used when creating
 * scattered transmit packets composed of a list of mbufs.
 */
#define RTE_MAX_SEGS_PER_PKT 255 /**< nb_segs is a 8-bit unsigned char. */

/* Upper bound and default for the number of packets handled per burst. */
#define MAX_PKT_BURST 512
#define DEF_PKT_BURST 32

/* Default per-lcore mempool cache size. */
#define DEF_MBUF_CACHE 250

/* Round "size" up to the next multiple of the CPU cache line size. */
#define RTE_CACHE_LINE_SIZE_ROUNDUP(size) \
	(RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE))

/* Sentinel socket values meaning "no NUMA/UMA constraint configured". */
#define NUMA_NO_CONFIG 0xFF
#define UMA_NO_CONFIG  0xFF
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/* Compact index types used throughout testpmd for cores, ports, queues
 * and forwarding streams. portid_t is 16-bit to match rte_ethdev port ids. */
typedef uint8_t  lcoreid_t;
typedef uint16_t portid_t;
typedef uint16_t queueid_t;
typedef uint16_t streamid_t;

/* Largest value representable by queueid_t (used to size per-queue arrays). */
#define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1)
|
|
|
|
|
2018-07-06 17:21:16 +00:00
|
|
|
/*
 * Compile-time flag for the softnic PMD support.
 * NOTE(review): SOFTNIC is *always* defined (as 1 or 0), so later tests must
 * use "#if SOFTNIC"; an "#ifdef SOFTNIC" test is always true — confirm intent.
 */
#if defined RTE_LIBRTE_PMD_SOFTNIC
#define SOFTNIC			1
#else
#define SOFTNIC			0
#endif
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/* How forwarding ports are paired together ("--port-topology" option). */
enum {
	PORT_TOPOLOGY_PAIRED,  /* port N <-> port N^1 */
	PORT_TOPOLOGY_CHAINED, /* port N -> port (N+1) % nb_ports */
	PORT_TOPOLOGY_LOOP,    /* port N -> port N */
};
|
|
|
|
|
2018-10-02 13:34:57 +00:00
|
|
|
/* Mempool allocation modes selected by "--mp-alloc" / "--mp-anon". */
enum {
	MP_ALLOC_NATIVE, /**< allocate and populate mempool natively */
	MP_ALLOC_ANON,
	/**< allocate mempool natively, but populate using anonymous memory */
	MP_ALLOC_XMEM,
	/**< allocate and populate mempool using anonymous memory */
	MP_ALLOC_XMEM_HUGE
	/**< allocate and populate mempool using anonymous hugepage memory */
};
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/**
 * The data structure associated with RX and TX packet burst statistics
 * that are recorded for each forwarding stream.
 */
struct pkt_burst_stats {
	/* pkt_burst_spread[n] counts the bursts that carried exactly n packets. */
	unsigned int pkt_burst_spread[MAX_PKT_BURST];
};
#endif
|
|
|
|
|
2018-04-19 10:07:40 +00:00
|
|
|
/** Information for a given RSS type. */
struct rss_type_info {
	const char *str; /**< Type name. */
	uint64_t rss_type; /**< Type value. */
};

/**
 * RSS type information table.
 *
 * An entry with a NULL type name terminates the list.
 */
extern const struct rss_type_info rss_type_table[];
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/**
|
|
|
|
* The data structure associated with a forwarding stream between a receive
|
|
|
|
* port/queue and a transmit port/queue.
|
|
|
|
*/
|
|
|
|
struct fwd_stream {
|
|
|
|
/* "read-only" data */
|
|
|
|
portid_t rx_port; /**< port to poll for received packets */
|
|
|
|
queueid_t rx_queue; /**< RX queue to poll on "rx_port" */
|
|
|
|
portid_t tx_port; /**< forwarding port of received packets */
|
|
|
|
queueid_t tx_queue; /**< TX queue to send forwarded packets */
|
|
|
|
streamid_t peer_addr; /**< index of peer ethernet address of packets */
|
|
|
|
|
2016-06-14 23:08:02 +00:00
|
|
|
unsigned int retry_enabled;
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/* "read-write" results */
|
2019-03-25 08:51:44 +00:00
|
|
|
uint64_t rx_packets; /**< received packets */
|
|
|
|
uint64_t tx_packets; /**< received packets transmitted */
|
|
|
|
uint64_t fwd_dropped; /**< received packets not forwarded */
|
|
|
|
uint64_t rx_bad_ip_csum ; /**< received packets has bad ip checksum */
|
|
|
|
uint64_t rx_bad_l4_csum ; /**< received packets has bad l4 checksum */
|
|
|
|
uint64_t rx_bad_outer_l4_csum;
|
2018-10-09 14:18:18 +00:00
|
|
|
/**< received packets has bad outer l4 checksum */
|
2017-10-07 07:45:57 +00:00
|
|
|
unsigned int gro_times; /**< GRO operation times */
|
2012-09-04 12:54:00 +00:00
|
|
|
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
|
|
|
|
uint64_t core_cycles; /**< used for RX and TX processing */
|
|
|
|
#endif
|
|
|
|
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
|
|
|
|
struct pkt_burst_stats rx_burst_stats;
|
|
|
|
struct pkt_burst_stats tx_burst_stats;
|
|
|
|
#endif
|
|
|
|
};
|
|
|
|
|
2016-12-21 14:51:22 +00:00
|
|
|
/** Descriptor for a single flow. */
|
|
|
|
struct port_flow {
|
|
|
|
struct port_flow *next; /**< Next flow in list. */
|
|
|
|
struct port_flow *tmp; /**< Temporary linking. */
|
|
|
|
uint32_t id; /**< Flow rule ID. */
|
|
|
|
struct rte_flow *flow; /**< Opaque flow object returned by PMD. */
|
2018-08-31 09:01:05 +00:00
|
|
|
struct rte_flow_conv_rule rule; /* Saved flow rule description. */
|
|
|
|
uint8_t data[]; /**< Storage for flow rule description */
|
2016-12-21 14:51:22 +00:00
|
|
|
};
|
|
|
|
|
2018-07-06 17:21:16 +00:00
|
|
|
#ifdef SOFTNIC
/**
 * The data structure associated with a softnic port.
 * NOTE(review): SOFTNIC is always defined (0 or 1), so this "#ifdef" is
 * always true — "#if SOFTNIC" may have been intended; confirm.
 */
struct softnic_port {
	uint32_t default_tm_hierarchy_enable; /**< default tm hierarchy */
	struct fwd_lcore **fwd_lcore_arg; /**< softnic fwd core parameters */
};
#endif
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/**
|
|
|
|
* The data structure associated with each port.
|
|
|
|
*/
|
|
|
|
struct rte_port {
|
|
|
|
struct rte_eth_dev_info dev_info; /**< PCI info + driver name */
|
|
|
|
struct rte_eth_conf dev_conf; /**< Port configuration. */
|
2019-05-21 16:13:03 +00:00
|
|
|
struct rte_ether_addr eth_addr; /**< Port ethernet address */
|
2012-09-04 12:54:00 +00:00
|
|
|
struct rte_eth_stats stats; /**< Last port statistics */
|
|
|
|
unsigned int socket_id; /**< For NUMA support */
|
2018-01-10 09:09:14 +00:00
|
|
|
uint16_t parse_tunnel:1; /**< Parse internal headers */
|
2016-09-26 13:48:34 +00:00
|
|
|
uint16_t tso_segsz; /**< Segmentation offload MSS for non-tunneled packets. */
|
|
|
|
uint16_t tunnel_tso_segsz; /**< Segmentation offload MSS for tunneled pkts. */
|
2015-06-11 07:03:58 +00:00
|
|
|
uint16_t tx_vlan_id;/**< The tag ID */
|
|
|
|
uint16_t tx_vlan_id_outer;/**< The outer tag ID */
|
2012-12-19 23:00:00 +00:00
|
|
|
uint8_t tx_queue_stats_mapping_enabled;
|
|
|
|
uint8_t rx_queue_stats_mapping_enabled;
|
2012-12-19 23:00:00 +00:00
|
|
|
volatile uint16_t port_status; /**< port started or not */
|
2018-10-25 15:11:17 +00:00
|
|
|
uint8_t need_setup; /**< port just attached */
|
2012-12-19 23:00:00 +00:00
|
|
|
uint8_t need_reconfig; /**< need reconfiguring port or not */
|
|
|
|
uint8_t need_reconfig_queues; /**< need reconfiguring queues or not */
|
|
|
|
uint8_t rss_flag; /**< enable rss or not */
|
2015-07-27 15:54:35 +00:00
|
|
|
uint8_t dcb_flag; /**< enable dcb */
|
2018-04-24 12:44:08 +00:00
|
|
|
uint16_t nb_rx_desc[MAX_QUEUE_ID+1]; /**< per queue rx desc number */
|
|
|
|
uint16_t nb_tx_desc[MAX_QUEUE_ID+1]; /**< per queue tx desc number */
|
|
|
|
struct rte_eth_rxconf rx_conf[MAX_QUEUE_ID+1]; /**< per queue rx configuration */
|
|
|
|
struct rte_eth_txconf tx_conf[MAX_QUEUE_ID+1]; /**< per queue tx configuration */
|
2019-05-21 16:13:03 +00:00
|
|
|
struct rte_ether_addr *mc_addr_pool; /**< pool of multicast addrs */
|
2015-05-28 15:05:20 +00:00
|
|
|
uint32_t mc_addr_nb; /**< nb. of addr. in mc_addr_pool */
|
2015-07-27 15:54:35 +00:00
|
|
|
uint8_t slave_flag; /**< bonding slave port */
|
2016-12-21 14:51:22 +00:00
|
|
|
struct port_flow *flow_list; /**< Associated flows. */
|
2018-10-17 15:22:10 +00:00
|
|
|
const struct rte_eth_rxtx_callback *rx_dump_cb[MAX_QUEUE_ID+1];
|
|
|
|
const struct rte_eth_rxtx_callback *tx_dump_cb[MAX_QUEUE_ID+1];
|
2018-07-06 17:21:16 +00:00
|
|
|
#ifdef SOFTNIC
|
|
|
|
struct softnic_port softport; /**< softnic params */
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
#endif
|
2018-10-21 14:22:48 +00:00
|
|
|
/**< metadata value to insert in Tx packets. */
|
|
|
|
rte_be32_t tx_metadata;
|
2018-10-24 06:21:59 +00:00
|
|
|
const struct rte_eth_rxtx_callback *tx_set_md_cb[MAX_QUEUE_ID+1];
|
2012-09-04 12:54:00 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/**
|
|
|
|
* The data structure associated with each forwarding logical core.
|
|
|
|
* The logical cores are internally numbered by a core index from 0 to
|
|
|
|
* the maximum number of logical cores - 1.
|
|
|
|
* The system CPU identifier of all logical cores are setup in a global
|
|
|
|
* CPU id. configuration table.
|
|
|
|
*/
|
|
|
|
struct fwd_lcore {
|
2017-10-07 14:56:43 +00:00
|
|
|
struct rte_gso_ctx gso_ctx; /**< GSO context */
|
2012-09-04 12:54:00 +00:00
|
|
|
struct rte_mempool *mbp; /**< The mbuf pool to use by this core */
|
2017-10-07 07:45:57 +00:00
|
|
|
void *gro_ctx; /**< GRO context */
|
2012-09-04 12:54:00 +00:00
|
|
|
streamid_t stream_idx; /**< index of 1st stream in "fwd_streams" */
|
|
|
|
streamid_t stream_nb; /**< number of streams in "fwd_streams" */
|
|
|
|
lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */
|
|
|
|
queueid_t tx_queue; /**< TX queue to send forwarded packets */
|
|
|
|
volatile char stopped; /**< stop forwarding when set */
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Forwarding mode operations:
|
|
|
|
* - IO forwarding mode (default mode)
|
|
|
|
* Forwards packets unchanged.
|
|
|
|
*
|
|
|
|
* - MAC forwarding mode
|
|
|
|
* Set the source and the destination Ethernet addresses of packets
|
|
|
|
* before forwarding them.
|
|
|
|
*
|
|
|
|
* - IEEE1588 forwarding mode
|
|
|
|
* Check that received IEEE1588 Precise Time Protocol (PTP) packets are
|
|
|
|
* filtered and timestamped by the hardware.
|
|
|
|
* Forwards packets unchanged on the same port.
|
|
|
|
* Check that sent IEEE1588 PTP packets are timestamped by the hardware.
|
|
|
|
*/
|
|
|
|
typedef void (*port_fwd_begin_t)(portid_t pi);
|
|
|
|
typedef void (*port_fwd_end_t)(portid_t pi);
|
|
|
|
typedef void (*packet_fwd_t)(struct fwd_stream *fs);
|
|
|
|
|
|
|
|
struct fwd_engine {
|
|
|
|
const char *fwd_mode_name; /**< Forwarding mode name. */
|
|
|
|
port_fwd_begin_t port_fwd_begin; /**< NULL if nothing special to do. */
|
|
|
|
port_fwd_end_t port_fwd_end; /**< NULL if nothing special to do. */
|
|
|
|
packet_fwd_t packet_fwd; /**< Mandatory. */
|
|
|
|
};
|
|
|
|
|
2016-06-14 23:08:02 +00:00
|
|
|
/* Defaults for the TX-retry feature: wait 1 us between retries, 64 tries. */
#define BURST_TX_WAIT_US 1
#define BURST_TX_RETRIES 64

extern uint32_t burst_tx_delay_time; /**< us to wait between TX retries */
extern uint32_t burst_tx_retry_num;  /**< number of TX retries */
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/* Built-in forwarding engines; each is defined in its own source file. */
extern struct fwd_engine io_fwd_engine;
extern struct fwd_engine mac_fwd_engine;
extern struct fwd_engine mac_swap_engine;
extern struct fwd_engine flow_gen_engine;
extern struct fwd_engine rx_only_engine;
extern struct fwd_engine tx_only_engine;
extern struct fwd_engine csum_fwd_engine;
extern struct fwd_engine icmp_echo_engine; /**< replies to ARP/ICMP echo requests */
extern struct fwd_engine noisy_vnf_engine; /**< simulates a noisy VNF neighbour */
#ifdef SOFTNIC
extern struct fwd_engine softnic_fwd_engine;
#endif
#ifdef RTE_LIBRTE_IEEE1588
extern struct fwd_engine ieee1588_fwd_engine;
#endif

extern struct fwd_engine * fwd_engines[]; /**< NULL terminated array. */

extern uint16_t mempool_flags; /**< flags used when creating mbuf pools */
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/**
|
|
|
|
* Forwarding Configuration
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
struct fwd_config {
|
|
|
|
struct fwd_engine *fwd_eng; /**< Packet forwarding mode. */
|
|
|
|
streamid_t nb_fwd_streams; /**< Nb. of forward streams to process. */
|
|
|
|
lcoreid_t nb_fwd_lcores; /**< Nb. of logical cores to launch. */
|
|
|
|
portid_t nb_fwd_ports; /**< Nb. of ports involved. */
|
|
|
|
};
|
|
|
|
|
2012-12-19 23:00:00 +00:00
|
|
|
/**
 * DCB mode enable
 */
enum dcb_mode_enable
{
	DCB_VT_ENABLED, /**< DCB combined with virtualization (VT) */
	DCB_ENABLED
};
|
|
|
|
|
2012-12-19 23:00:00 +00:00
|
|
|
#define MAX_TX_QUEUE_STATS_MAPPINGS 1024 /* MAX_PORT of 32 @ 32 tx_queues/port */
|
|
|
|
#define MAX_RX_QUEUE_STATS_MAPPINGS 4096 /* MAX_PORT of 32 @ 128 rx_queues/port */
|
|
|
|
|
|
|
|
struct queue_stats_mappings {
|
2017-09-29 07:17:24 +00:00
|
|
|
portid_t port_id;
|
2012-12-19 23:00:00 +00:00
|
|
|
uint16_t queue_id;
|
|
|
|
uint8_t stats_counter_id;
|
|
|
|
} __rte_cache_aligned;
|
|
|
|
|
|
|
|
extern struct queue_stats_mappings tx_queue_stats_mappings_array[];
|
|
|
|
extern struct queue_stats_mappings rx_queue_stats_mappings_array[];
|
|
|
|
|
|
|
|
/* Assign both tx and rx queue stats mappings to the same default values */
|
|
|
|
extern struct queue_stats_mappings *tx_queue_stats_mappings;
|
|
|
|
extern struct queue_stats_mappings *rx_queue_stats_mappings;
|
|
|
|
|
|
|
|
extern uint16_t nb_tx_queue_stats_mappings;
|
|
|
|
extern uint16_t nb_rx_queue_stats_mappings;
|
|
|
|
|
2017-10-20 17:09:48 +00:00
|
|
|
extern uint8_t xstats_hide_zero; /**< Hide zero values for xstats display */
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/* globals used for configuration */
|
|
|
|
extern uint16_t verbose_level; /**< Drives messages being displayed, if any. */
|
2017-12-08 13:19:10 +00:00
|
|
|
extern int testpmd_logtype; /**< Log type for testpmd logs */
|
2012-09-04 12:54:00 +00:00
|
|
|
extern uint8_t interactive;
|
2014-04-03 17:30:12 +00:00
|
|
|
extern uint8_t auto_start;
|
2017-06-15 04:04:03 +00:00
|
|
|
extern uint8_t tx_first;
|
2017-03-31 19:13:19 +00:00
|
|
|
extern char cmdline_filename[PATH_MAX]; /**< offline commands file */
|
2012-09-04 12:54:00 +00:00
|
|
|
extern uint8_t numa_support; /**< set by "--numa" parameter */
|
|
|
|
extern uint16_t port_topology; /**< set by "--port-topology" parameter */
|
2013-09-18 10:00:00 +00:00
|
|
|
extern uint8_t no_flush_rx; /**<set by "--no-flush-rx" parameter */
|
2017-07-09 08:08:05 +00:00
|
|
|
extern uint8_t flow_isolate_all; /**< set by "--flow-isolate-all */
|
2018-10-02 13:34:57 +00:00
|
|
|
extern uint8_t mp_alloc_type;
|
|
|
|
/**< set by "--mp-anon" or "--mp-alloc" parameter */
|
2014-04-30 13:30:02 +00:00
|
|
|
extern uint8_t no_link_check; /**<set by "--disable-link-check" parameter */
|
2014-06-25 20:07:47 +00:00
|
|
|
extern volatile int test_done; /* stop packet forwarding when set to 1. */
|
2017-04-18 12:17:41 +00:00
|
|
|
extern uint8_t lsc_interrupt; /**< disabled by "--no-lsc-interrupt" parameter */
|
2017-04-18 12:17:42 +00:00
|
|
|
extern uint8_t rmv_interrupt; /**< disabled by "--no-rmv-interrupt" parameter */
|
2017-05-02 09:54:06 +00:00
|
|
|
extern uint32_t event_print_mask;
|
|
|
|
/**< set by "--print-event xxxx" and "--mask-event xxxx parameters */
|
2018-10-25 15:11:17 +00:00
|
|
|
extern bool setup_on_probe_event; /**< disabled by port setup-on iterator */
|
2018-05-03 12:38:19 +00:00
|
|
|
extern uint8_t hot_plug; /**< enable by "--hot-plug" parameter */
|
|
|
|
extern int do_mlockall; /**< set by "--mlockall" or "--no-mlockall" parameter */
|
2012-09-04 12:54:00 +00:00
|
|
|
|
2017-05-31 11:10:25 +00:00
|
|
|
#ifdef RTE_LIBRTE_IXGBE_BYPASS
|
2013-11-08 02:00:00 +00:00
|
|
|
extern uint32_t bypass_timeout; /**< Store the NIC bypass watchdog timeout */
|
|
|
|
#endif
|
|
|
|
|
2013-06-03 00:00:00 +00:00
|
|
|
/*
|
2014-06-03 23:42:50 +00:00
|
|
|
* Store specified sockets on which memory pool to be used by ports
|
|
|
|
* is allocated.
|
2013-06-03 00:00:00 +00:00
|
|
|
*/
|
2018-01-24 11:53:36 +00:00
|
|
|
extern uint8_t port_numa[RTE_MAX_ETHPORTS];
|
2013-06-03 00:00:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Store specified sockets on which RX ring to be used by ports
|
2014-06-03 23:42:50 +00:00
|
|
|
* is allocated.
|
2013-06-03 00:00:00 +00:00
|
|
|
*/
|
2018-01-24 11:53:36 +00:00
|
|
|
extern uint8_t rxring_numa[RTE_MAX_ETHPORTS];
|
2013-06-03 00:00:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Store specified sockets on which TX ring to be used by ports
|
2014-06-03 23:42:50 +00:00
|
|
|
* is allocated.
|
2013-06-03 00:00:00 +00:00
|
|
|
*/
|
2018-01-24 11:53:36 +00:00
|
|
|
extern uint8_t txring_numa[RTE_MAX_ETHPORTS];
|
2013-06-03 00:00:00 +00:00
|
|
|
|
|
|
|
extern uint8_t socket_num;
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/*
|
|
|
|
* Configuration of logical cores:
|
|
|
|
* nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
|
|
|
|
*/
|
|
|
|
extern lcoreid_t nb_lcores; /**< Number of logical cores probed at init time. */
|
|
|
|
extern lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
|
|
|
|
extern lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
|
|
|
|
extern unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE];
|
2017-05-09 07:28:37 +00:00
|
|
|
extern unsigned int num_sockets;
|
|
|
|
extern unsigned int socket_ids[RTE_MAX_NUMA_NODES];
|
2012-09-04 12:54:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Configuration of Ethernet ports:
|
|
|
|
* nb_fwd_ports <= nb_cfg_ports <= nb_ports
|
|
|
|
*/
|
|
|
|
extern portid_t nb_ports; /**< Number of ethernet ports probed at init time. */
|
|
|
|
extern portid_t nb_cfg_ports; /**< Number of configured ports. */
|
|
|
|
extern portid_t nb_fwd_ports; /**< Number of forwarding ports. */
|
|
|
|
extern portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];
|
|
|
|
extern struct rte_port *ports;
|
|
|
|
|
|
|
|
extern struct rte_eth_rxmode rx_mode;
|
2018-01-10 09:09:15 +00:00
|
|
|
extern struct rte_eth_txmode tx_mode;
|
|
|
|
|
2014-06-05 05:08:50 +00:00
|
|
|
extern uint64_t rss_hf;
|
2012-09-04 12:54:00 +00:00
|
|
|
|
|
|
|
/* Number of RX/TX queues configured per port. */
extern queueid_t nb_rxq;
extern queueid_t nb_txq;

/* Number of RX/TX descriptors per ring. */
extern uint16_t nb_rxd;
extern uint16_t nb_txd;

/* Ring threshold overrides; negative means "use the driver default". */
extern int16_t rx_free_thresh;
extern int8_t rx_drop_en;
extern int16_t tx_free_thresh;
extern int16_t tx_rs_thresh;

/* Parameters of the "noisy" VNF forwarding engine ("--noisy-*" options). */
extern uint16_t noisy_tx_sw_bufsz;
extern uint16_t noisy_tx_sw_buf_flush_time;
extern uint64_t noisy_lkup_mem_sz;
extern uint64_t noisy_lkup_num_writes;
extern uint64_t noisy_lkup_num_reads;
extern uint64_t noisy_lkup_num_reads_writes;

extern uint8_t dcb_config;
extern uint8_t dcb_test;

extern uint16_t mbuf_data_size; /**< Mbuf data space size. */
extern uint32_t param_total_num_mbufs;
|
2012-09-04 12:54:00 +00:00
|
|
|
|
2017-07-06 03:05:16 +00:00
|
|
|
extern uint16_t stats_period; /**< period for automatic stats display, 0 = off */

#ifdef RTE_LIBRTE_LATENCY_STATS
extern uint8_t latencystats_enabled;
extern lcoreid_t latencystats_lcore_id;
#endif

#ifdef RTE_LIBRTE_BITRATE
extern lcoreid_t bitrate_lcore_id;
extern uint8_t bitrate_enabled;
#endif
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
extern struct rte_fdir_conf fdir_conf;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Configuration of packet segments used by the "txonly" processing engine.
|
|
|
|
*/
|
|
|
|
#define TXONLY_DEF_PACKET_LEN 64
|
|
|
|
extern uint16_t tx_pkt_length; /**< Length of TXONLY packet */
|
|
|
|
extern uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT]; /**< Seg. lengths */
|
|
|
|
extern uint8_t tx_pkt_nb_segs; /**< Number of segments in TX packets */
|
|
|
|
|
2015-11-10 13:48:20 +00:00
|
|
|
/*
 * Policy applied by the "txonly" engine when splitting generated packets
 * into several mbuf segments.
 */
enum tx_pkt_split {
	TX_PKT_SPLIT_OFF, /**< Do not split the TX packets. */
	TX_PKT_SPLIT_ON,  /**< Split TX packets into the configured segments. */
	TX_PKT_SPLIT_RND, /**< Split TX packets at a random point. */
};

extern enum tx_pkt_split tx_pkt_split;
|
|
|
|
|
2019-03-28 18:46:28 +00:00
|
|
|
extern uint8_t txonly_multi_flow;
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
extern uint16_t nb_pkt_per_burst;
|
|
|
|
extern uint16_t mb_mempool_cache;
|
2015-02-12 14:56:38 +00:00
|
|
|
extern int8_t rx_pthresh;
|
|
|
|
extern int8_t rx_hthresh;
|
|
|
|
extern int8_t rx_wthresh;
|
|
|
|
extern int8_t tx_pthresh;
|
|
|
|
extern int8_t tx_hthresh;
|
|
|
|
extern int8_t tx_wthresh;
|
2012-09-04 12:54:00 +00:00
|
|
|
|
2019-04-10 17:41:30 +00:00
|
|
|
extern uint16_t tx_udp_src_port;
|
|
|
|
extern uint16_t tx_udp_dst_port;
|
|
|
|
|
|
|
|
extern uint32_t tx_ip_src_addr;
|
|
|
|
extern uint32_t tx_ip_dst_addr;
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
extern struct fwd_config cur_fwd_config;
|
|
|
|
extern struct fwd_engine *cur_fwd_eng;
|
2016-06-14 23:08:02 +00:00
|
|
|
extern uint32_t retry_enabled;
|
2012-09-04 12:54:00 +00:00
|
|
|
extern struct fwd_lcore **fwd_lcores;
|
|
|
|
extern struct fwd_stream **fwd_streams;
|
|
|
|
|
2018-04-23 12:16:34 +00:00
|
|
|
extern uint16_t vxlan_gpe_udp_port; /**< UDP port of tunnel VXLAN-GPE. */
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
extern portid_t nb_peer_eth_addrs; /**< Number of peer ethernet addresses. */
|
2019-05-21 16:13:03 +00:00
|
|
|
extern struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
|
2012-09-04 12:54:00 +00:00
|
|
|
|
2014-02-12 16:09:58 +00:00
|
|
|
extern uint32_t burst_tx_delay_time; /**< Burst tx delay time(us) for mac-retry. */
|
|
|
|
extern uint32_t burst_tx_retry_num; /**< Burst tx retry number for mac-retry. */
|
|
|
|
|
2017-10-07 07:45:57 +00:00
|
|
|
/* Default number of packets queued per GRO flow. */
#define GRO_DEFAULT_ITEM_NUM_PER_FLOW 32
/* Default number of GRO flows, derived from the burst-item budget. */
#define GRO_DEFAULT_FLOW_NUM \
	(RTE_GRO_MAX_BURST_ITEM_NUM / GRO_DEFAULT_ITEM_NUM_PER_FLOW)

/* Default and maximum number of GRO flush cycles. */
#define GRO_DEFAULT_FLUSH_CYCLES 1
#define GRO_MAX_FLUSH_CYCLES 4
|
|
|
|
|
2017-07-09 05:46:46 +00:00
|
|
|
struct gro_status {
|
|
|
|
struct rte_gro_param param;
|
|
|
|
uint8_t enable;
|
|
|
|
};
|
|
|
|
extern struct gro_status gro_ports[RTE_MAX_ETHPORTS];
|
2017-10-07 07:45:57 +00:00
|
|
|
extern uint8_t gro_flush_cycles;
|
2017-07-09 05:46:46 +00:00
|
|
|
|
2017-10-07 14:56:43 +00:00
|
|
|
#define GSO_MAX_PKT_BURST 2048
|
|
|
|
struct gso_status {
|
|
|
|
uint8_t enable;
|
|
|
|
};
|
|
|
|
extern struct gso_status gso_ports[RTE_MAX_ETHPORTS];
|
|
|
|
extern uint16_t gso_max_segment_size;
|
|
|
|
|
2018-07-06 06:43:05 +00:00
|
|
|
/* VXLAN encap/decap parameters. */
|
|
|
|
struct vxlan_encap_conf {
|
|
|
|
uint32_t select_ipv4:1;
|
|
|
|
uint32_t select_vlan:1;
|
2019-01-22 10:57:04 +00:00
|
|
|
uint32_t select_tos_ttl:1;
|
2018-07-06 06:43:05 +00:00
|
|
|
uint8_t vni[3];
|
|
|
|
rte_be16_t udp_src;
|
|
|
|
rte_be16_t udp_dst;
|
|
|
|
rte_be32_t ipv4_src;
|
|
|
|
rte_be32_t ipv4_dst;
|
|
|
|
uint8_t ipv6_src[16];
|
|
|
|
uint8_t ipv6_dst[16];
|
|
|
|
rte_be16_t vlan_tci;
|
2019-01-22 10:57:04 +00:00
|
|
|
uint8_t ip_tos;
|
|
|
|
uint8_t ip_ttl;
|
2019-05-21 16:13:05 +00:00
|
|
|
uint8_t eth_src[RTE_ETHER_ADDR_LEN];
|
|
|
|
uint8_t eth_dst[RTE_ETHER_ADDR_LEN];
|
2018-07-06 06:43:05 +00:00
|
|
|
};
|
|
|
|
struct vxlan_encap_conf vxlan_encap_conf;
|
|
|
|
|
2018-07-06 06:43:06 +00:00
|
|
|
/* NVGRE encap/decap parameters. */
|
|
|
|
struct nvgre_encap_conf {
|
|
|
|
uint32_t select_ipv4:1;
|
|
|
|
uint32_t select_vlan:1;
|
|
|
|
uint8_t tni[3];
|
|
|
|
rte_be32_t ipv4_src;
|
|
|
|
rte_be32_t ipv4_dst;
|
|
|
|
uint8_t ipv6_src[16];
|
|
|
|
uint8_t ipv6_dst[16];
|
|
|
|
rte_be16_t vlan_tci;
|
2019-05-21 16:13:05 +00:00
|
|
|
uint8_t eth_src[RTE_ETHER_ADDR_LEN];
|
|
|
|
uint8_t eth_dst[RTE_ETHER_ADDR_LEN];
|
2018-07-06 06:43:06 +00:00
|
|
|
};
|
|
|
|
struct nvgre_encap_conf nvgre_encap_conf;
|
|
|
|
|
app/testpmd: add MPLSoUDP encapsulation
MPLSoUDP is an example for L3 tunnel encapsulation.
L3 tunnel type is a tunnel that is missing the layer 2 header of the
inner packet.
Example for MPLSoUDP tunnel:
ETH / IPV4 / UDP / MPLS / IP / L4..L7
In order to encapsulate such a tunnel there is a need to remove L2 of
the inner packet and encap the remaining tunnel, this is done by
applying 2 rte flow commands l2_decap followed by mplsoudp_encap.
Both commands must appear in the same flow, and from the point of the
packet it both actions are applied at the same time. (There is no part
where a packet doesn't have L2 header).
Decapsulating such a tunnel works the other way, first we need to decap
the outer tunnel header and then apply the new L2.
So the commands will be mplsoudp_decap / l2_encap
Due to the complex encapsulation of MPLSoUDP and L2 flow actions and
based on the fact testpmd does not allocate memory, this patch adds a
new command in testpmd to initialise a global structures containing the
necessary information to make the outer layer of the packet. This same
global structures will then be used by the flow commands in testpmd when
the action mplsoudp_encap, mplsoudp_decap, l2_encap, l2_decap, will be
parsed, at this point, the conversion into such action becomes trivial.
The l2_encap and l2_decap actions can also be used for other L3 tunnel
types.
Signed-off-by: Ori Kam <orika@mellanox.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
2018-10-22 17:38:10 +00:00
|
|
|
/* L2 encap parameters. */
|
|
|
|
struct l2_encap_conf {
|
|
|
|
uint32_t select_ipv4:1;
|
|
|
|
uint32_t select_vlan:1;
|
|
|
|
rte_be16_t vlan_tci;
|
2019-05-21 16:13:05 +00:00
|
|
|
uint8_t eth_src[RTE_ETHER_ADDR_LEN];
|
|
|
|
uint8_t eth_dst[RTE_ETHER_ADDR_LEN];
|
app/testpmd: add MPLSoUDP encapsulation
MPLSoUDP is an example for L3 tunnel encapsulation.
L3 tunnel type is a tunnel that is missing the layer 2 header of the
inner packet.
Example for MPLSoUDP tunnel:
ETH / IPV4 / UDP / MPLS / IP / L4..L7
In order to encapsulate such a tunnel there is a need to remove L2 of
the inner packet and encap the remaining tunnel, this is done by
applying 2 rte flow commands l2_decap followed by mplsoudp_encap.
Both commands must appear in the same flow, and from the point of the
packet it both actions are applied at the same time. (There is no part
where a packet doesn't have L2 header).
Decapsulating such a tunnel works the other way, first we need to decap
the outer tunnel header and then apply the new L2.
So the commands will be mplsoudp_decap / l2_encap
Due to the complex encapsulation of MPLSoUDP and L2 flow actions and
based on the fact testpmd does not allocate memory, this patch adds a
new command in testpmd to initialise a global structures containing the
necessary information to make the outer layer of the packet. This same
global structures will then be used by the flow commands in testpmd when
the action mplsoudp_encap, mplsoudp_decap, l2_encap, l2_decap, will be
parsed, at this point, the conversion into such action becomes trivial.
The l2_encap and l2_decap actions can also be used for other L3 tunnel
types.
Signed-off-by: Ori Kam <orika@mellanox.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
2018-10-22 17:38:10 +00:00
|
|
|
};
|
|
|
|
struct l2_encap_conf l2_encap_conf;
|
|
|
|
|
|
|
|
/* L2 decap parameters. */
struct l2_decap_conf {
	uint32_t select_vlan:1; /**< Nonzero when a VLAN tag must be stripped too. */
};

/* NOTE(review): tentative definition in a header; see vxlan_encap_conf. */
struct l2_decap_conf l2_decap_conf;
|
|
|
|
|
2018-10-22 17:38:11 +00:00
|
|
|
/* MPLSoGRE encap parameters. */
|
|
|
|
struct mplsogre_encap_conf {
|
|
|
|
uint32_t select_ipv4:1;
|
|
|
|
uint32_t select_vlan:1;
|
|
|
|
uint8_t label[3];
|
|
|
|
rte_be32_t ipv4_src;
|
|
|
|
rte_be32_t ipv4_dst;
|
|
|
|
uint8_t ipv6_src[16];
|
|
|
|
uint8_t ipv6_dst[16];
|
|
|
|
rte_be16_t vlan_tci;
|
2019-05-21 16:13:05 +00:00
|
|
|
uint8_t eth_src[RTE_ETHER_ADDR_LEN];
|
|
|
|
uint8_t eth_dst[RTE_ETHER_ADDR_LEN];
|
2018-10-22 17:38:11 +00:00
|
|
|
};
|
|
|
|
struct mplsogre_encap_conf mplsogre_encap_conf;
|
|
|
|
|
|
|
|
/* MPLSoGRE decap parameters. */
struct mplsogre_decap_conf {
	uint32_t select_ipv4:1; /**< Outer header to strip is IPv4 (else IPv6). */
	uint32_t select_vlan:1; /**< An outer VLAN tag is present. */
};

/* NOTE(review): tentative definition in a header; see vxlan_encap_conf. */
struct mplsogre_decap_conf mplsogre_decap_conf;
|
|
|
|
|
app/testpmd: add MPLSoUDP encapsulation
MPLSoUDP is an example for L3 tunnel encapsulation.
L3 tunnel type is a tunnel that is missing the layer 2 header of the
inner packet.
Example for MPLSoUDP tunnel:
ETH / IPV4 / UDP / MPLS / IP / L4..L7
In order to encapsulate such a tunnel there is a need to remove L2 of
the inner packet and encap the remaining tunnel, this is done by
applying 2 rte flow commands l2_decap followed by mplsoudp_encap.
Both commands must appear in the same flow, and from the point of the
packet it both actions are applied at the same time. (There is no part
where a packet doesn't have L2 header).
Decapsulating such a tunnel works the other way, first we need to decap
the outer tunnel header and then apply the new L2.
So the commands will be mplsoudp_decap / l2_encap
Due to the complex encapsulation of MPLSoUDP and L2 flow actions and
based on the fact testpmd does not allocate memory, this patch adds a
new command in testpmd to initialise a global structures containing the
necessary information to make the outer layer of the packet. This same
global structures will then be used by the flow commands in testpmd when
the action mplsoudp_encap, mplsoudp_decap, l2_encap, l2_decap, will be
parsed, at this point, the conversion into such action becomes trivial.
The l2_encap and l2_decap actions can also be used for other L3 tunnel
types.
Signed-off-by: Ori Kam <orika@mellanox.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
2018-10-22 17:38:10 +00:00
|
|
|
/* MPLSoUDP encap parameters. */
|
|
|
|
struct mplsoudp_encap_conf {
|
|
|
|
uint32_t select_ipv4:1;
|
|
|
|
uint32_t select_vlan:1;
|
|
|
|
uint8_t label[3];
|
|
|
|
rte_be16_t udp_src;
|
|
|
|
rte_be16_t udp_dst;
|
|
|
|
rte_be32_t ipv4_src;
|
|
|
|
rte_be32_t ipv4_dst;
|
|
|
|
uint8_t ipv6_src[16];
|
|
|
|
uint8_t ipv6_dst[16];
|
|
|
|
rte_be16_t vlan_tci;
|
2019-05-21 16:13:05 +00:00
|
|
|
uint8_t eth_src[RTE_ETHER_ADDR_LEN];
|
|
|
|
uint8_t eth_dst[RTE_ETHER_ADDR_LEN];
|
app/testpmd: add MPLSoUDP encapsulation
MPLSoUDP is an example for L3 tunnel encapsulation.
L3 tunnel type is a tunnel that is missing the layer 2 header of the
inner packet.
Example for MPLSoUDP tunnel:
ETH / IPV4 / UDP / MPLS / IP / L4..L7
In order to encapsulate such a tunnel there is a need to remove L2 of
the inner packet and encap the remaining tunnel, this is done by
applying 2 rte flow commands l2_decap followed by mplsoudp_encap.
Both commands must appear in the same flow, and from the point of the
packet it both actions are applied at the same time. (There is no part
where a packet doesn't have L2 header).
Decapsulating such a tunnel works the other way, first we need to decap
the outer tunnel header and then apply the new L2.
So the commands will be mplsoudp_decap / l2_encap
Due to the complex encapsulation of MPLSoUDP and L2 flow actions and
based on the fact testpmd does not allocate memory, this patch adds a
new command in testpmd to initialise a global structures containing the
necessary information to make the outer layer of the packet. This same
global structures will then be used by the flow commands in testpmd when
the action mplsoudp_encap, mplsoudp_decap, l2_encap, l2_decap, will be
parsed, at this point, the conversion into such action becomes trivial.
The l2_encap and l2_decap actions can also be used for other L3 tunnel
types.
Signed-off-by: Ori Kam <orika@mellanox.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
2018-10-22 17:38:10 +00:00
|
|
|
};
|
|
|
|
struct mplsoudp_encap_conf mplsoudp_encap_conf;
|
|
|
|
|
|
|
|
/* MPLSoUDP decap parameters. */
struct mplsoudp_decap_conf {
	uint32_t select_ipv4:1; /**< Outer header to strip is IPv4 (else IPv6). */
	uint32_t select_vlan:1; /**< An outer VLAN tag is present. */
};

/* NOTE(review): tentative definition in a header; see vxlan_encap_conf. */
struct mplsoudp_decap_conf mplsoudp_decap_conf;
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
static inline unsigned int
|
|
|
|
lcore_num(void)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < RTE_MAX_LCORE; ++i)
|
|
|
|
if (fwd_lcores_cpuids[i] == rte_lcore_id())
|
|
|
|
return i;
|
|
|
|
|
|
|
|
rte_panic("lcore_id of current thread not found in fwd_lcores_cpuids\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline struct fwd_lcore *
|
|
|
|
current_fwd_lcore(void)
|
|
|
|
{
|
|
|
|
return fwd_lcores[lcore_num()];
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Mbuf Pools */

/*
 * Build the canonical name of the mbuf pool attached to a CPU socket.
 * Writes at most name_size bytes into mp_name; the result is always
 * NUL-terminated (snprintf semantics).
 */
static inline void
mbuf_poolname_build(unsigned int sock_id, char *mp_name, int name_size)
{
	snprintf(mp_name, name_size, "mbuf_pool_socket_%u", sock_id);
}
|
|
|
|
|
|
|
|
static inline struct rte_mempool *
|
|
|
|
mbuf_pool_find(unsigned int sock_id)
|
|
|
|
{
|
|
|
|
char pool_name[RTE_MEMPOOL_NAMESIZE];
|
|
|
|
|
|
|
|
mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name));
|
2016-01-27 13:58:30 +00:00
|
|
|
return rte_mempool_lookup((const char *)pool_name);
|
2012-09-04 12:54:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Read/Write operations on a PCI register of a port.
|
|
|
|
*/
|
|
|
|
static inline uint32_t
|
|
|
|
port_pci_reg_read(struct rte_port *port, uint32_t reg_off)
|
|
|
|
{
|
2018-04-09 12:09:38 +00:00
|
|
|
const struct rte_pci_device *pci_dev;
|
|
|
|
const struct rte_bus *bus;
|
2012-09-04 12:54:00 +00:00
|
|
|
void *reg_addr;
|
|
|
|
uint32_t reg_v;
|
|
|
|
|
2018-04-09 12:09:38 +00:00
|
|
|
if (!port->dev_info.device) {
|
|
|
|
printf("Invalid device\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
bus = rte_bus_find_by_device(port->dev_info.device);
|
|
|
|
if (bus && !strcmp(bus->name, "pci")) {
|
|
|
|
pci_dev = RTE_DEV_TO_PCI(port->dev_info.device);
|
|
|
|
} else {
|
|
|
|
printf("Not a PCI device\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
reg_addr = ((char *)pci_dev->mem_resource[0].addr + reg_off);
|
2012-09-04 12:54:00 +00:00
|
|
|
reg_v = *((volatile uint32_t *)reg_addr);
|
|
|
|
return rte_le_to_cpu_32(reg_v);
|
|
|
|
}
|
|
|
|
|
|
|
|
#define port_id_pci_reg_read(pt_id, reg_off) \
|
|
|
|
port_pci_reg_read(&ports[(pt_id)], (reg_off))
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
port_pci_reg_write(struct rte_port *port, uint32_t reg_off, uint32_t reg_v)
|
|
|
|
{
|
2018-04-09 12:09:38 +00:00
|
|
|
const struct rte_pci_device *pci_dev;
|
|
|
|
const struct rte_bus *bus;
|
2012-09-04 12:54:00 +00:00
|
|
|
void *reg_addr;
|
|
|
|
|
2018-04-09 12:09:38 +00:00
|
|
|
if (!port->dev_info.device) {
|
|
|
|
printf("Invalid device\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
bus = rte_bus_find_by_device(port->dev_info.device);
|
|
|
|
if (bus && !strcmp(bus->name, "pci")) {
|
|
|
|
pci_dev = RTE_DEV_TO_PCI(port->dev_info.device);
|
|
|
|
} else {
|
|
|
|
printf("Not a PCI device\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
reg_addr = ((char *)pci_dev->mem_resource[0].addr + reg_off);
|
2012-09-04 12:54:00 +00:00
|
|
|
*((volatile uint32_t *)reg_addr) = rte_cpu_to_le_32(reg_v);
|
|
|
|
}
|
|
|
|
|
|
|
|
#define port_id_pci_reg_write(pt_id, reg_off, reg_value) \
|
|
|
|
port_pci_reg_write(&ports[(pt_id)], (reg_off), (reg_value))
|
|
|
|
|
|
|
|
/* Prototypes */
|
2014-12-16 11:07:52 +00:00
|
|
|
unsigned int parse_item_list(char* str, const char* item_name,
|
|
|
|
unsigned int max_items,
|
|
|
|
unsigned int *parsed_items, int check_unique_values);
|
2012-09-04 12:54:00 +00:00
|
|
|
void launch_args_parse(int argc, char** argv);
|
2017-03-31 19:13:19 +00:00
|
|
|
void cmdline_read_from_file(const char *filename);
|
2012-09-04 12:54:00 +00:00
|
|
|
void prompt(void);
|
2015-12-30 21:59:49 +00:00
|
|
|
void prompt_exit(void);
|
2012-09-04 12:54:00 +00:00
|
|
|
void nic_stats_display(portid_t port_id);
|
|
|
|
void nic_stats_clear(portid_t port_id);
|
2014-07-23 12:28:54 +00:00
|
|
|
void nic_xstats_display(portid_t port_id);
|
|
|
|
void nic_xstats_clear(portid_t port_id);
|
2012-12-19 23:00:00 +00:00
|
|
|
void nic_stats_mapping_display(portid_t port_id);
|
2012-09-04 12:54:00 +00:00
|
|
|
void port_infos_display(portid_t port_id);
|
2018-09-25 10:32:45 +00:00
|
|
|
void port_summary_display(portid_t port_id);
|
|
|
|
void port_summary_header_display(void);
|
2017-01-16 02:31:26 +00:00
|
|
|
void port_offload_cap_display(portid_t port_id);
|
2015-10-27 12:51:50 +00:00
|
|
|
void rx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
|
|
|
|
void tx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
|
2012-09-04 12:54:00 +00:00
|
|
|
void fwd_lcores_config_display(void);
|
2016-06-14 15:35:37 +00:00
|
|
|
void pkt_fwd_config_display(struct fwd_config *cfg);
|
2012-09-04 12:54:00 +00:00
|
|
|
void rxtx_config_display(void);
|
|
|
|
void fwd_config_setup(void);
|
|
|
|
void set_def_fwd_config(void);
|
2014-11-24 16:33:40 +00:00
|
|
|
void reconfig(portid_t new_port_id, unsigned socket_id);
|
2012-12-19 23:00:00 +00:00
|
|
|
int init_fwd_streams(void);
|
2018-05-03 10:31:46 +00:00
|
|
|
void update_fwd_ports(portid_t new_pid);
|
2012-12-19 23:00:00 +00:00
|
|
|
|
2018-01-14 08:27:10 +00:00
|
|
|
void set_fwd_eth_peer(portid_t port_id, char *peer_addr);
|
|
|
|
|
2014-06-17 18:09:32 +00:00
|
|
|
void port_mtu_set(portid_t port_id, uint16_t mtu);
|
2012-09-04 12:54:00 +00:00
|
|
|
void port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_pos);
|
|
|
|
void port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
|
|
|
|
uint8_t bit_v);
|
|
|
|
void port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
|
|
|
|
uint8_t bit1_pos, uint8_t bit2_pos);
|
|
|
|
void port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
|
|
|
|
uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value);
|
|
|
|
void port_reg_display(portid_t port_id, uint32_t reg_off);
|
|
|
|
void port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t value);
|
2016-12-21 14:51:22 +00:00
|
|
|
int port_flow_validate(portid_t port_id,
|
|
|
|
const struct rte_flow_attr *attr,
|
|
|
|
const struct rte_flow_item *pattern,
|
|
|
|
const struct rte_flow_action *actions);
|
|
|
|
int port_flow_create(portid_t port_id,
|
|
|
|
const struct rte_flow_attr *attr,
|
|
|
|
const struct rte_flow_item *pattern,
|
|
|
|
const struct rte_flow_action *actions);
|
|
|
|
int port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule);
|
|
|
|
int port_flow_flush(portid_t port_id);
|
|
|
|
int port_flow_query(portid_t port_id, uint32_t rule,
|
2018-04-26 17:29:19 +00:00
|
|
|
const struct rte_flow_action *action);
|
2016-12-21 14:51:22 +00:00
|
|
|
void port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group);
|
2017-06-14 14:48:51 +00:00
|
|
|
int port_flow_isolate(portid_t port_id, int set);
|
2012-09-04 12:54:00 +00:00
|
|
|
|
|
|
|
void rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id);
|
|
|
|
void tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id);
|
|
|
|
|
2012-12-19 23:00:00 +00:00
|
|
|
int set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc);
|
|
|
|
int set_fwd_lcores_mask(uint64_t lcoremask);
|
2012-09-04 12:54:00 +00:00
|
|
|
void set_fwd_lcores_number(uint16_t nb_lc);
|
|
|
|
|
|
|
|
void set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt);
|
|
|
|
void set_fwd_ports_mask(uint64_t portmask);
|
|
|
|
void set_fwd_ports_number(uint16_t nb_pt);
|
2016-06-14 15:35:36 +00:00
|
|
|
int port_is_forwarding(portid_t port_id);
|
2012-09-04 12:54:00 +00:00
|
|
|
|
2012-12-19 23:00:00 +00:00
|
|
|
void rx_vlan_strip_set(portid_t port_id, int on);
|
|
|
|
void rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on);
|
|
|
|
|
|
|
|
void rx_vlan_filter_set(portid_t port_id, int on);
|
2012-09-04 12:54:00 +00:00
|
|
|
void rx_vlan_all_filter_set(portid_t port_id, int on);
|
2015-02-20 10:26:12 +00:00
|
|
|
int rx_vft_set(portid_t port_id, uint16_t vlan_id, int on);
|
2012-12-19 23:00:00 +00:00
|
|
|
void vlan_extend_set(portid_t port_id, int on);
|
2016-03-11 16:50:57 +00:00
|
|
|
void vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type,
|
|
|
|
uint16_t tp_id);
|
2012-09-04 12:54:00 +00:00
|
|
|
void tx_vlan_set(portid_t port_id, uint16_t vlan_id);
|
2015-06-11 07:03:58 +00:00
|
|
|
void tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer);
|
2012-09-04 12:54:00 +00:00
|
|
|
void tx_vlan_reset(portid_t port_id);
|
2014-06-05 05:08:50 +00:00
|
|
|
void tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on);
|
2012-12-19 23:00:00 +00:00
|
|
|
|
|
|
|
void set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value);
|
|
|
|
|
2017-10-20 17:09:48 +00:00
|
|
|
void set_xstats_hide_zero(uint8_t on_off);
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
void set_verbose_level(uint16_t vb_level);
|
|
|
|
void set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs);
|
2015-11-10 13:48:20 +00:00
|
|
|
void show_tx_pkt_segments(void);
|
|
|
|
void set_tx_pkt_split(const char *name);
|
2012-09-04 12:54:00 +00:00
|
|
|
void set_nb_pkt_per_burst(uint16_t pkt_burst);
|
2014-05-14 15:24:41 +00:00
|
|
|
char *list_pkt_forwarding_modes(void);
|
2016-06-14 23:08:02 +00:00
|
|
|
char *list_pkt_forwarding_retry_modes(void);
|
2012-09-04 12:54:00 +00:00
|
|
|
void set_pkt_forwarding_mode(const char *fwd_mode);
|
|
|
|
void start_packet_forwarding(int with_tx_first);
|
2019-03-25 08:51:46 +00:00
|
|
|
void fwd_stats_display(void);
|
|
|
|
void fwd_stats_reset(void);
|
2012-09-04 12:54:00 +00:00
|
|
|
void stop_packet_forwarding(void);
|
2014-05-28 07:15:02 +00:00
|
|
|
void dev_set_link_up(portid_t pid);
|
|
|
|
void dev_set_link_down(portid_t pid);
|
2012-12-19 23:00:00 +00:00
|
|
|
void init_port_config(void);
|
2015-07-27 15:54:35 +00:00
|
|
|
void set_port_slave_flag(portid_t slave_pid);
|
|
|
|
void clear_port_slave_flag(portid_t slave_pid);
|
2016-06-14 15:35:38 +00:00
|
|
|
uint8_t port_is_bonding_slave(portid_t slave_pid);
|
|
|
|
|
2015-10-31 15:57:30 +00:00
|
|
|
int init_port_dcb_config(portid_t pid, enum dcb_mode_enable dcb_mode,
|
|
|
|
enum rte_eth_nb_tcs num_tcs,
|
|
|
|
uint8_t pfc_en);
|
2014-02-12 15:32:25 +00:00
|
|
|
int start_port(portid_t pid);
|
2012-12-19 23:00:00 +00:00
|
|
|
void stop_port(portid_t pid);
|
|
|
|
void close_port(portid_t pid);
|
2017-07-23 09:15:12 +00:00
|
|
|
void reset_port(portid_t pid);
|
2015-02-25 19:32:29 +00:00
|
|
|
void attach_port(char *identifier);
|
2018-10-25 15:11:13 +00:00
|
|
|
void detach_port_device(portid_t port_id);
|
2012-12-19 23:00:00 +00:00
|
|
|
int all_ports_stopped(void);
|
2018-01-10 09:09:11 +00:00
|
|
|
int port_is_stopped(portid_t port_id);
|
2014-08-14 07:35:00 +00:00
|
|
|
int port_is_started(portid_t port_id);
|
2012-09-04 12:54:00 +00:00
|
|
|
void pmd_test_exit(void);
|
|
|
|
void fdir_get_infos(portid_t port_id);
|
2014-11-21 00:46:55 +00:00
|
|
|
void fdir_set_flex_mask(portid_t port_id,
|
|
|
|
struct rte_eth_fdir_flex_mask *cfg);
|
2014-11-21 00:46:56 +00:00
|
|
|
void fdir_set_flex_payload(portid_t port_id,
|
|
|
|
struct rte_eth_flex_payload_cfg *cfg);
|
2014-11-15 16:03:43 +00:00
|
|
|
void port_rss_reta_info(portid_t port_id,
|
|
|
|
struct rte_eth_rss_reta_entry64 *reta_conf,
|
|
|
|
uint16_t nb_entries);
|
2014-05-26 07:45:31 +00:00
|
|
|
|
2013-09-18 10:00:00 +00:00
|
|
|
void set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on);
|
2012-09-04 12:54:00 +00:00
|
|
|
|
2014-05-26 07:45:31 +00:00
|
|
|
int set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate);
|
|
|
|
int set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate,
|
|
|
|
uint64_t q_msk);
|
|
|
|
|
2018-10-04 19:24:44 +00:00
|
|
|
void port_rss_hash_conf_show(portid_t port_id, int show_rss_key);
|
2015-10-30 18:55:13 +00:00
|
|
|
void port_rss_hash_key_update(portid_t port_id, char rss_type[],
|
|
|
|
uint8_t *hash_key, uint hash_key_len);
|
2014-08-14 07:35:00 +00:00
|
|
|
int rx_queue_id_is_invalid(queueid_t rxq_id);
|
|
|
|
int tx_queue_id_is_invalid(queueid_t txq_id);
|
2017-10-07 07:45:57 +00:00
|
|
|
void setup_gro(const char *onoff, portid_t port_id);
|
|
|
|
void setup_gro_flush_cycles(uint8_t cycles);
|
|
|
|
void show_gro(portid_t port_id);
|
2017-10-07 14:56:43 +00:00
|
|
|
void setup_gso(const char *mode, portid_t port_id);
|
2014-05-16 08:58:42 +00:00
|
|
|
|
2015-05-28 15:05:20 +00:00
|
|
|
/* Functions to manage the set of filtered Multicast MAC addresses */
|
2019-05-21 16:13:03 +00:00
|
|
|
void mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr);
|
|
|
|
void mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr);
|
2017-10-12 09:32:52 +00:00
|
|
|
void port_dcb_info_display(portid_t port_id);
|
2015-05-28 15:05:20 +00:00
|
|
|
|
2017-11-23 16:15:00 +00:00
|
|
|
uint8_t *open_file(const char *file_path, uint32_t *size);
|
|
|
|
int save_file(const char *file_path, uint8_t *buf, uint32_t size);
|
|
|
|
int close_file(uint8_t *buf);
|
2017-03-30 02:51:48 +00:00
|
|
|
|
2017-10-11 08:55:33 +00:00
|
|
|
void port_queue_region_info_display(portid_t port_id, void *buf);
|
|
|
|
|
2015-02-25 19:32:29 +00:00
|
|
|
/* Whether port-id validation helpers print a warning on failure. */
enum print_warning {
	ENABLED_WARN = 0, /**< Print a warning for an invalid port id. */
	DISABLED_WARN     /**< Validate silently. */
};
|
|
|
|
int port_id_is_invalid(portid_t port_id, enum print_warning warning);
|
2018-05-03 10:31:43 +00:00
|
|
|
void print_valid_ports(void);
|
2017-05-09 07:28:37 +00:00
|
|
|
int new_socket_id(unsigned int socket_id);
|
2015-02-25 19:32:29 +00:00
|
|
|
|
2018-01-12 11:31:21 +00:00
|
|
|
queueid_t get_allowed_max_nb_rxq(portid_t *pid);
|
|
|
|
int check_nb_rxq(queueid_t rxq);
|
2018-01-12 11:31:22 +00:00
|
|
|
queueid_t get_allowed_max_nb_txq(portid_t *pid);
|
|
|
|
int check_nb_txq(queueid_t txq);
|
2018-01-12 11:31:21 +00:00
|
|
|
|
2018-10-17 15:22:10 +00:00
|
|
|
uint16_t dump_rx_pkts(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
|
|
|
|
uint16_t nb_pkts, __rte_unused uint16_t max_pkts,
|
|
|
|
__rte_unused void *user_param);
|
|
|
|
|
|
|
|
uint16_t dump_tx_pkts(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
|
|
|
|
uint16_t nb_pkts, __rte_unused void *user_param);
|
|
|
|
|
|
|
|
void add_rx_dump_callbacks(portid_t portid);
|
|
|
|
void remove_rx_dump_callbacks(portid_t portid);
|
|
|
|
void add_tx_dump_callbacks(portid_t portid);
|
|
|
|
void remove_tx_dump_callbacks(portid_t portid);
|
2018-10-17 15:22:11 +00:00
|
|
|
void configure_rxtx_dump_callbacks(uint16_t verbose);
|
2018-10-17 15:22:09 +00:00
|
|
|
|
2018-10-24 06:21:59 +00:00
|
|
|
uint16_t tx_pkt_set_md(uint16_t port_id, __rte_unused uint16_t queue,
|
|
|
|
struct rte_mbuf *pkts[], uint16_t nb_pkts,
|
|
|
|
__rte_unused void *user_param);
|
|
|
|
void add_tx_md_callback(portid_t portid);
|
|
|
|
void remove_tx_md_callback(portid_t portid);
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/*
 * Work-around of a compilation error with ICC on invocations of the
 * rte_be_to_cpu_16() function.
 *
 * NOTE(review): GCC predefines __GNUC__, not __GCC__, so the first branch
 * below is likely never taken and the portable fallback is what actually
 * gets compiled -- confirm intent before relying on the rte_* path.
 */
#ifdef __GCC__
#define RTE_BE_TO_CPU_16(be_16_v)  rte_be_to_cpu_16((be_16_v))
#define RTE_CPU_TO_BE_16(cpu_16_v) rte_cpu_to_be_16((cpu_16_v))
#else
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
/* Big-endian host: wire order is already native. */
#define RTE_BE_TO_CPU_16(be_16_v)  (be_16_v)
#define RTE_CPU_TO_BE_16(cpu_16_v) (cpu_16_v)
#else
/* Little-endian host: swap the two bytes. */
#define RTE_BE_TO_CPU_16(be_16_v) \
	(uint16_t) ((((be_16_v) & 0xFF) << 8) | ((be_16_v) >> 8))
#define RTE_CPU_TO_BE_16(cpu_16_v) \
	(uint16_t) ((((cpu_16_v) & 0xFF) << 8) | ((cpu_16_v) >> 8))
#endif
#endif /* __GCC__ */
|
|
|
|
|
2017-12-08 13:19:10 +00:00
|
|
|
/* Emit a message through the testpmd log type at the given RTE log level. */
#define TESTPMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, testpmd_logtype, "testpmd: " fmt, ## args)
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
#endif /* _TESTPMD_H_ */
|