/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <stdio.h>
|
|
|
|
#include <sys/stat.h>
|
|
|
|
|
|
|
|
#include <rte_cycles.h>
|
|
|
|
#include <rte_mbuf.h>
|
2018-07-06 17:21:16 +00:00
|
|
|
#include <rte_malloc.h>
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
#include <rte_ethdev.h>
|
|
|
|
#include <rte_flow.h>
|
|
|
|
#include <rte_meter.h>
|
|
|
|
#include <rte_eth_softnic.h>
|
|
|
|
#include <rte_tm.h>
|
|
|
|
|
|
|
|
#include "testpmd.h"
|
|
|
|
|
|
|
|
/* Shape of the 5-level TM hierarchy built by this forwarding mode:
 * 1 subport per port, 4096 pipes per subport, 4 traffic classes per
 * pipe and 4 queues per traffic class.
 */
#define SUBPORT_NODES_PER_PORT 1
#define PIPE_NODES_PER_SUBPORT 4096
#define TC_NODES_PER_PIPE 4
#define QUEUE_NODES_PER_TC 4

/* Total number of pipe nodes in the hierarchy. */
#define NUM_PIPE_NODES \
	(SUBPORT_NODES_PER_PORT * PIPE_NODES_PER_SUBPORT)

/* Total number of traffic-class nodes in the hierarchy. */
#define NUM_TC_NODES \
	(NUM_PIPE_NODES * TC_NODES_PER_PIPE)

/* Node id bases, one disjoint range per hierarchy level. */
#define ROOT_NODE_ID 1000000
#define SUBPORT_NODES_START_ID 900000
#define PIPE_NODES_START_ID 800000
#define TC_NODES_START_ID 700000

/* Stats mask used for the non-leaf nodes created below. */
#define STATS_MASK_DEFAULT \
	(RTE_TM_STATS_N_PKTS | \
	RTE_TM_STATS_N_BYTES | \
	RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \
	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

/* Stats mask for queue nodes (per the name; its use is later in this
 * file, outside this chunk).
 */
#define STATS_MASK_QUEUE \
	(STATS_MASK_DEFAULT | \
	RTE_TM_STATS_N_PKTS_QUEUED)

/* Conversion factor from Mbps link speed to bytes per second. */
#define BYTES_IN_MBPS (1000 * 1000 / 8)
/* Token bucket size (bytes) shared by every shaper profile below. */
#define TOKEN_BUCKET_SIZE 1000000
|
|
|
|
|
|
|
|
/* TM Hierarchy Levels, top (port/root) to bottom (queue). */
enum tm_hierarchy_level {
	TM_NODE_LEVEL_PORT = 0,		/* root node, level 0 */
	TM_NODE_LEVEL_SUBPORT,		/* level 1 */
	TM_NODE_LEVEL_PIPE,		/* level 2 */
	TM_NODE_LEVEL_TC,		/* level 3 */
	TM_NODE_LEVEL_QUEUE,		/* level 4, deepest level */
	TM_NODE_LEVEL_MAX,		/* number of levels, not a level */
};
|
|
|
|
|
|
|
|
/* Book-keeping for the TM hierarchy: the node ids created at every
 * level and the shaper rate chosen for each level.
 */
struct tm_hierarchy {
	/* TM Nodes: ids assigned per level of the tree. */
	uint32_t root_node_id;
	uint32_t subport_node_id[SUBPORT_NODES_PER_PORT];
	uint32_t pipe_node_id[SUBPORT_NODES_PER_PORT][PIPE_NODES_PER_SUBPORT];
	uint32_t tc_node_id[NUM_PIPE_NODES][TC_NODES_PER_PIPE];
	uint32_t queue_node_id[NUM_TC_NODES][QUEUE_NODES_PER_TC];

	/* TM Hierarchy Nodes Shaper Rates, in bytes per second
	 * (derived from a Mbps speed via BYTES_IN_MBPS).
	 */
	uint32_t root_node_shaper_rate;
	uint32_t subport_node_shaper_rate;
	uint32_t pipe_node_shaper_rate;
	uint32_t tc_node_shaper_rate;
	uint32_t tc_node_shared_shaper_rate;

	/* Count of shaper profiles created so far; doubles as the
	 * next free shaper profile id.
	 */
	uint32_t n_shapers;
};
|
|
|
|
|
2018-07-06 17:21:16 +00:00
|
|
|
/* Lcore context that drives the softnic poll loop (see softnic_begin()). */
static struct fwd_lcore *softnic_fwd_lcore;
/* Ethdev port id of the softnic virtual device. */
static uint16_t softnic_port_id;
/* Forwarding engine descriptor; presumably initialized later in this
 * file (initializer not visible in this chunk).
 */
struct fwd_engine softnic_fwd_engine;
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
|
|
|
|
/*
|
2018-07-06 17:21:16 +00:00
|
|
|
* Softnic packet forward
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
*/
|
|
|
|
static void
|
2018-07-06 17:21:16 +00:00
|
|
|
softnic_fwd(struct fwd_stream *fs)
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
{
|
|
|
|
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
|
|
|
|
uint16_t nb_rx;
|
|
|
|
uint16_t nb_tx;
|
|
|
|
uint32_t retry;
|
|
|
|
|
|
|
|
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
|
|
|
|
uint64_t start_tsc;
|
|
|
|
uint64_t end_tsc;
|
|
|
|
uint64_t core_cycles;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
|
|
|
|
start_tsc = rte_rdtsc();
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Packets Receive */
|
|
|
|
nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
|
|
|
|
pkts_burst, nb_pkt_per_burst);
|
|
|
|
fs->rx_packets += nb_rx;
|
|
|
|
|
|
|
|
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
|
|
|
|
fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
|
|
|
|
pkts_burst, nb_rx);
|
|
|
|
|
|
|
|
/* Retry if necessary */
|
|
|
|
if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
|
|
|
|
retry = 0;
|
|
|
|
while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
|
|
|
|
rte_delay_us(burst_tx_delay_time);
|
|
|
|
nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
|
|
|
|
&pkts_burst[nb_tx], nb_rx - nb_tx);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
fs->tx_packets += nb_tx;
|
|
|
|
|
|
|
|
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
|
|
|
|
fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (unlikely(nb_tx < nb_rx)) {
|
|
|
|
fs->fwd_dropped += (nb_rx - nb_tx);
|
|
|
|
do {
|
|
|
|
rte_pktmbuf_free(pkts_burst[nb_tx]);
|
|
|
|
} while (++nb_tx < nb_rx);
|
|
|
|
}
|
|
|
|
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
|
|
|
|
end_tsc = rte_rdtsc();
|
|
|
|
core_cycles = (end_tsc - start_tsc);
|
|
|
|
fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2018-07-06 17:21:16 +00:00
|
|
|
softnic_fwd_run(struct fwd_stream *fs)
|
|
|
|
{
|
|
|
|
rte_pmd_softnic_run(softnic_port_id);
|
|
|
|
softnic_fwd(fs);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Softnic init
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
softnic_begin(void *arg __rte_unused)
|
|
|
|
{
|
|
|
|
for (;;) {
|
|
|
|
if (!softnic_fwd_lcore->stopped)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
do {
|
|
|
|
/* Run softnic */
|
|
|
|
rte_pmd_softnic_run(softnic_port_id);
|
|
|
|
} while (!softnic_fwd_lcore->stopped);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-09-10 08:25:43 +00:00
|
|
|
/*
 * Fill in the per-level shaper rates of @h for @port_id.
 *
 * Returns 0 on success, or the negative error code from
 * rte_eth_link_get() on failure.
 */
static int
set_tm_hiearchy_nodes_shaper_rate(portid_t port_id,
	struct tm_hierarchy *h)
{
	struct rte_eth_link link_params;
	uint64_t tm_port_rate;
	int ret;

	memset(&link_params, 0, sizeof(link_params));

	/* Query the link; a failure here aborts rate setup. */
	ret = rte_eth_link_get(port_id, &link_params);
	if (ret < 0) {
		printf("Error during getting device (port %u) link info: %s\n",
			port_id, rte_strerror(-ret));
		return ret;
	}
	/* NOTE(review): the port rate is hard-coded to 10G in bytes/s;
	 * link_params is fetched only for the error check above and its
	 * link_speed is ignored — confirm this is intentional.
	 */
	tm_port_rate = (uint64_t)ETH_SPEED_NUM_10G * BYTES_IN_MBPS;

	/* Set tm hierarchy shapers rate: each level shares its parent's
	 * rate equally among its children; TC private rate equals the
	 * pipe rate, TC shared rate equals the subport rate.
	 */
	h->root_node_shaper_rate = tm_port_rate;
	h->subport_node_shaper_rate =
		tm_port_rate / SUBPORT_NODES_PER_PORT;
	h->pipe_node_shaper_rate
		= h->subport_node_shaper_rate / PIPE_NODES_PER_SUBPORT;
	h->tc_node_shaper_rate = h->pipe_node_shaper_rate;
	h->tc_node_shared_shaper_rate = h->subport_node_shaper_rate;

	return 0;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
softport_tm_root_node_add(portid_t port_id, struct tm_hierarchy *h,
|
|
|
|
struct rte_tm_error *error)
|
|
|
|
{
|
|
|
|
struct rte_tm_node_params rnp;
|
|
|
|
struct rte_tm_shaper_params rsp;
|
|
|
|
uint32_t priority, weight, level_id, shaper_profile_id;
|
|
|
|
|
|
|
|
memset(&rsp, 0, sizeof(struct rte_tm_shaper_params));
|
|
|
|
memset(&rnp, 0, sizeof(struct rte_tm_node_params));
|
|
|
|
|
|
|
|
/* Shaper profile Parameters */
|
|
|
|
rsp.peak.rate = h->root_node_shaper_rate;
|
|
|
|
rsp.peak.size = TOKEN_BUCKET_SIZE;
|
|
|
|
rsp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
|
|
|
|
shaper_profile_id = 0;
|
|
|
|
|
|
|
|
if (rte_tm_shaper_profile_add(port_id, shaper_profile_id,
|
|
|
|
&rsp, error)) {
|
|
|
|
printf("%s ERROR(%d)-%s!(shaper_id %u)\n ",
|
|
|
|
__func__, error->type, error->message,
|
|
|
|
shaper_profile_id);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Root Node Parameters */
|
|
|
|
h->root_node_id = ROOT_NODE_ID;
|
|
|
|
weight = 1;
|
|
|
|
priority = 0;
|
|
|
|
level_id = TM_NODE_LEVEL_PORT;
|
|
|
|
rnp.shaper_profile_id = shaper_profile_id;
|
|
|
|
rnp.nonleaf.n_sp_priorities = 1;
|
|
|
|
rnp.stats_mask = STATS_MASK_DEFAULT;
|
|
|
|
|
|
|
|
/* Add Node to TM Hierarchy */
|
|
|
|
if (rte_tm_node_add(port_id, h->root_node_id, RTE_TM_NODE_ID_NULL,
|
|
|
|
priority, weight, level_id, &rnp, error)) {
|
|
|
|
printf("%s ERROR(%d)-%s!(node_id %u, parent_id %u, level %u)\n",
|
|
|
|
__func__, error->type, error->message,
|
|
|
|
h->root_node_id, RTE_TM_NODE_ID_NULL,
|
|
|
|
level_id);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
/* Update */
|
|
|
|
h->n_shapers++;
|
|
|
|
|
|
|
|
printf(" Root node added (Start id %u, Count %u, level %u)\n",
|
|
|
|
h->root_node_id, 1, level_id);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2018-07-06 17:21:16 +00:00
|
|
|
softport_tm_subport_node_add(portid_t port_id,
|
|
|
|
struct tm_hierarchy *h,
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
struct rte_tm_error *error)
|
|
|
|
{
|
|
|
|
uint32_t subport_parent_node_id, subport_node_id = 0;
|
|
|
|
struct rte_tm_node_params snp;
|
|
|
|
struct rte_tm_shaper_params ssp;
|
|
|
|
uint32_t priority, weight, level_id, shaper_profile_id;
|
|
|
|
uint32_t i;
|
|
|
|
|
|
|
|
memset(&ssp, 0, sizeof(struct rte_tm_shaper_params));
|
|
|
|
memset(&snp, 0, sizeof(struct rte_tm_node_params));
|
|
|
|
|
|
|
|
shaper_profile_id = h->n_shapers;
|
|
|
|
|
|
|
|
/* Add Shaper Profile to TM Hierarchy */
|
|
|
|
for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) {
|
|
|
|
ssp.peak.rate = h->subport_node_shaper_rate;
|
|
|
|
ssp.peak.size = TOKEN_BUCKET_SIZE;
|
|
|
|
ssp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
|
|
|
|
|
|
|
|
if (rte_tm_shaper_profile_add(port_id, shaper_profile_id,
|
|
|
|
&ssp, error)) {
|
|
|
|
printf("%s ERROR(%d)-%s!(shaper_id %u)\n ",
|
|
|
|
__func__, error->type, error->message,
|
|
|
|
shaper_profile_id);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Node Parameters */
|
|
|
|
h->subport_node_id[i] = SUBPORT_NODES_START_ID + i;
|
|
|
|
subport_parent_node_id = h->root_node_id;
|
|
|
|
weight = 1;
|
|
|
|
priority = 0;
|
|
|
|
level_id = TM_NODE_LEVEL_SUBPORT;
|
|
|
|
snp.shaper_profile_id = shaper_profile_id;
|
|
|
|
snp.nonleaf.n_sp_priorities = 1;
|
|
|
|
snp.stats_mask = STATS_MASK_DEFAULT;
|
|
|
|
|
|
|
|
/* Add Node to TM Hiearchy */
|
|
|
|
if (rte_tm_node_add(port_id,
|
|
|
|
h->subport_node_id[i],
|
|
|
|
subport_parent_node_id,
|
|
|
|
priority, weight,
|
|
|
|
level_id,
|
|
|
|
&snp,
|
|
|
|
error)) {
|
|
|
|
printf("%s ERROR(%d)-%s!(node %u,parent %u,level %u)\n",
|
|
|
|
__func__,
|
|
|
|
error->type,
|
|
|
|
error->message,
|
|
|
|
h->subport_node_id[i],
|
|
|
|
subport_parent_node_id,
|
|
|
|
level_id);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
shaper_profile_id++;
|
|
|
|
subport_node_id++;
|
|
|
|
}
|
|
|
|
/* Update */
|
|
|
|
h->n_shapers = shaper_profile_id;
|
|
|
|
|
|
|
|
printf(" Subport nodes added (Start id %u, Count %u, level %u)\n",
|
|
|
|
h->subport_node_id[0], SUBPORT_NODES_PER_PORT, level_id);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Create one shaper profile and one pipe node for every pipe of every
 * subport, hanging each pipe under its subport node in @h.
 *
 * Returns 0 on success, -1 on any rte_tm API failure (details are
 * printed and left in @error).
 */
static int
softport_tm_pipe_node_add(portid_t port_id,
	struct tm_hierarchy *h,
	struct rte_tm_error *error)
{
	uint32_t pipe_parent_node_id;
	struct rte_tm_node_params pnp;
	struct rte_tm_shaper_params psp;
	uint32_t priority, weight, level_id, shaper_profile_id;
	uint32_t i, j;

	memset(&psp, 0, sizeof(struct rte_tm_shaper_params));
	memset(&pnp, 0, sizeof(struct rte_tm_node_params));

	/* Continue numbering profiles after those already created. */
	shaper_profile_id = h->n_shapers;

	/* Shaper Profile Parameters (same for every pipe). */
	psp.peak.rate = h->pipe_node_shaper_rate;
	psp.peak.size = TOKEN_BUCKET_SIZE;
	psp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;

	/* Pipe Node Parameters: 4 SP priorities (one per TC). */
	weight = 1;
	priority = 0;
	level_id = TM_NODE_LEVEL_PIPE;
	pnp.nonleaf.n_sp_priorities = 4;
	pnp.stats_mask = STATS_MASK_DEFAULT;

	/* Add Shaper Profiles and Nodes to TM Hierarchy */
	for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) {
		for (j = 0; j < PIPE_NODES_PER_SUBPORT; j++) {
			if (rte_tm_shaper_profile_add(port_id,
				shaper_profile_id, &psp, error)) {
				printf("%s ERROR(%d)-%s!(shaper_id %u)\n ",
					__func__, error->type, error->message,
					shaper_profile_id);
				return -1;
			}
			pnp.shaper_profile_id = shaper_profile_id;
			pipe_parent_node_id = h->subport_node_id[i];
			/* Pipe ids are laid out subport-major. */
			h->pipe_node_id[i][j] = PIPE_NODES_START_ID +
				(i * PIPE_NODES_PER_SUBPORT) + j;

			if (rte_tm_node_add(port_id,
					h->pipe_node_id[i][j],
					pipe_parent_node_id,
					priority, weight, level_id,
					&pnp,
					error)) {
				printf("%s ERROR(%d)-%s!(node %u,parent %u )\n",
					__func__,
					error->type,
					error->message,
					h->pipe_node_id[i][j],
					pipe_parent_node_id);

				return -1;
			}
			shaper_profile_id++;
		}
	}
	/* Publish the new next-free shaper profile id. */
	h->n_shapers = shaper_profile_id;

	printf("  Pipe nodes added (Start id %u, Count %u, level %u)\n",
		h->pipe_node_id[0][0], NUM_PIPE_NODES, level_id);

	return 0;
}
|
|
|
|
|
|
|
|
static int
|
2018-07-06 17:21:16 +00:00
|
|
|
softport_tm_tc_node_add(portid_t port_id,
|
|
|
|
struct tm_hierarchy *h,
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
struct rte_tm_error *error)
|
|
|
|
{
|
|
|
|
uint32_t tc_parent_node_id;
|
|
|
|
struct rte_tm_node_params tnp;
|
|
|
|
struct rte_tm_shaper_params tsp, tssp;
|
|
|
|
uint32_t shared_shaper_profile_id[TC_NODES_PER_PIPE];
|
|
|
|
uint32_t priority, weight, level_id, shaper_profile_id;
|
|
|
|
uint32_t pos, n_tc_nodes, i, j, k;
|
|
|
|
|
|
|
|
memset(&tsp, 0, sizeof(struct rte_tm_shaper_params));
|
|
|
|
memset(&tssp, 0, sizeof(struct rte_tm_shaper_params));
|
|
|
|
memset(&tnp, 0, sizeof(struct rte_tm_node_params));
|
|
|
|
|
|
|
|
shaper_profile_id = h->n_shapers;
|
|
|
|
|
|
|
|
/* Private Shaper Profile (TC) Parameters */
|
|
|
|
tsp.peak.rate = h->tc_node_shaper_rate;
|
|
|
|
tsp.peak.size = TOKEN_BUCKET_SIZE;
|
|
|
|
tsp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
|
|
|
|
|
|
|
|
/* Shared Shaper Profile (TC) Parameters */
|
|
|
|
tssp.peak.rate = h->tc_node_shared_shaper_rate;
|
|
|
|
tssp.peak.size = TOKEN_BUCKET_SIZE;
|
|
|
|
tssp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
|
|
|
|
|
|
|
|
/* TC Node Parameters */
|
|
|
|
weight = 1;
|
|
|
|
level_id = TM_NODE_LEVEL_TC;
|
|
|
|
tnp.n_shared_shapers = 1;
|
|
|
|
tnp.nonleaf.n_sp_priorities = 1;
|
|
|
|
tnp.stats_mask = STATS_MASK_DEFAULT;
|
|
|
|
|
|
|
|
/* Add Shared Shaper Profiles to TM Hierarchy */
|
|
|
|
for (i = 0; i < TC_NODES_PER_PIPE; i++) {
|
|
|
|
shared_shaper_profile_id[i] = shaper_profile_id;
|
|
|
|
|
|
|
|
if (rte_tm_shaper_profile_add(port_id,
|
|
|
|
shared_shaper_profile_id[i], &tssp, error)) {
|
|
|
|
printf("%s ERROR(%d)-%s!(Shared shaper profileid %u)\n",
|
|
|
|
__func__, error->type, error->message,
|
|
|
|
shared_shaper_profile_id[i]);
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if (rte_tm_shared_shaper_add_update(port_id, i,
|
|
|
|
shared_shaper_profile_id[i], error)) {
|
|
|
|
printf("%s ERROR(%d)-%s!(Shared shaper id %u)\n",
|
|
|
|
__func__, error->type, error->message, i);
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
shaper_profile_id++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Add Shaper Profiles and Nodes to TM Hierarchy */
|
|
|
|
n_tc_nodes = 0;
|
|
|
|
for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) {
|
|
|
|
for (j = 0; j < PIPE_NODES_PER_SUBPORT; j++) {
|
|
|
|
for (k = 0; k < TC_NODES_PER_PIPE ; k++) {
|
|
|
|
priority = k;
|
|
|
|
tc_parent_node_id = h->pipe_node_id[i][j];
|
|
|
|
tnp.shared_shaper_id =
|
|
|
|
(uint32_t *)calloc(1, sizeof(uint32_t));
|
2018-01-22 15:45:53 +00:00
|
|
|
if (tnp.shared_shaper_id == NULL) {
|
|
|
|
printf("Shared shaper mem alloc err\n");
|
|
|
|
return -1;
|
|
|
|
}
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
tnp.shared_shaper_id[0] = k;
|
|
|
|
pos = j + (i * PIPE_NODES_PER_SUBPORT);
|
|
|
|
h->tc_node_id[pos][k] =
|
|
|
|
TC_NODES_START_ID + n_tc_nodes;
|
|
|
|
|
|
|
|
if (rte_tm_shaper_profile_add(port_id,
|
|
|
|
shaper_profile_id, &tsp, error)) {
|
|
|
|
printf("%s ERROR(%d)-%s!(shaper %u)\n",
|
|
|
|
__func__, error->type,
|
|
|
|
error->message,
|
|
|
|
shaper_profile_id);
|
|
|
|
|
2018-11-06 10:23:06 +00:00
|
|
|
free(tnp.shared_shaper_id);
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
tnp.shaper_profile_id = shaper_profile_id;
|
|
|
|
if (rte_tm_node_add(port_id,
|
|
|
|
h->tc_node_id[pos][k],
|
|
|
|
tc_parent_node_id,
|
|
|
|
priority, weight,
|
|
|
|
level_id,
|
|
|
|
&tnp, error)) {
|
|
|
|
printf("%s ERROR(%d)-%s!(node id %u)\n",
|
|
|
|
__func__,
|
|
|
|
error->type,
|
|
|
|
error->message,
|
|
|
|
h->tc_node_id[pos][k]);
|
|
|
|
|
2018-11-06 10:23:06 +00:00
|
|
|
free(tnp.shared_shaper_id);
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
shaper_profile_id++;
|
|
|
|
n_tc_nodes++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/* Update */
|
|
|
|
h->n_shapers = shaper_profile_id;
|
|
|
|
|
|
|
|
printf(" TC nodes added (Start id %u, Count %u, level %u)\n",
|
|
|
|
h->tc_node_id[0][0], n_tc_nodes, level_id);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
softport_tm_queue_node_add(portid_t port_id, struct tm_hierarchy *h,
|
|
|
|
struct rte_tm_error *error)
|
|
|
|
{
|
|
|
|
uint32_t queue_parent_node_id;
|
|
|
|
struct rte_tm_node_params qnp;
|
|
|
|
uint32_t priority, weight, level_id, pos;
|
|
|
|
uint32_t n_queue_nodes, i, j, k;
|
|
|
|
|
|
|
|
memset(&qnp, 0, sizeof(struct rte_tm_node_params));
|
|
|
|
|
|
|
|
/* Queue Node Parameters */
|
|
|
|
priority = 0;
|
|
|
|
weight = 1;
|
|
|
|
level_id = TM_NODE_LEVEL_QUEUE;
|
|
|
|
qnp.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
|
|
|
|
qnp.leaf.cman = RTE_TM_CMAN_TAIL_DROP;
|
|
|
|
qnp.stats_mask = STATS_MASK_QUEUE;
|
|
|
|
|
|
|
|
/* Add Queue Nodes to TM Hierarchy */
|
|
|
|
n_queue_nodes = 0;
|
|
|
|
for (i = 0; i < NUM_PIPE_NODES; i++) {
|
|
|
|
for (j = 0; j < TC_NODES_PER_PIPE; j++) {
|
|
|
|
queue_parent_node_id = h->tc_node_id[i][j];
|
|
|
|
for (k = 0; k < QUEUE_NODES_PER_TC; k++) {
|
|
|
|
pos = j + (i * TC_NODES_PER_PIPE);
|
|
|
|
h->queue_node_id[pos][k] = n_queue_nodes;
|
|
|
|
if (rte_tm_node_add(port_id,
|
|
|
|
h->queue_node_id[pos][k],
|
|
|
|
queue_parent_node_id,
|
|
|
|
priority,
|
|
|
|
weight,
|
|
|
|
level_id,
|
|
|
|
&qnp, error)) {
|
|
|
|
printf("%s ERROR(%d)-%s!(node %u)\n",
|
|
|
|
__func__,
|
|
|
|
error->type,
|
|
|
|
error->message,
|
|
|
|
h->queue_node_id[pos][k]);
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
n_queue_nodes++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
printf(" Queue nodes added (Start id %u, Count %u, level %u)\n",
|
|
|
|
h->queue_node_id[0][0], n_queue_nodes, level_id);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2018-07-06 17:21:16 +00:00
|
|
|
softport_tm_hierarchy_specify(portid_t port_id,
|
|
|
|
struct rte_tm_error *error)
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
struct tm_hierarchy h;
|
|
|
|
int status;
|
|
|
|
|
|
|
|
memset(&h, 0, sizeof(struct tm_hierarchy));
|
|
|
|
|
|
|
|
/* TM hierarchy shapers rate */
|
2019-09-10 08:25:43 +00:00
|
|
|
status = set_tm_hiearchy_nodes_shaper_rate(port_id, &h);
|
|
|
|
if (status)
|
|
|
|
return status;
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
|
|
|
|
/* Add root node (level 0) */
|
|
|
|
status = softport_tm_root_node_add(port_id, &h, error);
|
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
/* Add subport node (level 1) */
|
|
|
|
status = softport_tm_subport_node_add(port_id, &h, error);
|
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
/* Add pipe nodes (level 2) */
|
|
|
|
status = softport_tm_pipe_node_add(port_id, &h, error);
|
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
/* Add traffic class nodes (level 3) */
|
|
|
|
status = softport_tm_tc_node_add(port_id, &h, error);
|
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
/* Add queue nodes (level 4) */
|
|
|
|
status = softport_tm_queue_node_add(port_id, &h, error);
|
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Softnic TM default configuration
 */
|
|
|
|
static void
|
2018-07-06 17:21:16 +00:00
|
|
|
softnic_tm_default_config(portid_t pi)
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
{
|
|
|
|
struct rte_port *port = &ports[pi];
|
2018-07-06 17:21:16 +00:00
|
|
|
struct rte_tm_error error;
|
|
|
|
int status;
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
|
2018-07-06 17:21:16 +00:00
|
|
|
/* Stop port */
|
|
|
|
rte_eth_dev_stop(pi);
|
|
|
|
|
|
|
|
/* TM hierarchy specification */
|
|
|
|
status = softport_tm_hierarchy_specify(pi, &error);
|
|
|
|
if (status) {
|
|
|
|
printf(" TM Hierarchy built error(%d) - %s\n",
|
|
|
|
error.type, error.message);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
printf("\n TM Hierarchy Specified!\n");
|
|
|
|
|
|
|
|
/* TM hierarchy commit */
|
|
|
|
status = rte_tm_hierarchy_commit(pi, 0, &error);
|
|
|
|
if (status) {
|
|
|
|
printf(" Hierarchy commit error(%d) - %s\n",
|
|
|
|
error.type, error.message);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
printf(" Hierarchy Committed (port %u)!\n", pi);
|
|
|
|
|
|
|
|
/* Start port */
|
|
|
|
status = rte_eth_dev_start(pi);
|
|
|
|
if (status) {
|
|
|
|
printf("\n Port %u start error!\n", pi);
|
|
|
|
return;
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
}
|
2018-07-06 17:21:16 +00:00
|
|
|
|
|
|
|
/* Reset the default hierarchy flag */
|
|
|
|
port->softport.default_tm_hierarchy_enable = 0;
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
}
|
|
|
|
|
2018-07-06 17:21:16 +00:00
|
|
|
/*
 * Softnic forwarding init
 */
|
|
|
|
static void
|
|
|
|
softnic_fwd_begin(portid_t pi)
|
|
|
|
{
|
|
|
|
struct rte_port *port = &ports[pi];
|
|
|
|
uint32_t lcore, fwd_core_present = 0, softnic_run_launch = 0;
|
|
|
|
int status;
|
|
|
|
|
|
|
|
softnic_fwd_lcore = port->softport.fwd_lcore_arg[0];
|
|
|
|
softnic_port_id = pi;
|
|
|
|
|
|
|
|
/* Launch softnic_run function on lcores */
|
|
|
|
for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
|
|
|
|
if (!rte_lcore_is_enabled(lcore))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (lcore == rte_get_master_lcore())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (fwd_core_present == 0) {
|
|
|
|
fwd_core_present++;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
status = rte_eal_remote_launch(softnic_begin, NULL, lcore);
|
|
|
|
if (status)
|
|
|
|
printf("softnic launch on lcore %u failed (%d)\n",
|
|
|
|
lcore, status);
|
|
|
|
|
|
|
|
softnic_run_launch = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!softnic_run_launch)
|
|
|
|
softnic_fwd_engine.packet_fwd = softnic_fwd_run;
|
|
|
|
|
|
|
|
/* Softnic TM default configuration */
|
|
|
|
if (port->softport.default_tm_hierarchy_enable == 1)
|
|
|
|
softnic_tm_default_config(pi);
|
|
|
|
}
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
|
2018-07-06 17:21:16 +00:00
|
|
|
struct fwd_engine softnic_fwd_engine = {
|
|
|
|
.fwd_mode_name = "softnic",
|
|
|
|
.port_fwd_begin = softnic_fwd_begin,
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
.port_fwd_end = NULL,
|
2018-07-06 17:21:16 +00:00
|
|
|
.packet_fwd = softnic_fwd,
|
app/testpmd: add traffic management forwarding mode
This commit extends the testpmd application with new forwarding engine
that demonstrates the use of ethdev traffic management APIs and softnic
PMD for QoS traffic management.
In this mode, 5-level hierarchical tree of the QoS scheduler is built
with the help of ethdev TM APIs such as shaper profile add/delete,
shared shaper add/update, node add/delete, hierarchy commit, etc.
The hierarchical tree has following nodes; root node(x1, level 0),
subport node(x1, level 1), pipe node(x4096, level 2),
tc node(x16348, level 3), queue node(x65536, level 4).
During runtime, each received packet is first classified by mapping the
packet fields information to 5-tuples (HQoS subport, pipe, traffic class,
queue within traffic class, and color) and storing it in the packet mbuf
sched field. After classification, each packet is sent to softnic port
which prioritizes the transmission of the received packets, and
accordingly sends them on to the output interface.
To enable traffic management mode, following testpmd command is used;
$ ./testpmd -c c -n 4 --vdev
'net_softnic0,hard_name=0000:06:00.1,soft_tm=on' -- -i
--forward-mode=tm
Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2017-10-10 10:18:18 +00:00
|
|
|
};
|