sched: add PIE based congestion management
Implement PIE-based congestion management based on RFC 8033. The Proportional Integral Controller Enhanced (PIE) algorithm works by proactively dropping packets randomly. PIE is implemented because more advanced queue management is required to address the bufferbloat problem and provide the desired quality of service to users.

Tests for the PIE code are added to the test application, and PIE-related information is added to the documentation.

Signed-off-by: Wojciech Liguzinski <wojciechx.liguzinski@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Jasvinder Singh <jasvinder.singh@intel.com>
commit 44c730b0e3 (parent f2777b53b1)
@@ -1428,6 +1428,7 @@ M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
 M: Jasvinder Singh <jasvinder.singh@intel.com>
 F: lib/sched/
 F: doc/guides/prog_guide/qos_framework.rst
+F: app/test/test_pie.c
 F: app/test/test_red.c
 F: app/test/test_sched.c
 F: examples/qos_sched/
@@ -115,6 +115,7 @@ test_sources = files(
         'test_reciprocal_division.c',
         'test_reciprocal_division_perf.c',
         'test_red.c',
+        'test_pie.c',
         'test_reorder.c',
         'test_rib.c',
         'test_rib6.c',
@@ -250,6 +251,7 @@ fast_tests = [
         ['prefetch_autotest', true],
         ['rcu_qsbr_autotest', true],
         ['red_autotest', true],
+        ['pie_autotest', true],
         ['rib_autotest', true],
         ['rib6_autotest', true],
         ['ring_autotest', true],
@@ -301,6 +303,7 @@ perf_test_names = [
         'fib_slow_autotest',
         'fib_perf_autotest',
         'red_all',
+        'pie_all',
         'barrier_autotest',
         'hash_multiwriter_autotest',
         'timer_racecond_autotest',
@@ -314,6 +317,7 @@ perf_test_names = [
         'fib6_perf_autotest',
         'rcu_qsbr_perf_autotest',
         'red_perf',
+        'pie_perf',
         'distributor_perf_autotest',
         'pmd_perf_autotest',
         'stack_perf_autotest',
app/test/test_pie.c — new file, 1065 lines (diff suppressed because it is too large)
@@ -89,7 +89,7 @@
 #define RTE_MAX_LCORE_FREQS 64
 
 /* rte_sched defines */
-#undef RTE_SCHED_RED
+#undef RTE_SCHED_CMAN
 #undef RTE_SCHED_COLLECT_STATS
 #undef RTE_SCHED_SUBPORT_TC_OV
 #define RTE_SCHED_PORT_N_GRINDERS 8
@@ -158,6 +158,9 @@ PCI
 PHY
    An abbreviation for the physical layer of the OSI model.
 
+PIE
+   Proportional Integral Controller Enhanced (RFC 8033)
+
 pktmbuf
    An *mbuf* carrying a network packet.
 
@@ -56,7 +56,8 @@ A functional description of each block is provided in the following table.
 | | | |
 +---+------------------------+--------------------------------------------------------------------------------+
 | 7 | Dropper | Congestion management using the Random Early Detection (RED) algorithm |
-| | | (specified by the Sally Floyd - Van Jacobson paper) or Weighted RED (WRED). |
+| | | (specified by the Sally Floyd - Van Jacobson paper) or Weighted RED (WRED) |
+| | | or Proportional Integral Controller Enhanced (PIE). |
 | | | Drop packets based on the current scheduler queue load level and packet |
 | | | priority. When congestion is experienced, lower priority packets are dropped |
 | | | first. |
@@ -421,7 +422,7 @@ No input packet can be part of more than one pipeline stage at a given time.
 The congestion management scheme implemented by the enqueue pipeline described above is very basic:
 packets are enqueued until a specific queue becomes full,
 then all the packets destined to the same queue are dropped until packets are consumed (by the dequeue operation).
-This can be improved by enabling RED/WRED as part of the enqueue pipeline which looks at the queue occupancy and
+This can be improved by enabling RED/WRED or PIE as part of the enqueue pipeline which looks at the queue occupancy and
 packet priority in order to yield the enqueue/drop decision for a specific packet
 (as opposed to enqueuing all packets / dropping all packets indiscriminately).
 
@@ -1155,13 +1156,13 @@ If the number of queues is small,
 then the performance of the port scheduler for the same level of active traffic is expected to be worse than
 the performance of a small set of message passing queues.
 
-.. _Dropper:
+.. _Droppers:
 
-Dropper
--------
+Droppers
+--------
 
 The purpose of the DPDK dropper is to drop packets arriving at a packet scheduler to avoid congestion.
-The dropper supports the Random Early Detection (RED),
+The dropper supports the Proportional Integral Controller Enhanced (PIE), Random Early Detection (RED),
 Weighted Random Early Detection (WRED) and tail drop algorithms.
 :numref:`figure_blk_diag_dropper` illustrates how the dropper integrates with the scheduler.
 The DPDK currently does not support congestion management
@@ -1174,9 +1175,13 @@ so the dropper provides the only method for congestion avoidance.
    High-level Block Diagram of the DPDK Dropper
 
 
-The dropper uses the Random Early Detection (RED) congestion avoidance algorithm as documented in the reference publication.
-The purpose of the RED algorithm is to monitor a packet queue,
+The dropper uses one of two congestion avoidance algorithms:
+   - the Random Early Detection (RED) as documented in the reference publication.
+   - the Proportional Integral Controller Enhanced (PIE) as documented in RFC8033 publication.
+
+The purpose of the RED/PIE algorithm is to monitor a packet queue,
 determine the current congestion level in the queue and decide whether an arriving packet should be enqueued or dropped.
 
 The RED algorithm uses an Exponential Weighted Moving Average (EWMA) filter to compute average queue size which
 gives an indication of the current congestion level in the queue.
 
@@ -1192,7 +1197,7 @@ This occurs when a packet queue has reached maximum capacity and cannot store an
 In this situation, all arriving packets are dropped.
 
 The flow through the dropper is illustrated in :numref:`figure_flow_tru_droppper`.
-The RED/WRED algorithm is exercised first and tail drop second.
+The RED/WRED/PIE algorithm is exercised first and tail drop second.
 
 .. _figure_flow_tru_droppper:
 
@@ -1200,6 +1205,16 @@ The RED/WRED algorithm is exercised first and tail drop second.
 
    Flow Through the Dropper
 
+The PIE algorithm periodically updates the drop probability based on the latency samples.
+It considers not only the current latency sample but also whether the latency is trending up or down.
+This is the classical Proportional Integral (PI) controller method, which is known for
+eliminating steady-state errors.
+
+When a congestion period ends, we might be left with a high drop probability with light
+packet arrivals. Hence, the PIE algorithm includes a mechanism by which the drop probability
+decays exponentially (rather than linearly) when the system is not congested.
+This helps the drop probability converge to 0 more quickly, while the PI controller ensures
+that it would eventually reach zero.
 
 The use cases supported by the dropper are:
 
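To make the update rule concrete, here is a condensed C sketch (not part of the patch) that mirrors the `_calc_drop_probability()` helper added in lib/sched/rte_pie.h further down in this commit; the auto-tuning and burst-allowance steps of the full implementation are omitted, and the field and constant names are taken from that header.

#include "rte_pie.h"

/* Sketch of the periodic PI update with exponential decay. */
static void
pie_update_drop_prob_sketch(const struct rte_pie_config *cfg, struct rte_pie *pie)
{
    /* Estimated queuing delay: queue length times the averaged dequeue time
     * per RTE_DQ_THRESHOLD bytes (a power of two, hence the shift by 14).
     */
    uint64_t qdelay = pie->qlen * (pie->avg_dq_time >> 14);

    /* PI controller: proportional term on the distance from the latency
     * target, integral-style term on whether the delay is trending up or down.
     */
    double p = RTE_ALPHA * ((double)qdelay - cfg->qdelay_ref) +
               RTE_BETA * ((double)qdelay - pie->qdelay_old);

    pie->drop_prob += p;

    /* Exponential decay towards zero once congestion goes away */
    if (qdelay < cfg->qdelay_ref / 2 && pie->qdelay_old < cfg->qdelay_ref / 2)
        pie->drop_prob *= 0.98;

    /* Bound the probability to [0, 1] */
    if (pie->drop_prob < 0)
        pie->drop_prob = 0;
    if (pie->drop_prob > 1)
        pie->drop_prob = 1;

    pie->qdelay_old = qdelay;
}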
@@ -1253,6 +1268,35 @@ to a mark probability of 1/10 (that is, 1 in 10 packets will be dropped).
 The EWMA filter weight parameter is specified as an inverse log value,
 for example, a filter weight parameter value of 9 corresponds to a filter weight of 1/2^9.
 
+A PIE configuration contains the parameters given in :numref:`table_qos_16a`.
+
+.. _table_qos_16a:
+
+.. table:: PIE Configuration Parameters
+
+   +--------------------------+---------+---------+------------------+
+   | Parameter                | Minimum | Maximum | Default          |
+   |                          |         |         |                  |
+   +==========================+=========+=========+==================+
+   | Queue delay reference    | 1       | uint16  | 15               |
+   | Latency Target Value     |         |         |                  |
+   | Unit: ms                 |         |         |                  |
+   +--------------------------+---------+---------+------------------+
+   | Max Burst Allowance      | 1       | uint16  | 150              |
+   | Unit: ms                 |         |         |                  |
+   +--------------------------+---------+---------+------------------+
+   | Tail Drop Threshold      | 1       | uint16  | 64               |
+   | Unit: bytes              |         |         |                  |
+   +--------------------------+---------+---------+------------------+
+   | Period to calculate      | 1       | uint16  | 15               |
+   | drop probability         |         |         |                  |
+   | Unit: ms                 |         |         |                  |
+   +--------------------------+---------+---------+------------------+
+
+The meaning of these parameters is explained in more detail in the next sections.
+This is the format in which the parameters are passed to the dropper module API;
+applications may also compute them at run time for fine tuning.
+
 .. _Enqueue_Operation:
 
 Enqueue Operation
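As an illustration (not part of the patch), the default values in the table above map onto the rte_pie_config_init() call added by this commit roughly as follows:

#include "rte_pie.h"

/* Build one PIE profile from the defaults listed above. */
static int
pie_profile_init_defaults(struct rte_pie_config *cfg)
{
    return rte_pie_config_init(cfg,
        15,     /* qdelay_ref: latency target, ms */
        15,     /* dp_update_interval: drop-probability update period, ms */
        150,    /* max_burst: maximum burst allowance, ms */
        64);    /* tailq_th: tail drop threshold, packets (per rte_pie.h) */
}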
@@ -1396,7 +1440,7 @@ As can be seen, the floating-point implementation achieved the worst performance
 | Method | Relative Performance |
 | | |
 +====================================================================================+======================+
-| Current dropper method (see :ref:`Section 23.3.2.1.3 <Dropper>`) | 100% |
+| Current dropper method (see :ref:`Section 23.3.2.1.3 <Droppers>`) | 100% |
 | | |
 +------------------------------------------------------------------------------------+----------------------+
 | Fixed-point method with small (512B) look-up table | 148% |
@@ -22,6 +22,7 @@ Main features:
   shared (by multiple nodes) shapers
 * Congestion management for hierarchy leaf nodes: algorithms of tail drop, head
   drop, WRED, private (per node) and shared (by multiple nodes) WRED contexts
+  and PIE.
 * Packet marking: IEEE 802.1q (VLAN DEI), IETF RFC 3168 (IPv4/IPv6 ECN for TCP
   and SCTP), IETF RFC 2597 (IPv4 / IPv6 DSCP)
 
@@ -103,8 +104,9 @@ Congestion Management
 Congestion management is used to control the admission of packets into a packet
 queue or group of packet queues on congestion. The congestion management
 algorithms that are supported are: Tail Drop, Head Drop and Weighted Random
-Early Detection (WRED). They are made available for every leaf node in the
-hierarchy, subject to the specific implementation supporting them.
+Early Detection (WRED), Proportional Integral Controller Enhanced (PIE).
+They are made available for every leaf node in the hierarchy, subject to
+the specific implementation supporting them.
 On request of writing a new packet into the current queue while the queue is
 full, the Tail Drop algorithm drops the new packet while leaving the queue
 unmodified, as opposed to the Head Drop* algorithm, which drops the packet
@@ -128,6 +130,13 @@ The configuration of WRED private and shared contexts is done through the
 definition of WRED profiles. Any WRED profile can be used by one or several
 WRED contexts (either private or shared).
 
+The Proportional Integral Controller Enhanced (PIE) algorithm works by proactively
+dropping packets randomly. The calculated drop probability is updated periodically,
+based on the measured and desired latency and on whether the queuing latency is
+currently trending up or down. Queuing latency can be obtained by direct measurement
+or estimated from the queue length and dequeue rate. The random drop is triggered by
+a packet's arrival, before it is enqueued into a queue.
+
+
 Packet Marking
 --------------
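A minimal sketch of the arrival-time decision described above, modelled on the `_rte_pie_drop()` helper in the new rte_pie.h (the burst-allowance and inactivity checks of the real code are left out, and the division by the random-number range is done in floating point here):

#include <stdbool.h>
#include <stdint.h>
#include <rte_random.h>
#include "rte_pie.h"

/* Accumulate the current drop probability on every arrival and draw a
 * uniform random number to decide whether this packet is dropped.
 */
static bool
pie_random_drop_sketch(struct rte_pie *pie)
{
    pie->accu_prob += pie->drop_prob;

    if (pie->accu_prob < 0.85)      /* too little accumulated probability: enqueue */
        return false;
    if (pie->accu_prob >= 8.5)      /* overdue for a drop: force it */
        return true;

    if ((double)rte_rand() / UINT64_MAX < pie->drop_prob) {
        pie->accu_prob = 0;
        return true;                /* random drop */
    }
    return false;                   /* enqueue */
}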
@@ -420,7 +420,7 @@ pmd_tm_node_type_get(struct rte_eth_dev *dev,
     return 0;
 }
 
-#ifdef RTE_SCHED_RED
+#ifdef RTE_SCHED_CMAN
 #define WRED_SUPPORTED 1
 #else
 #define WRED_SUPPORTED 0
@@ -2306,7 +2306,7 @@ tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
     return NULL;
 }
 
-#ifdef RTE_SCHED_RED
+#ifdef RTE_SCHED_CMAN
 
 static void
 wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)
@@ -2321,7 +2321,7 @@ wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)
     for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
         for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) {
             struct rte_red_params *dst =
-                &pp->red_params[tc_id][color];
+                &pp->cman_params->red_params[tc_id][color];
             struct tm_wred_profile *src_wp =
                 tm_tc_wred_profile_get(dev, tc_id);
             struct rte_tm_red_params *src =
@@ -25,7 +25,7 @@ static const struct rte_sched_subport_params subport_params_default = {
     .pipe_profiles = pipe_profile,
     .n_pipe_profiles = 0, /* filled at run time */
     .n_max_pipe_profiles = RTE_DIM(pipe_profile),
-#ifdef RTE_SCHED_RED
+#ifdef RTE_SCHED_CMAN
     .red_params = {
         /* Traffic Class 0 Colors Green / Yellow / Red */
         [0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
@@ -92,7 +92,7 @@ static const struct rte_sched_subport_params subport_params_default = {
         [12][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
         [12][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
     },
-#endif /* RTE_SCHED_RED */
+#endif /* RTE_SCHED_CMAN */
 };
 
 static struct tmgr_port_list tmgr_port_list;
@@ -242,7 +242,7 @@ cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subpo
     memset(active_queues, 0, sizeof(active_queues));
     n_active_queues = 0;
 
-#ifdef RTE_SCHED_RED
+#ifdef RTE_SCHED_CMAN
     char sec_name[CFG_NAME_LEN];
     struct rte_red_params red_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
 
@@ -315,7 +315,7 @@ cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subpo
             }
         }
     }
-#endif /* RTE_SCHED_RED */
+#endif /* RTE_SCHED_CMAN */
 
     for (i = 0; i < MAX_SCHED_SUBPORTS; i++) {
         char sec_name[CFG_NAME_LEN];
@@ -393,7 +393,7 @@ cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subpo
             }
         }
     }
-#ifdef RTE_SCHED_RED
+#ifdef RTE_SCHED_CMAN
     for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
         for (k = 0; k < RTE_COLORS; k++) {
             subport_params[i].red_params[j][k].min_th =
@@ -211,7 +211,7 @@ struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
         .n_pipe_profiles = sizeof(pipe_profiles) /
             sizeof(struct rte_sched_pipe_params),
         .n_max_pipe_profiles = MAX_SCHED_PIPE_PROFILES,
-#ifdef RTE_SCHED_RED
+#ifdef RTE_SCHED_CMAN
     .red_params = {
         /* Traffic Class 0 Colors Green / Yellow / Red */
         [0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
@@ -278,7 +278,7 @@ struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
         [12][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
         [12][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
     },
-#endif /* RTE_SCHED_RED */
+#endif /* RTE_SCHED_CMAN */
     },
 };
 
@@ -7,11 +7,12 @@ if is_windows
     subdir_done()
 endif
 
-sources = files('rte_sched.c', 'rte_red.c', 'rte_approx.c')
+sources = files('rte_sched.c', 'rte_red.c', 'rte_approx.c', 'rte_pie.c')
 headers = files(
         'rte_approx.h',
         'rte_red.h',
         'rte_sched.h',
         'rte_sched_common.h',
+        'rte_pie.h',
 )
 deps += ['mbuf', 'meter']
lib/sched/rte_pie.c — new file, 86 lines
@@ -0,0 +1,86 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <stdlib.h>

#include "rte_pie.h"
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>

#ifdef __INTEL_COMPILER
#pragma warning(disable:2259) /* conversion may lose significant bits */
#endif

int
rte_pie_rt_data_init(struct rte_pie *pie)
{
    if (pie == NULL) {
        /* Allocate memory to use the PIE data structure */
        pie = rte_malloc(NULL, sizeof(struct rte_pie), 0);

        if (pie == NULL)
            RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);

        return -1;
    }

    pie->active = 0;
    pie->in_measurement = 0;
    pie->departed_bytes_count = 0;
    pie->start_measurement = 0;
    pie->last_measurement = 0;
    pie->qlen = 0;
    pie->avg_dq_time = 0;
    pie->burst_allowance = 0;
    pie->qdelay_old = 0;
    pie->drop_prob = 0;
    pie->accu_prob = 0;

    return 0;
}

int
rte_pie_config_init(struct rte_pie_config *pie_cfg,
    const uint16_t qdelay_ref,
    const uint16_t dp_update_interval,
    const uint16_t max_burst,
    const uint16_t tailq_th)
{
    uint64_t tsc_hz = rte_get_tsc_hz();

    if (pie_cfg == NULL)
        return -1;

    if (qdelay_ref <= 0) {
        RTE_LOG(ERR, SCHED,
            "%s: Incorrect value for qdelay_ref\n", __func__);
        return -EINVAL;
    }

    if (dp_update_interval <= 0) {
        RTE_LOG(ERR, SCHED,
            "%s: Incorrect value for dp_update_interval\n", __func__);
        return -EINVAL;
    }

    if (max_burst <= 0) {
        RTE_LOG(ERR, SCHED,
            "%s: Incorrect value for max_burst\n", __func__);
        return -EINVAL;
    }

    if (tailq_th <= 0) {
        RTE_LOG(ERR, SCHED,
            "%s: Incorrect value for tailq_th\n", __func__);
        return -EINVAL;
    }

    pie_cfg->qdelay_ref = (tsc_hz * qdelay_ref) / 1000;
    pie_cfg->dp_update_interval = (tsc_hz * dp_update_interval) / 1000;
    pie_cfg->max_burst = (tsc_hz * max_burst) / 1000;
    pie_cfg->tailq_th = tailq_th;

    return 0;
}
lib/sched/rte_pie.h — new file, 396 lines
@@ -0,0 +1,396 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#ifndef __RTE_PIE_H_INCLUDED__
#define __RTE_PIE_H_INCLUDED__

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @file
 * Proportional Integral Controller Enhanced (PIE)
 **/

#include <stdint.h>

#include <rte_random.h>
#include <rte_debug.h>
#include <rte_cycles.h>

#define RTE_DQ_THRESHOLD   16384   /**< Queue length threshold (2^14)
                                     *   to start measurement cycle (bytes)
                                     */
#define RTE_DQ_WEIGHT      0.25    /**< Weight (RTE_DQ_THRESHOLD/2^16) to compute dequeue rate */
#define RTE_ALPHA          0.125   /**< Weights in drop probability calculations */
#define RTE_BETA           1.25    /**< Weights in drop probability calculations */
#define RTE_RAND_MAX       ~0LLU   /**< Max value of the random number */


/**
 * PIE configuration parameters passed by user
 *
 */
struct rte_pie_params {
    uint16_t qdelay_ref;         /**< Latency Target (milliseconds) */
    uint16_t dp_update_interval; /**< Update interval for drop probability (milliseconds) */
    uint16_t max_burst;          /**< Max Burst Allowance (milliseconds) */
    uint16_t tailq_th;           /**< Tailq drop threshold (packet counts) */
};

/**
 * PIE configuration parameters
 *
 */
struct rte_pie_config {
    uint64_t qdelay_ref;         /**< Latency Target (in CPU cycles) */
    uint64_t dp_update_interval; /**< Update interval for drop probability (in CPU cycles) */
    uint64_t max_burst;          /**< Max Burst Allowance (in CPU cycles) */
    uint16_t tailq_th;           /**< Tailq drop threshold (packet counts) */
};

/**
 * PIE run-time data
 */
struct rte_pie {
    uint16_t active;               /**< Flag for activating/deactivating PIE */
    uint16_t in_measurement;       /**< Flag for activation of measurement cycle */
    uint32_t departed_bytes_count; /**< Number of bytes departed in current measurement cycle */
    uint64_t start_measurement;    /**< Time to start to measurement cycle (in cpu cycles) */
    uint64_t last_measurement;     /**< Time of last measurement (in cpu cycles) */
    uint64_t qlen;                 /**< Queue length (packets count) */
    uint64_t qlen_bytes;           /**< Queue length (bytes count) */
    uint64_t avg_dq_time;          /**< Time averaged dequeue rate (in cpu cycles) */
    uint32_t burst_allowance;      /**< Current burst allowance (bytes) */
    uint64_t qdelay_old;           /**< Old queue delay (bytes) */
    double drop_prob;              /**< Current packet drop probability */
    double accu_prob;              /**< Accumulated packet drop probability */
};

/**
 * @brief Initialises run-time data
 *
 * @param pie [in,out] data pointer to PIE runtime data
 *
 * @return Operation status
 * @retval 0 success
 * @retval !0 error
 */
int
__rte_experimental
rte_pie_rt_data_init(struct rte_pie *pie);

/**
 * @brief Configures a single PIE configuration parameter structure.
 *
 * @param pie_cfg [in,out] config pointer to a PIE configuration parameter structure
 * @param qdelay_ref [in] latency target (milliseconds)
 * @param dp_update_interval [in] update interval for drop probability (milliseconds)
 * @param max_burst [in] maximum burst allowance (milliseconds)
 * @param tailq_th [in] tail drop threshold for the queue (number of packets)
 *
 * @return Operation status
 * @retval 0 success
 * @retval !0 error
 */
int
__rte_experimental
rte_pie_config_init(struct rte_pie_config *pie_cfg,
    const uint16_t qdelay_ref,
    const uint16_t dp_update_interval,
    const uint16_t max_burst,
    const uint16_t tailq_th);

/**
 * @brief Decides packet enqueue when queue is empty
 *
 * Note: packet is never dropped in this particular case.
 *
 * @param pie_cfg [in] config pointer to a PIE configuration parameter structure
 * @param pie [in, out] data pointer to PIE runtime data
 * @param pkt_len [in] packet length in bytes
 *
 * @return Operation status
 * @retval 0 enqueue the packet
 * @retval !0 drop the packet
 */
static int
__rte_experimental
rte_pie_enqueue_empty(const struct rte_pie_config *pie_cfg,
    struct rte_pie *pie,
    uint32_t pkt_len)
{
    RTE_ASSERT(pkt_len != NULL);

    /* Update the PIE qlen parameter */
    pie->qlen++;
    pie->qlen_bytes += pkt_len;

    /**
     * If the queue has been idle for a while, turn off PIE and Reset counters
     */
    if ((pie->active == 1) &&
        (pie->qlen < (pie_cfg->tailq_th * 0.1))) {
        pie->active = 0;
        pie->in_measurement = 0;
    }

    return 0;
}

/**
 * @brief make a decision to drop or enqueue a packet based on probability
 *        criteria
 *
 * @param pie_cfg [in] config pointer to a PIE configuration parameter structure
 * @param pie [in, out] data pointer to PIE runtime data
 * @param time [in] current time (measured in cpu cycles)
 */
static void
__rte_experimental
_calc_drop_probability(const struct rte_pie_config *pie_cfg,
    struct rte_pie *pie, uint64_t time)
{
    uint64_t qdelay_ref = pie_cfg->qdelay_ref;

    /* Note: can be implemented using integer multiply.
     * DQ_THRESHOLD is power of 2 value.
     */
    uint64_t current_qdelay = pie->qlen * (pie->avg_dq_time >> 14);

    double p = RTE_ALPHA * (current_qdelay - qdelay_ref) +
        RTE_BETA * (current_qdelay - pie->qdelay_old);

    if (pie->drop_prob < 0.000001)
        p = p * 0.00048828125;    /* (1/2048) = 0.00048828125 */
    else if (pie->drop_prob < 0.00001)
        p = p * 0.001953125;      /* (1/512) = 0.001953125 */
    else if (pie->drop_prob < 0.0001)
        p = p * 0.0078125;        /* (1/128) = 0.0078125 */
    else if (pie->drop_prob < 0.001)
        p = p * 0.03125;          /* (1/32) = 0.03125 */
    else if (pie->drop_prob < 0.01)
        p = p * 0.125;            /* (1/8) = 0.125 */
    else if (pie->drop_prob < 0.1)
        p = p * 0.5;              /* (1/2) = 0.5 */

    if (pie->drop_prob >= 0.1 && p > 0.02)
        p = 0.02;

    pie->drop_prob += p;

    double qdelay = qdelay_ref * 0.5;

    /* Exponentially decay drop prob when congestion goes away */
    if ((double)current_qdelay < qdelay && pie->qdelay_old < qdelay)
        pie->drop_prob *= 0.98;   /* 1 - 1/64 is sufficient */

    /* Bound drop probability */
    if (pie->drop_prob < 0)
        pie->drop_prob = 0;
    if (pie->drop_prob > 1)
        pie->drop_prob = 1;

    pie->qdelay_old = current_qdelay;
    pie->last_measurement = time;

    uint64_t burst_allowance = pie->burst_allowance - pie_cfg->dp_update_interval;

    pie->burst_allowance = (burst_allowance > 0) ? burst_allowance : 0;
}

/**
 * @brief make a decision to drop or enqueue a packet based on probability
 *        criteria
 *
 * @param pie_cfg [in] config pointer to a PIE configuration parameter structure
 * @param pie [in, out] data pointer to PIE runtime data
 *
 * @return operation status
 * @retval 0 enqueue the packet
 * @retval 1 drop the packet
 */
static inline int
__rte_experimental
_rte_pie_drop(const struct rte_pie_config *pie_cfg,
    struct rte_pie *pie)
{
    uint64_t rand_value;
    double qdelay = pie_cfg->qdelay_ref * 0.5;

    /* PIE is active but the queue is not congested: return 0 */
    if (((pie->qdelay_old < qdelay) && (pie->drop_prob < 0.2)) ||
        (pie->qlen <= (pie_cfg->tailq_th * 0.1)))
        return 0;

    if (pie->drop_prob == 0)
        pie->accu_prob = 0;

    /* For practical reasons, drop probability can be further scaled according
     * to packet size, but one needs to set a bound to avoid unnecessary bias
     * Random drop
     */
    pie->accu_prob += pie->drop_prob;

    if (pie->accu_prob < 0.85)
        return 0;

    if (pie->accu_prob >= 8.5)
        return 1;

    rand_value = rte_rand()/RTE_RAND_MAX;

    if ((double)rand_value < pie->drop_prob) {
        pie->accu_prob = 0;
        return 1;
    }

    /* No drop */
    return 0;
}

/**
 * @brief Decides if new packet should be enqueued or dropped for non-empty queue
 *
 * @param pie_cfg [in] config pointer to a PIE configuration parameter structure
 * @param pie [in,out] data pointer to PIE runtime data
 * @param pkt_len [in] packet length in bytes
 * @param time [in] current time (measured in cpu cycles)
 *
 * @return Operation status
 * @retval 0 enqueue the packet
 * @retval 1 drop the packet based on max threshold criterion
 * @retval 2 drop the packet based on mark probability criterion
 */
static inline int
__rte_experimental
rte_pie_enqueue_nonempty(const struct rte_pie_config *pie_cfg,
    struct rte_pie *pie,
    uint32_t pkt_len,
    const uint64_t time)
{
    /* Check queue space against the tail drop threshold */
    if (pie->qlen >= pie_cfg->tailq_th) {

        pie->accu_prob = 0;
        return 1;
    }

    if (pie->active) {
        /* Update drop probability after certain interval */
        if ((time - pie->last_measurement) >= pie_cfg->dp_update_interval)
            _calc_drop_probability(pie_cfg, pie, time);

        /* Decide whether packet to be dropped or enqueued */
        if (_rte_pie_drop(pie_cfg, pie) && pie->burst_allowance == 0)
            return 2;
    }

    /* When queue occupancy is over a certain threshold, turn on PIE */
    if ((pie->active == 0) &&
        (pie->qlen >= (pie_cfg->tailq_th * 0.1))) {
        pie->active = 1;
        pie->qdelay_old = 0;
        pie->drop_prob = 0;
        pie->in_measurement = 1;
        pie->departed_bytes_count = 0;
        pie->avg_dq_time = 0;
        pie->last_measurement = time;
        pie->burst_allowance = pie_cfg->max_burst;
        pie->accu_prob = 0;
        pie->start_measurement = time;
    }

    /* when queue has been idle for a while, turn off PIE and Reset counters */
    if (pie->active == 1 &&
        pie->qlen < (pie_cfg->tailq_th * 0.1)) {
        pie->active = 0;
        pie->in_measurement = 0;
    }

    /* Update PIE qlen parameter */
    pie->qlen++;
    pie->qlen_bytes += pkt_len;

    /* No drop */
    return 0;
}

/**
 * @brief Decides if new packet should be enqueued or dropped
 * Updates run time data and gives verdict whether to enqueue or drop the packet.
 *
 * @param pie_cfg [in] config pointer to a PIE configuration parameter structure
 * @param pie [in,out] data pointer to PIE runtime data
 * @param qlen [in] queue length
 * @param pkt_len [in] packet length in bytes
 * @param time [in] current time stamp (measured in cpu cycles)
 *
 * @return Operation status
 * @retval 0 enqueue the packet
 * @retval 1 drop the packet based on drop probability criteria
 */
static inline int
__rte_experimental
rte_pie_enqueue(const struct rte_pie_config *pie_cfg,
    struct rte_pie *pie,
    const unsigned int qlen,
    uint32_t pkt_len,
    const uint64_t time)
{
    RTE_ASSERT(pie_cfg != NULL);
    RTE_ASSERT(pie != NULL);

    if (qlen != 0)
        return rte_pie_enqueue_nonempty(pie_cfg, pie, pkt_len, time);
    else
        return rte_pie_enqueue_empty(pie_cfg, pie, pkt_len);
}

/**
 * @brief PIE rate estimation method
 * Called on each packet departure.
 *
 * @param pie [in] data pointer to PIE runtime data
 * @param pkt_len [in] packet length in bytes
 * @param time [in] current time stamp in cpu cycles
 */
static inline void
__rte_experimental
rte_pie_dequeue(struct rte_pie *pie,
    uint32_t pkt_len,
    uint64_t time)
{
    /* Dequeue rate estimation */
    if (pie->in_measurement) {
        pie->departed_bytes_count += pkt_len;

        /* Start a new measurement cycle when enough packets */
        if (pie->departed_bytes_count >= RTE_DQ_THRESHOLD) {
            uint64_t dq_time = time - pie->start_measurement;

            if (pie->avg_dq_time == 0)
                pie->avg_dq_time = dq_time;
            else
                pie->avg_dq_time = dq_time * RTE_DQ_WEIGHT + pie->avg_dq_time
                    * (1 - RTE_DQ_WEIGHT);

            pie->in_measurement = 0;
        }
    }

    /* Start measurement cycle when enough data in the queue */
    if ((pie->qlen_bytes >= RTE_DQ_THRESHOLD) && (pie->in_measurement == 0)) {
        pie->in_measurement = 1;
        pie->start_measurement = time;
        pie->departed_bytes_count = 0;
    }
}

#ifdef __cplusplus
}
#endif

#endif /* __RTE_PIE_H_INCLUDED__ */
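For orientation (not part of the patch), a small self-contained usage sketch of the API declared above; every name other than the rte_pie_* and rte_get_tsc_cycles() calls is illustrative only:

#include <stdint.h>
#include <rte_cycles.h>
#include "rte_pie.h"

/* Hypothetical per-queue state kept by an application. */
struct app_queue {
    struct rte_pie_config pie_cfg;
    struct rte_pie pie;
    unsigned int qlen;  /* packets currently in the queue */
};

static int
app_queue_init(struct app_queue *q)
{
    rte_pie_rt_data_init(&q->pie);
    /* 15 ms latency target, 15 ms update period, 150 ms burst, 64-packet tail drop */
    return rte_pie_config_init(&q->pie_cfg, 15, 15, 150, 64);
}

/* On packet arrival: 0 means enqueue, non-zero means drop. */
static int
app_queue_admit(struct app_queue *q, uint32_t pkt_len)
{
    return rte_pie_enqueue(&q->pie_cfg, &q->pie, q->qlen, pkt_len,
                   rte_get_tsc_cycles());
}

/* On packet departure: feed the dequeue-rate estimator. */
static void
app_queue_departed(struct app_queue *q, uint32_t pkt_len)
{
    rte_pie_dequeue(&q->pie, pkt_len, rte_get_tsc_cycles());
}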
@@ -89,8 +89,12 @@ struct rte_sched_queue {
 
 struct rte_sched_queue_extra {
     struct rte_sched_queue_stats stats;
-#ifdef RTE_SCHED_RED
-    struct rte_red red;
+#ifdef RTE_SCHED_CMAN
+    RTE_STD_C11
+    union {
+        struct rte_red red;
+        struct rte_pie pie;
+    };
 #endif
 };
 
@@ -183,8 +187,15 @@ struct rte_sched_subport {
     /* Pipe queues size */
     uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
 
-#ifdef RTE_SCHED_RED
-    struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
+#ifdef RTE_SCHED_CMAN
+    bool cman_enabled;
+    enum rte_sched_cman_mode cman;
+
+    RTE_STD_C11
+    union {
+        struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
+        struct rte_pie_config pie_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+    };
 #endif
 
     /* Scheduling loop detection */
@@ -1078,6 +1089,90 @@ rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports)
     rte_free(port);
 }
 
+#ifdef RTE_SCHED_CMAN
+static int
+rte_sched_red_config(struct rte_sched_port *port,
+    struct rte_sched_subport *s,
+    struct rte_sched_subport_params *params,
+    uint32_t n_subports)
+{
+    uint32_t i;
+
+    for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+
+        uint32_t j;
+
+        for (j = 0; j < RTE_COLORS; j++) {
+            /* if min/max are both zero, then RED is disabled */
+            if ((params->cman_params->red_params[i][j].min_th |
+                 params->cman_params->red_params[i][j].max_th) == 0) {
+                continue;
+            }
+
+            if (rte_red_config_init(&s->red_config[i][j],
+                params->cman_params->red_params[i][j].wq_log2,
+                params->cman_params->red_params[i][j].min_th,
+                params->cman_params->red_params[i][j].max_th,
+                params->cman_params->red_params[i][j].maxp_inv) != 0) {
+                rte_sched_free_memory(port, n_subports);
+
+                RTE_LOG(NOTICE, SCHED,
+                "%s: RED configuration init fails\n", __func__);
+                return -EINVAL;
+            }
+        }
+    }
+    s->cman = RTE_SCHED_CMAN_RED;
+    return 0;
+}
+
+static int
+rte_sched_pie_config(struct rte_sched_port *port,
+    struct rte_sched_subport *s,
+    struct rte_sched_subport_params *params,
+    uint32_t n_subports)
+{
+    uint32_t i;
+
+    for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+        if (params->cman_params->pie_params[i].tailq_th > params->qsize[i]) {
+            RTE_LOG(NOTICE, SCHED,
+            "%s: PIE tailq threshold incorrect\n", __func__);
+            return -EINVAL;
+        }
+
+        if (rte_pie_config_init(&s->pie_config[i],
+            params->cman_params->pie_params[i].qdelay_ref,
+            params->cman_params->pie_params[i].dp_update_interval,
+            params->cman_params->pie_params[i].max_burst,
+            params->cman_params->pie_params[i].tailq_th) != 0) {
+            rte_sched_free_memory(port, n_subports);
+
+            RTE_LOG(NOTICE, SCHED,
+            "%s: PIE configuration init fails\n", __func__);
+            return -EINVAL;
+        }
+    }
+    s->cman = RTE_SCHED_CMAN_PIE;
+    return 0;
+}
+
+static int
+rte_sched_cman_config(struct rte_sched_port *port,
+    struct rte_sched_subport *s,
+    struct rte_sched_subport_params *params,
+    uint32_t n_subports)
+{
+    if (params->cman_params->cman_mode == RTE_SCHED_CMAN_RED)
+        return rte_sched_red_config(port, s, params, n_subports);
+
+    else if (params->cman_params->cman_mode == RTE_SCHED_CMAN_PIE)
+        return rte_sched_pie_config(port, s, params, n_subports);
+
+    return -EINVAL;
+}
+#endif
+
 int
 rte_sched_subport_config(struct rte_sched_port *port,
     uint32_t subport_id,
@@ -1167,29 +1262,17 @@ rte_sched_subport_config(struct rte_sched_port *port,
     s->n_pipe_profiles = params->n_pipe_profiles;
     s->n_max_pipe_profiles = params->n_max_pipe_profiles;
 
-#ifdef RTE_SCHED_RED
-    for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
-        uint32_t j;
-
-        for (j = 0; j < RTE_COLORS; j++) {
-            /* if min/max are both zero, then RED is disabled */
-            if ((params->red_params[i][j].min_th |
-                params->red_params[i][j].max_th) == 0) {
-                continue;
-            }
-
-            if (rte_red_config_init(&s->red_config[i][j],
-                params->red_params[i][j].wq_log2,
-                params->red_params[i][j].min_th,
-                params->red_params[i][j].max_th,
-                params->red_params[i][j].maxp_inv) != 0) {
-                RTE_LOG(NOTICE, SCHED,
-                    "%s: RED configuration init fails\n",
-                    __func__);
-                ret = -EINVAL;
-                goto out;
-            }
+#ifdef RTE_SCHED_CMAN
+    if (params->cman_params != NULL) {
+        s->cman_enabled = true;
+        status = rte_sched_cman_config(port, s, params, n_subports);
+        if (status) {
+            RTE_LOG(NOTICE, SCHED,
+                "%s: CMAN configuration fails\n", __func__);
+            return status;
         }
+    } else {
+        s->cman_enabled = false;
     }
 #endif
 
@@ -1718,30 +1801,19 @@ rte_sched_port_update_subport_stats(struct rte_sched_port *port,
     subport->stats.n_bytes_tc[tc_index] += pkt_len;
 }
 
-#ifdef RTE_SCHED_RED
 static inline void
 rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
     struct rte_sched_subport *subport,
     uint32_t qindex,
     struct rte_mbuf *pkt,
-    uint32_t red)
-#else
-static inline void
-rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
-    struct rte_sched_subport *subport,
-    uint32_t qindex,
-    struct rte_mbuf *pkt,
-    __rte_unused uint32_t red)
-#endif
+    __rte_unused uint32_t n_pkts_cman_dropped)
 {
     uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
     uint32_t pkt_len = pkt->pkt_len;
 
     subport->stats.n_pkts_tc_dropped[tc_index] += 1;
     subport->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
-#ifdef RTE_SCHED_RED
-    subport->stats.n_pkts_red_dropped[tc_index] += red;
-#endif
+    subport->stats.n_pkts_cman_dropped[tc_index] += n_pkts_cman_dropped;
 }
 
 static inline void
@@ -1756,73 +1828,99 @@ rte_sched_port_update_queue_stats(struct rte_sched_subport *subport,
     qe->stats.n_bytes += pkt_len;
 }
 
-#ifdef RTE_SCHED_RED
 static inline void
 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
     uint32_t qindex,
     struct rte_mbuf *pkt,
-    uint32_t red)
-#else
-static inline void
-rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
-    uint32_t qindex,
-    struct rte_mbuf *pkt,
-    __rte_unused uint32_t red)
-#endif
+    __rte_unused uint32_t n_pkts_cman_dropped)
 {
     struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
     uint32_t pkt_len = pkt->pkt_len;
 
     qe->stats.n_pkts_dropped += 1;
     qe->stats.n_bytes_dropped += pkt_len;
-#ifdef RTE_SCHED_RED
-    qe->stats.n_pkts_red_dropped += red;
+#ifdef RTE_SCHED_CMAN
+    if (subport->cman_enabled)
+        qe->stats.n_pkts_cman_dropped += n_pkts_cman_dropped;
 #endif
 }
 
 #endif /* RTE_SCHED_COLLECT_STATS */
 
-#ifdef RTE_SCHED_RED
+#ifdef RTE_SCHED_CMAN
 
 static inline int
-rte_sched_port_red_drop(struct rte_sched_port *port,
+rte_sched_port_cman_drop(struct rte_sched_port *port,
     struct rte_sched_subport *subport,
     struct rte_mbuf *pkt,
     uint32_t qindex,
     uint16_t qlen)
 {
-    struct rte_sched_queue_extra *qe;
-    struct rte_red_config *red_cfg;
-    struct rte_red *red;
-    uint32_t tc_index;
-    enum rte_color color;
-
-    tc_index = rte_sched_port_pipe_tc(port, qindex);
-    color = rte_sched_port_pkt_read_color(pkt);
-    red_cfg = &subport->red_config[tc_index][color];
-
-    if ((red_cfg->min_th | red_cfg->max_th) == 0)
+    if (!subport->cman_enabled)
         return 0;
 
-    qe = subport->queue_extra + qindex;
-    red = &qe->red;
-
-    return rte_red_enqueue(red_cfg, red, qlen, port->time);
+    struct rte_sched_queue_extra *qe;
+    uint32_t tc_index;
+
+    tc_index = rte_sched_port_pipe_tc(port, qindex);
+    qe = subport->queue_extra + qindex;
+
+    /* RED */
+    if (subport->cman == RTE_SCHED_CMAN_RED) {
+        struct rte_red_config *red_cfg;
+        struct rte_red *red;
+        enum rte_color color;
+
+        color = rte_sched_port_pkt_read_color(pkt);
+        red_cfg = &subport->red_config[tc_index][color];
+
+        if ((red_cfg->min_th | red_cfg->max_th) == 0)
+            return 0;
+
+        red = &qe->red;
+
+        return rte_red_enqueue(red_cfg, red, qlen, port->time);
+    }
+
+    /* PIE */
+    struct rte_pie_config *pie_cfg = &subport->pie_config[tc_index];
+    struct rte_pie *pie = &qe->pie;
+
+    return rte_pie_enqueue(pie_cfg, pie, qlen, pkt->pkt_len, port->time_cpu_cycles);
 }
 
 static inline void
-rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port *port,
+rte_sched_port_red_set_queue_empty_timestamp(struct rte_sched_port *port,
     struct rte_sched_subport *subport, uint32_t qindex)
 {
-    struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
-    struct rte_red *red = &qe->red;
-
-    rte_red_mark_queue_empty(red, port->time);
+    if (subport->cman_enabled) {
+        struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
+        if (subport->cman == RTE_SCHED_CMAN_RED) {
+            struct rte_red *red = &qe->red;
+
+            rte_red_mark_queue_empty(red, port->time);
+        }
+    }
+}
+
+static inline void
+rte_sched_port_pie_dequeue(struct rte_sched_subport *subport,
+    uint32_t qindex, uint32_t pkt_len, uint64_t time) {
+    if (subport->cman_enabled && subport->cman == RTE_SCHED_CMAN_PIE) {
+        struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
+        struct rte_pie *pie = &qe->pie;
+
+        /* Update queue length */
+        pie->qlen -= 1;
+        pie->qlen_bytes -= pkt_len;
+
+        rte_pie_dequeue(pie, pkt_len, time);
+    }
 }
 
 #else
 
-static inline int rte_sched_port_red_drop(struct rte_sched_port *port __rte_unused,
+static inline int rte_sched_port_cman_drop(struct rte_sched_port *port __rte_unused,
     struct rte_sched_subport *subport __rte_unused,
     struct rte_mbuf *pkt __rte_unused,
     uint32_t qindex __rte_unused,
@@ -1831,9 +1929,17 @@ static inline int rte_sched_port_red_drop(struct rte_sched_port *port __rte_unus
     return 0;
 }
 
-#define rte_sched_port_set_queue_empty_timestamp(port, subport, qindex)
+#define rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex)
 
-#endif /* RTE_SCHED_RED */
+static inline void
+rte_sched_port_pie_dequeue(struct rte_sched_subport *subport __rte_unused,
+    uint32_t qindex __rte_unused,
+    uint32_t pkt_len __rte_unused,
+    uint64_t time __rte_unused) {
+    /* do-nothing when RTE_SCHED_CMAN not defined */
+}
+
+#endif /* RTE_SCHED_CMAN */
 
 #ifdef RTE_SCHED_DEBUG
 
@@ -1929,7 +2035,7 @@ rte_sched_port_enqueue_qwa(struct rte_sched_port *port,
     qlen = q->qw - q->qr;
 
     /* Drop the packet (and update drop stats) when queue is full */
-    if (unlikely(rte_sched_port_red_drop(port, subport, pkt, qindex, qlen) ||
+    if (unlikely(rte_sched_port_cman_drop(port, subport, pkt, qindex, qlen) ||
              (qlen >= qsize))) {
         rte_pktmbuf_free(pkt);
 #ifdef RTE_SCHED_COLLECT_STATS
@@ -2402,6 +2508,7 @@ grinder_schedule(struct rte_sched_port *port,
 {
     struct rte_sched_grinder *grinder = subport->grinder + pos;
     struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
+    uint32_t qindex = grinder->qindex[grinder->qpos];
     struct rte_mbuf *pkt = grinder->pkt;
     uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
     uint32_t be_tc_active;
@@ -2421,15 +2528,16 @@ grinder_schedule(struct rte_sched_port *port,
         (pkt_len * grinder->wrr_cost[grinder->qpos]) & be_tc_active;
 
     if (queue->qr == queue->qw) {
-        uint32_t qindex = grinder->qindex[grinder->qpos];
-
         rte_bitmap_clear(subport->bmp, qindex);
         grinder->qmask &= ~(1 << grinder->qpos);
         if (be_tc_active)
             grinder->wrr_mask[grinder->qpos] = 0;
-        rte_sched_port_set_queue_empty_timestamp(port, subport, qindex);
+
+        rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex);
     }
 
+    rte_sched_port_pie_dequeue(subport, qindex, pkt_len, port->time_cpu_cycles);
+
     /* Reset pipe loop detection */
     subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
     grinder->productive = 1;
@@ -61,10 +61,9 @@ extern "C" {
 #include <rte_mbuf.h>
 #include <rte_meter.h>
 
-/** Random Early Detection (RED) */
-#ifdef RTE_SCHED_RED
+/** Congestion Management */
 #include "rte_red.h"
-#endif
+#include "rte_pie.h"
 
 /** Maximum number of queues per pipe.
  * Note that the multiple queues (power of 2) can only be assigned to
@@ -110,6 +109,28 @@ extern "C" {
 #define RTE_SCHED_FRAME_OVERHEAD_DEFAULT      24
 #endif
 
+/**
+ * Congestion Management (CMAN) mode
+ *
+ * This is used for controlling the admission of packets into a packet queue or
+ * group of packet queues on congestion.
+ *
+ * The *Random Early Detection (RED)* algorithm works by proactively dropping
+ * more and more input packets as the queue occupancy builds up. When the queue
+ * is full or almost full, RED effectively works as *tail drop*. The *Weighted
+ * RED* algorithm uses a separate set of RED thresholds for each packet color.
+ *
+ * Similar to RED, Proportional Integral Controller Enhanced (PIE) randomly
+ * drops a packet at the onset of the congestion and tries to control the
+ * latency around the target value. The congestion detection, however, is based
+ * on the queueing latency instead of the queue length like RED. For more
+ * information, refer to RFC 8033.
+ */
+enum rte_sched_cman_mode {
+    RTE_SCHED_CMAN_RED, /**< Random Early Detection (RED) */
+    RTE_SCHED_CMAN_PIE, /**< Proportional Integral Controller Enhanced (PIE) */
+};
+
 /*
  * Pipe configuration parameters. The period and credits_per_period
  * parameters are measured in bytes, with one byte meaning the time
@@ -139,6 +160,22 @@ struct rte_sched_pipe_params {
     uint8_t wrr_weights[RTE_SCHED_BE_QUEUES_PER_PIPE];
 };
 
+/*
+ * Congestion Management configuration parameters.
+ */
+struct rte_sched_cman_params {
+    /** Congestion Management mode */
+    enum rte_sched_cman_mode cman_mode;
+
+    union {
+        /** RED parameters */
+        struct rte_red_params red_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
+
+        /** PIE parameters */
+        struct rte_pie_params pie_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+    };
+};
+
 /*
  * Subport configuration parameters. The period and credits_per_period
  * parameters are measured in bytes, with one byte meaning the time
@@ -174,10 +211,11 @@ struct rte_sched_subport_params {
     /** Max allowed profiles in the pipe profile table */
     uint32_t n_max_pipe_profiles;
 
-#ifdef RTE_SCHED_RED
-    /** RED parameters */
-    struct rte_red_params red_params[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
-#endif
+    /** Congestion Management parameters
+     * If NULL the congestion management is disabled for the subport,
+     * otherwise proper parameters need to be provided.
+     */
+    struct rte_sched_cman_params *cman_params;
 };
 
 struct rte_sched_subport_profile_params {
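To illustrate how a subport opts into PIE through this new field, here is a sketch based only on the structures above (the profile values are examples, and the other subport fields are elided):

#include "rte_sched.h"

/* Example congestion-management profile selecting PIE for every traffic class. */
static struct rte_sched_cman_params cman_params = {
    .cman_mode = RTE_SCHED_CMAN_PIE,
    .pie_params = {
        [0 ... RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1] = {
            .qdelay_ref = 15,          /* ms */
            .dp_update_interval = 15,  /* ms */
            .max_burst = 150,          /* ms */
            .tailq_th = 64,            /* packets */
        },
    },
};

static struct rte_sched_subport_params subport_params = {
    /* ... shaping and queue-size fields as before ... */
    .cman_params = &cman_params,  /* NULL would disable congestion management */
};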
@@ -208,10 +246,8 @@ struct rte_sched_subport_stats {
     /** Number of bytes dropped for each traffic class */
     uint64_t n_bytes_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
 
-#ifdef RTE_SCHED_RED
-    /** Number of packets dropped by red */
-    uint64_t n_pkts_red_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
-#endif
+    /** Number of packets dropped by congestion management scheme */
+    uint64_t n_pkts_cman_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
 };
 
 /** Queue statistics */
@@ -222,10 +258,8 @@ struct rte_sched_queue_stats {
     /** Packets dropped */
     uint64_t n_pkts_dropped;
 
-#ifdef RTE_SCHED_RED
-    /** Packets dropped by RED */
-    uint64_t n_pkts_red_dropped;
-#endif
+    /** Packets dropped by congestion management scheme */
+    uint64_t n_pkts_cman_dropped;
 
     /** Bytes successfully written */
     uint64_t n_bytes;
@@ -30,4 +30,8 @@ EXPERIMENTAL {
 
     # added in 20.11
     rte_sched_port_subport_profile_add;
+
+    # added in 21.11
+    rte_pie_rt_data_init;
+    rte_pie_config_init;
 };