sched: enable statistics unconditionally

Removed RTE_SCHED_COLLECT_STATS flag from rte_config.h.
Stats collection is always enabled.

Signed-off-by: Megha Ajmera <megha.ajmera@intel.com>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
This commit is contained in:
Megha Ajmera 2022-02-22 12:57:44 +00:00 committed by Thomas Monjalon
parent ec487c1896
commit 199d788e4b
3 changed files with 0 additions and 19 deletions

View File

@@ -90,7 +90,6 @@
/* rte_sched defines */
#undef RTE_SCHED_CMAN
#undef RTE_SCHED_COLLECT_STATS
#undef RTE_SCHED_SUBPORT_TC_OV
/* rte_graph defines */

View File

@@ -39,12 +39,6 @@ The application is located in the ``qos_sched`` sub-directory.
This application is intended to run on Linux only.
.. note::
To get statistics on the sample app using the command line interface as described in the next section,
DPDK must be compiled defining *RTE_SCHED_COLLECT_STATS*, which can be done by changing the relevant
entry in the ``config/rte_config.h`` file.
.. note::
Number of grinders is currently set to 8.

View File

@@ -1778,8 +1778,6 @@ rte_sched_port_queue_is_empty(struct rte_sched_subport *subport,
#endif /* RTE_SCHED_DEBUG */
#ifdef RTE_SCHED_COLLECT_STATS
static inline void
rte_sched_port_update_subport_stats(struct rte_sched_port *port,
struct rte_sched_subport *subport,
@@ -1837,8 +1835,6 @@ rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
#endif
}
#endif /* RTE_SCHED_COLLECT_STATS */
#ifdef RTE_SCHED_CMAN
static inline int
@@ -1977,18 +1973,14 @@ rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_subport *subport,
struct rte_mbuf *pkt, uint32_t subport_qmask)
{
struct rte_sched_queue *q;
#ifdef RTE_SCHED_COLLECT_STATS
struct rte_sched_queue_extra *qe;
#endif
uint32_t qindex = rte_mbuf_sched_queue_get(pkt);
uint32_t subport_queue_id = subport_qmask & qindex;
q = subport->queue + subport_queue_id;
rte_prefetch0(q);
#ifdef RTE_SCHED_COLLECT_STATS
qe = subport->queue_extra + subport_queue_id;
rte_prefetch0(qe);
#endif
return subport_queue_id;
}
@@ -2030,12 +2022,10 @@ rte_sched_port_enqueue_qwa(struct rte_sched_port *port,
if (unlikely(rte_sched_port_cman_drop(port, subport, pkt, qindex, qlen) ||
(qlen >= qsize))) {
rte_pktmbuf_free(pkt);
#ifdef RTE_SCHED_COLLECT_STATS
rte_sched_port_update_subport_stats_on_drop(port, subport,
qindex, pkt, qlen < qsize);
rte_sched_port_update_queue_stats_on_drop(subport, qindex, pkt,
qlen < qsize);
#endif
return 0;
}
@@ -2047,10 +2037,8 @@ rte_sched_port_enqueue_qwa(struct rte_sched_port *port,
rte_bitmap_set(subport->bmp, qindex);
/* Statistics */
#ifdef RTE_SCHED_COLLECT_STATS
rte_sched_port_update_subport_stats(port, subport, qindex, pkt);
rte_sched_port_update_queue_stats(subport, qindex, pkt);
#endif
return 1;
}