sched: minor changes

Do not define grinder_credits_check() if it is not used.

Signed-off-by: Intel
Intel 2013-09-18 12:00:00 +02:00 committed by Thomas Monjalon
parent 5140eb165f
commit 0260e5e43f
5 changed files with 23 additions and 17 deletions
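The substance of the change is visible in the two rte_sched.c hunks further down: the definition of grinder_credits_check() is now bracketed by RTE_SCHED_TS_CREDITS_CHECK, so the helper is only compiled when the credits check is actually used. A minimal sketch of the resulting guard structure, with everything between the two hunks elided and the function body reduced to a comment, looks like this:

#if RTE_SCHED_TS_CREDITS_CHECK
#ifndef RTE_SCHED_SUBPORT_TC_OV

static inline int
grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
{
	/* ... compare the candidate packet length against the subport and
	 * pipe token bucket credits; body elided in this sketch ... */
	return 1;
}

#endif /* RTE_SCHED_SUBPORT_TC_OV */
#endif /* RTE_SCHED_TS_CREDITS_CHECK */

With RTE_SCHED_TS_CREDITS_CHECK disabled at build time the helper is simply not defined, which is what the commit message above refers to.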

View File

@@ -58,7 +58,7 @@ extern "C" {
* @param d
* Precision for the rational approximation
* @param p
* Pointer to pre-allocated space where the numerator of the rational
* approximation will be stored when operation is successful
* @param q
* Pointer to pre-allocated space where the denominator of the rational
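For orientation, the parameters above belong to the rational approximation helper declared in this header. Assuming its usual prototype, int rte_approx(double alpha, double d, uint32_t *p, uint32_t *q), returning 0 on success (the prototype and return convention are recalled from the header, not shown in this hunk), a call might look like:

#include <stdint.h>
#include <stdio.h>
#include "rte_approx.h"

/* Approximate alpha = 1/3 with precision d = 0.01: on success, p/q holds
 * a rational approximation of alpha within the requested precision. */
static int approx_example(void)
{
	uint32_t p, q;

	if (rte_approx(1.0 / 3.0, 0.01, &p, &q) != 0)
		return -1;        /* no approximation found for this precision */

	printf("1/3 ~= %u/%u\n", p, q);
	return 0;
}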

View File

@@ -528,10 +528,10 @@ __rte_bitmap_scan_read(struct rte_bitmap *bmp, uint32_t *pos, uint64_t *slab)
* @param slab
* When function call returns 1, slab contains the value of the entire 64-bit
* slab where the bit indicated by pos is located. Slabs are always 64-bit
* aligned, so the position of the first bit of the slab (this bit is not
* necessarily set) is pos / 64. Once a slab has been returned by the bitmap
* scan operation, the internal pointers of the bitmap are updated to point
* after this slab, so the same slab will not be returned again if it
* contains more than one bit which is set. When function call returns 0,
* slab is not modified.
* @return
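A usage sketch of the scan semantics described above; the public wrapper rte_bitmap_scan() is assumed from the rest of this header (it is not part of this hunk), with the pos/slab/return behaviour exactly as documented here:

#include <stdint.h>
#include <stdio.h>
#include "rte_bitmap.h"

/* Fetch one slab and report every set bit it contains; the scan advances
 * past the whole slab, so the caller is expected to consume all of its
 * set bits before scanning again. */
static void scan_example(struct rte_bitmap *bmp)
{
	uint32_t pos, base, i;
	uint64_t slab;

	if (rte_bitmap_scan(bmp, &pos, &slab) == 0)
		return;                      /* no bit set, slab not modified */

	base = pos - (pos % 64);             /* first bit of this 64-bit slab */
	for (i = 0; i < 64; i++)
		if (slab & ((uint64_t)1 << i))
			printf("bit %u is set\n", base + i);
}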

View File

@@ -78,6 +78,7 @@ if (!(exp)) { \
/**
* Externs
*
*/
extern uint32_t rte_red_rand_val;
extern uint32_t rte_red_rand_seed;
@@ -86,6 +87,7 @@ extern uint16_t rte_red_pow2_frac_inv[16];
/**
* RED configuration parameters passed by user
*
*/
struct rte_red_params {
uint16_t min_th; /**< Minimum threshold for queue (max_th) */
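The structure is cut off by the hunk boundary; purely as an illustration (the field names beyond min_th are recalled from the full rte_red.h and should be verified there), a user-supplied RED configuration might be initialised like this:

#include "rte_red.h"

/* Illustrative values only: queue averaging weight wq = 1/2^9, drop
 * probability ramping from 0 at 32 packets to 1/10 at 64 packets of
 * average queue size. */
static const struct rte_red_params red_params_example = {
	.min_th   = 32,   /* minimum average queue threshold */
	.max_th   = 64,   /* maximum average queue threshold */
	.maxp_inv = 10,   /* inverse of max drop probability: maxp = 1/10 */
	.wq_log2  = 9,    /* negated log2 of queue weight: wq = 1/512 */
};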

View File

@@ -1274,11 +1274,11 @@ rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint
* of prefetching the data structures. The naming convention is presented in the
* diagram below:
*
*   p00  _______   p10  _______   p20  _______   p30  _______
* ----->|       |----->|       |----->|       |----->|       |----->
*       |   0   |      |   1   |      |   2   |      |   3   |
* ----->|_______|----->|_______|----->|_______|----->|_______|----->
*         p01            p11            p21            p31
*
***/
int
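The idea behind the pipeline drawn above can be shown with a much simpler two-stage sketch; this is not the DPDK enqueue code (which runs four stages, each handling two packets), just the generic prefetch-ahead pattern it is built on. process_pkt() and pkt_data are hypothetical placeholders.

#include <rte_prefetch.h>

/* Hypothetical per-packet work, standing in for the real pipeline stages. */
extern void process_pkt(void *data);

/* While packet i is being processed, the data for packet i+1 is already
 * being fetched into cache, hiding the memory latency the comment block
 * above describes. */
static void pipeline_sketch(void *pkt_data[], unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (i + 1 < n)
			rte_prefetch0(pkt_data[i + 1]);  /* stage 0: prefetch next */
		process_pkt(pkt_data[i]);                /* stage 1: process current */
	}
}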
@@ -1568,6 +1568,8 @@ grinder_credits_update(struct rte_sched_port *port, uint32_t pos)
#endif /* RTE_SCHED_TS_CREDITS_UPDATE, RTE_SCHED_SUBPORT_TC_OV */
#if RTE_SCHED_TS_CREDITS_CHECK
#ifndef RTE_SCHED_SUBPORT_TC_OV
static inline int
@@ -1647,6 +1649,8 @@ grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
#endif /* RTE_SCHED_SUBPORT_TC_OV */
#endif /* RTE_SCHED_TS_CREDITS_CHECK */
static inline int
grinder_schedule(struct rte_sched_port *port, uint32_t pos)
{

View File

@@ -47,7 +47,7 @@ extern "C" {
* for the current network node.
*
* The scheduler supports thousands of packet queues grouped under a 5-level hierarchy:
* 1. Port:
* - Typical usage: output Ethernet port;
* - Multiple ports are scheduled in round robin order with equal priority;
* 2. Subport:
@@ -56,11 +56,11 @@ extern "C" {
* - Upper limit enforced per traffic class at subport level;
* - Lower priority traffic classes able to reuse subport bandwidth currently
* unused by higher priority traffic classes of the same subport;
* - When any subport traffic class is oversubscribed (configuration time
* event), the usage of subport member pipes with high demand for that
* traffic class pipes is truncated to a dynamically adjusted value with no
* impact to low demand pipes;
* 3. Pipe:
* - Typical usage: individual user/subscriber;
* - Traffic shaping using the token bucket algorithm (one bucket per pipe);
* 4. Traffic class:
@@ -69,9 +69,9 @@ extern "C" {
* - Lower priority traffic classes able to reuse pipe bandwidth currently
* unused by higher priority traffic classes of the same pipe;
* 5. Queue:
* - Typical usage: queue hosting packets from one or multiple connections
* of same traffic class belonging to the same user;
* - Weighted Round Robin (WRR) is used to service the queues within same
* pipe traffic class.
*
***/
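To make the hierarchy above concrete: every packet handed to the scheduler carries its position in this tree. A small sketch using rte_sched_port_pkt_read_tree_path(), whose prototype appears in a later hunk of this header:

#include <stdio.h>
#include <rte_mbuf.h>
#include "rte_sched.h"

/* Print the scheduler tree path stored in a packet: the four values map
 * directly onto levels 2-5 of the hierarchy listed above (level 1, the
 * port, is implied by the scheduler instance handling the packet). */
static void print_tree_path(struct rte_mbuf *pkt)
{
	uint32_t subport, pipe, traffic_class, queue;

	rte_sched_port_pkt_read_tree_path(pkt, &subport, &pipe,
		&traffic_class, &queue);

	printf("subport %u, pipe %u, tc %u, queue %u\n",
		subport, pipe, traffic_class, queue);
}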
@@ -293,7 +293,7 @@ rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params);
* @param subport_id
* Subport ID
* @param stats
* Pointer to pre-allocated subport statistics structure where the statistics
* counters should be stored
* @param tc_ov
* Pointer to pre-allocated 4-entry array where the oversubscription status for
@@ -315,7 +315,7 @@ rte_sched_subport_read_stats(struct rte_sched_port *port,
* @param queue_id
* Queue ID within port scheduler
* @param stats
* Pointer to pre-allocated subport statistics structure where the statistics
* counters should be stored
* @param qlen
* Pointer to pre-allocated variable where the current queue length should be stored.
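A hedged sketch of polling these counters: rte_sched_subport_read_stats() appears in the hunk header above, while the queue-level reader's name (assumed here to be rte_sched_queue_read_stats()), the statistics structure types, the qlen type and the 0-on-success return convention are recalled from the rest of this header and should be checked there.

#include <stdio.h>
#include "rte_sched.h"

/* Poll per-subport and per-queue counters; both calls copy the counters
 * into caller-allocated structures, as the parameter docs above describe. */
static void dump_stats(struct rte_sched_port *port,
	uint32_t subport_id, uint32_t queue_id)
{
	struct rte_sched_subport_stats subport_stats;
	struct rte_sched_queue_stats queue_stats;
	uint32_t tc_ov[4];     /* oversubscription status, one entry per TC */
	uint16_t qlen;

	if (rte_sched_subport_read_stats(port, subport_id,
			&subport_stats, tc_ov) == 0)
		printf("subport %u stats read\n", subport_id);

	if (rte_sched_queue_read_stats(port, queue_id,
			&queue_stats, &qlen) == 0)
		printf("queue %u length: %u\n", queue_id, qlen);
}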
@@ -376,7 +376,7 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt,
* Traffic class ID within pipe (0 .. 3)
* @param queue
* Queue ID within pipe traffic class (0 .. 3)
*
*/
static inline void
rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, uint32_t *pipe, uint32_t *traffic_class, uint32_t *queue)
@@ -425,7 +425,7 @@ rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint
* @param port
* Handle to port scheduler instance
* @param pkts
* Pre-allocated packet descriptor array where the packets dequeued from the port
* scheduler should be stored
* @param n_pkts
* Number of packets to dequeue from the port scheduler
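Finally, a minimal usage sketch around the enqueue/dequeue pair documented in these last hunks. rx_burst() and tx_burst() are hypothetical application-side I/O helpers, and the return values are assumed to be the number of packets actually accepted or dequeued:

#include <rte_mbuf.h>
#include "rte_sched.h"

#define BURST_SIZE 32

/* Hypothetical application I/O helpers, not part of this library. */
extern int rx_burst(struct rte_mbuf **pkts, int n);
extern void tx_burst(struct rte_mbuf **pkts, int n);

/* Run-to-completion loop: push a burst of classified packets into the
 * scheduler, then pull out whatever it decides may be transmitted now. */
static void sched_loop(struct rte_sched_port *port)
{
	struct rte_mbuf *pkts[BURST_SIZE];
	int n_rx, n_tx;

	n_rx = rx_burst(pkts, BURST_SIZE);
	if (n_rx > 0)
		rte_sched_port_enqueue(port, pkts, n_rx);

	n_tx = rte_sched_port_dequeue(port, pkts, BURST_SIZE);
	if (n_tx > 0)
		tx_burst(pkts, n_tx);
}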