ring: remove the yield when waiting for tail update

There was a compile-time setting to make a ring yield the CPU when
it entered the spin loop in the mp or mc ring code, waiting for the tail
pointer to be updated. Build-time settings are not recommended for
enabling/disabling features, and since this one was off by default,
remove it completely. If needed, a runtime-enabled equivalent can be used.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Reviewed-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
Bruce Richardson, 2017-03-29 16:21:21 +01:00 (committed by Thomas Monjalon)
parent 8c82198978
commit 82cb88375c
4 changed files with 7 additions and 35 deletions
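
The "runtime enabled equivalent" mentioned in the message is not part of this patch. As a rough sketch only, such a knob could gate the yield on a variable instead of a build flag; the wait_for_tail() helper and yield_threshold variable below are hypothetical, not DPDK API:

#include <sched.h>
#include <stdint.h>

/* Hypothetical runtime-gated yield, for illustration only; rte_pause()
 * comes from the DPDK headers (rte_pause.h in current releases). */
static unsigned int yield_threshold; /* 0 keeps the pure busy-wait */

static inline void
wait_for_tail(volatile uint32_t *tail, uint32_t expected)
{
	unsigned int rep = 0;

	while (*tail != expected) {
		rte_pause();
		/* After 'yield_threshold' pauses, give a preempted thread a
		 * chance to run, as RTE_RING_PAUSE_REP_COUNT used to. */
		if (yield_threshold && ++rep == yield_threshold) {
			rep = 0;
			sched_yield();
		}
	}
}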

--- a/config/common_base
+++ b/config/common_base

@@ -452,7 +452,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 # Compile librte_ring
 #
 CONFIG_RTE_LIBRTE_RING=y
-CONFIG_RTE_RING_PAUSE_REP_COUNT=0
 
 #
 # Compile librte_mempool

--- a/doc/guides/prog_guide/env_abstraction_layer.rst
+++ b/doc/guides/prog_guide/env_abstraction_layer.rst

@@ -352,11 +352,6 @@ Known Issues
 
   3. It MUST not be used by multi-producer/consumer pthreads, whose scheduling policies are SCHED_FIFO or SCHED_RR.
 
-  ``RTE_RING_PAUSE_REP_COUNT`` is defined for rte_ring to reduce contention. It's mainly for case 2, a yield is issued after number of times pause repeat.
-
-  It adds a sched_yield() syscall if the thread spins for too long while waiting on the other thread to finish its operations on the ring.
-  This gives the preempted thread a chance to proceed and finish with the ring enqueue/dequeue operation.
-
 + rte_timer
 
   Running ``rte_timer_manager()`` on a non-EAL pthread is not allowed. However, resetting/stopping the timer from a non-EAL pthread is allowed.
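
The removed paragraph described a mitigation internal to the library; with it gone, the spin loop is not reachable from application code. An application that must run rings on SCHED_FIFO or SCHED_RR pthreads can still yield at the API boundary instead. A minimal sketch, assuming only the public enqueue API (enqueue_with_yield() is illustrative, not part of DPDK):

#include <errno.h>
#include <sched.h>
#include <rte_ring.h>

/* Retry a multi-producer enqueue, yielding between attempts so a
 * preempted thread holding up the ring gets a chance to finish. */
static int
enqueue_with_yield(struct rte_ring *r, void *obj)
{
	int ret;

	while ((ret = rte_ring_mp_enqueue(r, obj)) == -ENOBUFS)
		sched_yield(); /* ring full: let other threads progress */
	return ret;
}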

--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst

@@ -134,6 +134,7 @@ API Changes
 
   * removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
   * removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``
+  * removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``
 
 ABI Changes
 -----------

--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h

@@ -114,11 +114,6 @@ enum rte_ring_queue_behavior {
 #define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
 			   sizeof(RTE_RING_MZ_PREFIX) + 1)
 
-#ifndef RTE_RING_PAUSE_REP_COUNT
-#define RTE_RING_PAUSE_REP_COUNT 0 /**< Yield after pause num of times, no yield
-                                    *   if RTE_RING_PAUSE_REP not defined. */
-#endif
-
 struct rte_memzone; /* forward declaration, so as not to require memzone.h */
 
 #if RTE_CACHE_LINE_SIZE < 128
@@ -393,7 +388,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
 	uint32_t cons_tail, free_entries;
 	const unsigned max = n;
 	int success;
-	unsigned i, rep = 0;
+	unsigned int i;
 	uint32_t mask = r->mask;
 	int ret;
 
@@ -447,18 +442,9 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
 	 * If there are other enqueues in progress that preceded us,
 	 * we need to wait for them to complete
 	 */
-	while (unlikely(r->prod.tail != prod_head)) {
+	while (unlikely(r->prod.tail != prod_head))
 		rte_pause();
 
-		/* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
-		 * for other thread finish. It gives pre-empted thread a chance
-		 * to proceed and finish with ring dequeue operation. */
-		if (RTE_RING_PAUSE_REP_COUNT &&
-				++rep == RTE_RING_PAUSE_REP_COUNT) {
-			rep = 0;
-			sched_yield();
-		}
-	}
 	r->prod.tail = prod_next;
 	return ret;
 }
@@ -491,7 +477,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
 {
 	uint32_t prod_head, cons_tail;
 	uint32_t prod_next, free_entries;
-	unsigned i;
+	unsigned int i;
 	uint32_t mask = r->mask;
 	int ret;
 
@@ -568,7 +554,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
 	uint32_t cons_next, entries;
 	const unsigned max = n;
 	int success;
-	unsigned i, rep = 0;
+	unsigned int i;
 	uint32_t mask = r->mask;
 
 	/* Avoid the unnecessary cmpset operation below, which is also
@@ -613,18 +599,9 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
 	 * If there are other dequeues in progress that preceded us,
 	 * we need to wait for them to complete
 	 */
-	while (unlikely(r->cons.tail != cons_head)) {
+	while (unlikely(r->cons.tail != cons_head))
 		rte_pause();
 
-		/* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
-		 * for other thread finish. It gives pre-empted thread a chance
-		 * to proceed and finish with ring dequeue operation. */
-		if (RTE_RING_PAUSE_REP_COUNT &&
-				++rep == RTE_RING_PAUSE_REP_COUNT) {
-			rep = 0;
-			sched_yield();
-		}
-	}
 	r->cons.tail = cons_next;
 
 	return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
@@ -659,7 +636,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
 {
 	uint32_t cons_head, prod_tail;
 	uint32_t cons_next, entries;
-	unsigned i;
+	unsigned int i;
 	uint32_t mask = r->mask;
 
 	cons_head = r->cons.head;
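
For context on the loops this patch simplifies: they implement the publish phase of the lockless multi-producer/multi-consumer protocol. Each thread first reserves slots with a compare-and-set on the head index, copies its objects, and then waits for all earlier reservations to be published before advancing the tail. A condensed sketch of the producer side, simplified from the code above (mp_publish_sketch() is illustrative, not a function in rte_ring.h):

/* Condensed two-phase multi-producer enqueue. Phase 1 (not shown):
 * prod.head was moved from prod_head to prod_next with
 * rte_atomic32_cmpset(), reserving the slots, and the objects were
 * copied into them. */
static inline void
mp_publish_sketch(struct rte_ring *r, uint32_t prod_head, uint32_t prod_next)
{
	/* Phase 2: producers publish in reservation order. Wait until
	 * every earlier producer has advanced prod.tail to our start... */
	while (unlikely(r->prod.tail != prod_head))
		rte_pause();
	/* ...then move it past our own slots, making them visible to
	 * consumers. */
	r->prod.tail = prod_next;
}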