ring: remove debug setting

The debug option only provided statistics to the user, most of
which could be tracked by the application itself. Remove it as both
a compile-time option and a feature, simplifying the code.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Reviewed-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
Bruce Richardson, 2017-03-29 16:21:20 +01:00; committed by Thomas Monjalon
parent d1e138e1b0
commit 8c82198978
6 changed files with 13 additions and 544 deletions
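
Since the per-ring counters are gone, an application that still wants
these numbers can keep its own accounting around the enqueue/dequeue
calls. A minimal sketch against the pre-17.05 burst API shown in this
diff; all app_* names are hypothetical, and it assumes callers are EAL
threads:

#include <rte_ring.h>
#include <rte_lcore.h>

/* Hypothetical per-lcore counters, standing in for the removed
 * per-ring debug stats. One cache-aligned slot per lcore keeps the
 * fast path free of atomics, as the old stats[] array did. */
struct app_ring_stats {
	uint64_t enq_success_bulk;
	uint64_t enq_success_objs;
	uint64_t enq_fail_bulk;
	uint64_t enq_fail_objs;
} __rte_cache_aligned;

static struct app_ring_stats app_stats[RTE_MAX_LCORE];

/* Wrap the enqueue and account for the outcome in the caller. */
static inline unsigned
app_enqueue_burst(struct rte_ring *r, void * const *objs, unsigned n)
{
	struct app_ring_stats *s = &app_stats[rte_lcore_id()];
	unsigned done = rte_ring_sp_enqueue_burst(r, objs, n) &
			RTE_RING_SZ_MASK;

	if (done != 0) {
		s->enq_success_bulk++;
		s->enq_success_objs += done;
	} else {
		s->enq_fail_bulk++;
		s->enq_fail_objs += n;
	}
	return done;
}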


@@ -452,7 +452,6 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
# Compile librte_ring
#
CONFIG_RTE_LIBRTE_RING=y
CONFIG_RTE_LIBRTE_RING_DEBUG=n
CONFIG_RTE_RING_PAUSE_REP_COUNT=0
#


@@ -110,13 +110,6 @@ Once an enqueue operation reaches the high water mark, the producer is notified,
This mechanism can be used, for example, to exert a back pressure on I/O to inform the LAN to PAUSE.
Debug
~~~~~
When debug is enabled (CONFIG_RTE_LIBRTE_RING_DEBUG is set),
the library stores some per-ring statistic counters about the number of enqueues/dequeues.
These statistics are per-core to avoid concurrent accesses or atomic operations.
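
To illustrate the per-core layout described above: each writer touches
only its own slot, and only a reader walks all of them, much as
rte_ring_dump() does later in this commit. A sketch, reusing the
hypothetical app_ring_stats array from the note above:

/* Sum one per-lcore counter for reporting; no locks or atomics are
 * needed because each writer owns its slot and the sum is a snapshot. */
static uint64_t
app_total_enq_objs(void)
{
	uint64_t total = 0;
	unsigned lcore;

	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++)
		total += app_stats[lcore].enq_success_objs;
	return total;
}
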
Use Cases
---------


@@ -133,6 +133,7 @@ API Changes
have been made to it:
* removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``
* removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``
ABI Changes
-----------


@@ -131,12 +131,6 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
RTE_CACHE_LINE_MASK) != 0);
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
RTE_CACHE_LINE_MASK) != 0);
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
RTE_CACHE_LINE_MASK) != 0);
#endif
/* init the ring structure */
memset(r, 0, sizeof(*r));
@@ -284,11 +278,6 @@ rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
void
rte_ring_dump(FILE *f, const struct rte_ring *r)
{
#ifdef RTE_LIBRTE_RING_DEBUG
struct rte_ring_debug_stats sum;
unsigned lcore_id;
#endif
fprintf(f, "ring <%s>@%p\n", r->name, r);
fprintf(f, " flags=%x\n", r->flags);
fprintf(f, " size=%"PRIu32"\n", r->size);
@@ -302,36 +291,6 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
fprintf(f, " watermark=0\n");
else
fprintf(f, " watermark=%"PRIu32"\n", r->watermark);
/* sum and dump statistics */
#ifdef RTE_LIBRTE_RING_DEBUG
memset(&sum, 0, sizeof(sum));
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
sum.enq_success_bulk += r->stats[lcore_id].enq_success_bulk;
sum.enq_success_objs += r->stats[lcore_id].enq_success_objs;
sum.enq_quota_bulk += r->stats[lcore_id].enq_quota_bulk;
sum.enq_quota_objs += r->stats[lcore_id].enq_quota_objs;
sum.enq_fail_bulk += r->stats[lcore_id].enq_fail_bulk;
sum.enq_fail_objs += r->stats[lcore_id].enq_fail_objs;
sum.deq_success_bulk += r->stats[lcore_id].deq_success_bulk;
sum.deq_success_objs += r->stats[lcore_id].deq_success_objs;
sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk;
sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs;
}
fprintf(f, " size=%"PRIu32"\n", r->size);
fprintf(f, " enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk);
fprintf(f, " enq_success_objs=%"PRIu64"\n", sum.enq_success_objs);
fprintf(f, " enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk);
fprintf(f, " enq_quota_objs=%"PRIu64"\n", sum.enq_quota_objs);
fprintf(f, " enq_fail_bulk=%"PRIu64"\n", sum.enq_fail_bulk);
fprintf(f, " enq_fail_objs=%"PRIu64"\n", sum.enq_fail_objs);
fprintf(f, " deq_success_bulk=%"PRIu64"\n", sum.deq_success_bulk);
fprintf(f, " deq_success_objs=%"PRIu64"\n", sum.deq_success_objs);
fprintf(f, " deq_fail_bulk=%"PRIu64"\n", sum.deq_fail_bulk);
fprintf(f, " deq_fail_objs=%"PRIu64"\n", sum.deq_fail_objs);
#else
fprintf(f, " no statistics available\n");
#endif
}
/* dump the status of all rings on the console */


@@ -109,24 +109,6 @@ enum rte_ring_queue_behavior {
RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
};
#ifdef RTE_LIBRTE_RING_DEBUG
/**
* A structure that stores the ring statistics (per-lcore).
*/
struct rte_ring_debug_stats {
uint64_t enq_success_bulk; /**< Successful enqueues number. */
uint64_t enq_success_objs; /**< Objects successfully enqueued. */
uint64_t enq_quota_bulk; /**< Successful enqueues above watermark. */
uint64_t enq_quota_objs; /**< Objects enqueued above watermark. */
uint64_t enq_fail_bulk; /**< Failed enqueues number. */
uint64_t enq_fail_objs; /**< Objects that failed to be enqueued. */
uint64_t deq_success_bulk; /**< Successful dequeues number. */
uint64_t deq_success_objs; /**< Objects successfully dequeued. */
uint64_t deq_fail_bulk; /**< Failed dequeues number. */
uint64_t deq_fail_objs; /**< Objects that failed to be dequeued. */
} __rte_cache_aligned;
#endif
#define RTE_RING_MZ_PREFIX "RG_"
/**< The maximum length of a ring name. */
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
@@ -184,10 +166,6 @@ struct rte_ring {
/** Ring consumer status. */
struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);
#ifdef RTE_LIBRTE_RING_DEBUG
struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
#endif
void *ring[] __rte_cache_aligned; /**< Memory space of ring starts here.
* not volatile so need to be careful
* about compiler re-ordering */
@@ -198,27 +176,6 @@ struct rte_ring {
#define RTE_RING_QUOT_EXCEED (1 << 31) /**< Quota exceed for burst ops */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */
/**
* @internal When debug is enabled, store ring statistics.
* @param r
* A pointer to the ring.
* @param name
* The name of the statistics field to increment in the ring.
* @param n
* The number to add to the object-oriented statistics.
*/
#ifdef RTE_LIBRTE_RING_DEBUG
#define __RING_STAT_ADD(r, name, n) do { \
unsigned __lcore_id = rte_lcore_id(); \
if (__lcore_id < RTE_MAX_LCORE) { \
r->stats[__lcore_id].name##_objs += n; \
r->stats[__lcore_id].name##_bulk += 1; \
} \
} while(0)
#else
#define __RING_STAT_ADD(r, name, n) do {} while(0)
#endif
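
For illustration, a call such as __RING_STAT_ADD(r, enq_fail, n) in the
fast paths below pastes the field name into both counters, so under
debug it expanded to roughly:

do {
	unsigned __lcore_id = rte_lcore_id();
	if (__lcore_id < RTE_MAX_LCORE) {
		r->stats[__lcore_id].enq_fail_objs += n;
		r->stats[__lcore_id].enq_fail_bulk += 1;
	}
} while (0);
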
/**
* Calculate the memory size needed for a ring
*
@@ -460,17 +417,12 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
 	/* check that we have enough room in ring */
 	if (unlikely(n > free_entries)) {
-		if (behavior == RTE_RING_QUEUE_FIXED) {
-			__RING_STAT_ADD(r, enq_fail, n);
+		if (behavior == RTE_RING_QUEUE_FIXED)
 			return -ENOBUFS;
-		}
 		else {
 			/* No free entry available */
-			if (unlikely(free_entries == 0)) {
-				__RING_STAT_ADD(r, enq_fail, n);
+			if (unlikely(free_entries == 0))
 				return 0;
-			}
 			n = free_entries;
 		}
 	}
@@ -485,15 +437,11 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
 	rte_smp_wmb();
 	/* if we exceed the watermark */
-	if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
+	if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
 		ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
 				(int)(n | RTE_RING_QUOT_EXCEED);
-		__RING_STAT_ADD(r, enq_quota, n);
-	}
-	else {
+	else
 		ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
-		__RING_STAT_ADD(r, enq_success, n);
-	}
 	/*
 	 * If there are other enqueues in progress that preceded us,
@@ -557,17 +505,12 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
 	/* check that we have enough room in ring */
 	if (unlikely(n > free_entries)) {
-		if (behavior == RTE_RING_QUEUE_FIXED) {
-			__RING_STAT_ADD(r, enq_fail, n);
+		if (behavior == RTE_RING_QUEUE_FIXED)
 			return -ENOBUFS;
-		}
 		else {
 			/* No free entry available */
-			if (unlikely(free_entries == 0)) {
-				__RING_STAT_ADD(r, enq_fail, n);
+			if (unlikely(free_entries == 0))
 				return 0;
-			}
 			n = free_entries;
 		}
 	}
@@ -580,15 +523,11 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
 	rte_smp_wmb();
 	/* if we exceed the watermark */
-	if (unlikely(((mask + 1) - free_entries + n) > r->watermark)) {
+	if (unlikely(((mask + 1) - free_entries + n) > r->watermark))
 		ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
 				(int)(n | RTE_RING_QUOT_EXCEED);
-		__RING_STAT_ADD(r, enq_quota, n);
-	}
-	else {
+	else
 		ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
-		__RING_STAT_ADD(r, enq_success, n);
-	}
 	r->prod.tail = prod_next;
 	return ret;
@@ -652,16 +591,11 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
 	/* Set the actual entries for dequeue */
 	if (n > entries) {
-		if (behavior == RTE_RING_QUEUE_FIXED) {
-			__RING_STAT_ADD(r, deq_fail, n);
+		if (behavior == RTE_RING_QUEUE_FIXED)
 			return -ENOENT;
-		}
 		else {
-			if (unlikely(entries == 0)){
-				__RING_STAT_ADD(r, deq_fail, n);
+			if (unlikely(entries == 0))
 				return 0;
-			}
 			n = entries;
 		}
 	}
@@ -691,7 +625,6 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
 			sched_yield();
 		}
 	}
-	__RING_STAT_ADD(r, deq_success, n);
 	r->cons.tail = cons_next;
 	return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
@@ -738,16 +671,11 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
 	entries = prod_tail - cons_head;
 	if (n > entries) {
-		if (behavior == RTE_RING_QUEUE_FIXED) {
-			__RING_STAT_ADD(r, deq_fail, n);
+		if (behavior == RTE_RING_QUEUE_FIXED)
 			return -ENOENT;
-		}
 		else {
-			if (unlikely(entries == 0)){
-				__RING_STAT_ADD(r, deq_fail, n);
+			if (unlikely(entries == 0))
 				return 0;
-			}
 			n = entries;
 		}
 	}
@@ -759,7 +687,6 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
 	DEQUEUE_PTRS();
 	rte_smp_rmb();
-	__RING_STAT_ADD(r, deq_success, n);
 	r->cons.tail = cons_next;
 	return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
 }
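
A note on the return-value encoding visible in the watermark branches
above: with RTE_RING_QUEUE_FIXED the bulk calls return 0 or -EDQUOT once
the watermark is crossed, while the variable (burst) calls fold the
RTE_RING_QUOT_EXCEED flag into the returned count. A sketch of decoding
a burst return value; the helper name is hypothetical:

/* Split a burst-enqueue return into count and watermark flag, per the
 * RTE_RING_QUOT_EXCEED / RTE_RING_SZ_MASK encoding defined above. */
static inline unsigned
app_decode_enq_burst(unsigned ret, int *above_watermark)
{
	*above_watermark = (ret & RTE_RING_QUOT_EXCEED) != 0;
	return ret & RTE_RING_SZ_MASK; /* objects actually enqueued */
}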


@@ -763,412 +763,6 @@ test_ring_burst_basic(void)
return -1;
}
static int
test_ring_stats(void)
{
#ifndef RTE_LIBRTE_RING_DEBUG
printf("Enable RTE_LIBRTE_RING_DEBUG to test ring stats.\n");
return 0;
#else
void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
int ret;
unsigned i;
unsigned num_items = 0;
unsigned failed_enqueue_ops = 0;
unsigned failed_enqueue_items = 0;
unsigned failed_dequeue_ops = 0;
unsigned failed_dequeue_items = 0;
unsigned last_enqueue_ops = 0;
unsigned last_enqueue_items = 0;
unsigned last_quota_ops = 0;
unsigned last_quota_items = 0;
unsigned lcore_id = rte_lcore_id();
struct rte_ring_debug_stats *ring_stats = &r->stats[lcore_id];
printf("Test the ring stats.\n");
/* Reset the watermark in case it was set in another test. */
rte_ring_set_water_mark(r, 0);
/* Reset the ring stats. */
memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
/* Allocate some dummy object pointers. */
src = malloc(RING_SIZE*2*sizeof(void *));
if (src == NULL)
goto fail;
for (i = 0; i < RING_SIZE*2 ; i++) {
src[i] = (void *)(unsigned long)i;
}
/* Allocate some memory for copied objects. */
dst = malloc(RING_SIZE*2*sizeof(void *));
if (dst == NULL)
goto fail;
memset(dst, 0, RING_SIZE*2*sizeof(void *));
/* Set the head and tail pointers. */
cur_src = src;
cur_dst = dst;
/* Do Enqueue tests. */
printf("Test the dequeue stats.\n");
/* Fill the ring up to RING_SIZE -1. */
printf("Fill the ring.\n");
for (i = 0; i< (RING_SIZE/MAX_BULK); i++) {
rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
cur_src += MAX_BULK;
}
/* Adjust for final enqueue = MAX_BULK -1. */
cur_src--;
printf("Verify that the ring is full.\n");
if (rte_ring_full(r) != 1)
goto fail;
printf("Verify the enqueue success stats.\n");
/* Stats should match above enqueue operations to fill the ring. */
if (ring_stats->enq_success_bulk != (RING_SIZE/MAX_BULK))
goto fail;
/* Current max objects is RING_SIZE -1. */
if (ring_stats->enq_success_objs != RING_SIZE -1)
goto fail;
/* Shouldn't have any failures yet. */
if (ring_stats->enq_fail_bulk != 0)
goto fail;
if (ring_stats->enq_fail_objs != 0)
goto fail;
printf("Test stats for SP burst enqueue to a full ring.\n");
num_items = 2;
ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
if ((ret & RTE_RING_SZ_MASK) != 0)
goto fail;
failed_enqueue_ops += 1;
failed_enqueue_items += num_items;
/* The enqueue should have failed. */
if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
goto fail;
if (ring_stats->enq_fail_objs != failed_enqueue_items)
goto fail;
printf("Test stats for SP bulk enqueue to a full ring.\n");
num_items = 4;
ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
if (ret != -ENOBUFS)
goto fail;
failed_enqueue_ops += 1;
failed_enqueue_items += num_items;
/* The enqueue should have failed. */
if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
goto fail;
if (ring_stats->enq_fail_objs != failed_enqueue_items)
goto fail;
printf("Test stats for MP burst enqueue to a full ring.\n");
num_items = 8;
ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
if ((ret & RTE_RING_SZ_MASK) != 0)
goto fail;
failed_enqueue_ops += 1;
failed_enqueue_items += num_items;
/* The enqueue should have failed. */
if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
goto fail;
if (ring_stats->enq_fail_objs != failed_enqueue_items)
goto fail;
printf("Test stats for MP bulk enqueue to a full ring.\n");
num_items = 16;
ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
if (ret != -ENOBUFS)
goto fail;
failed_enqueue_ops += 1;
failed_enqueue_items += num_items;
/* The enqueue should have failed. */
if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
goto fail;
if (ring_stats->enq_fail_objs != failed_enqueue_items)
goto fail;
/* Do Dequeue tests. */
printf("Test the dequeue stats.\n");
printf("Empty the ring.\n");
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
}
/* There was only RING_SIZE -1 objects to dequeue. */
cur_dst++;
printf("Verify ring is empty.\n");
if (1 != rte_ring_empty(r))
goto fail;
printf("Verify the dequeue success stats.\n");
/* Stats should match above dequeue operations. */
if (ring_stats->deq_success_bulk != (RING_SIZE/MAX_BULK))
goto fail;
/* Objects dequeued is RING_SIZE -1. */
if (ring_stats->deq_success_objs != RING_SIZE -1)
goto fail;
/* Shouldn't have any dequeue failure stats yet. */
if (ring_stats->deq_fail_bulk != 0)
goto fail;
printf("Test stats for SC burst dequeue with an empty ring.\n");
num_items = 2;
ret = rte_ring_sc_dequeue_burst(r, cur_dst, num_items);
if ((ret & RTE_RING_SZ_MASK) != 0)
goto fail;
failed_dequeue_ops += 1;
failed_dequeue_items += num_items;
/* The dequeue should have failed. */
if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
goto fail;
if (ring_stats->deq_fail_objs != failed_dequeue_items)
goto fail;
printf("Test stats for SC bulk dequeue with an empty ring.\n");
num_items = 4;
ret = rte_ring_sc_dequeue_bulk(r, cur_dst, num_items);
if (ret != -ENOENT)
goto fail;
failed_dequeue_ops += 1;
failed_dequeue_items += num_items;
/* The dequeue should have failed. */
if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
goto fail;
if (ring_stats->deq_fail_objs != failed_dequeue_items)
goto fail;
printf("Test stats for MC burst dequeue with an empty ring.\n");
num_items = 8;
ret = rte_ring_mc_dequeue_burst(r, cur_dst, num_items);
if ((ret & RTE_RING_SZ_MASK) != 0)
goto fail;
failed_dequeue_ops += 1;
failed_dequeue_items += num_items;
/* The dequeue should have failed. */
if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
goto fail;
if (ring_stats->deq_fail_objs != failed_dequeue_items)
goto fail;
printf("Test stats for MC bulk dequeue with an empty ring.\n");
num_items = 16;
ret = rte_ring_mc_dequeue_bulk(r, cur_dst, num_items);
if (ret != -ENOENT)
goto fail;
failed_dequeue_ops += 1;
failed_dequeue_items += num_items;
/* The dequeue should have failed. */
if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
goto fail;
if (ring_stats->deq_fail_objs != failed_dequeue_items)
goto fail;
printf("Test total enqueue/dequeue stats.\n");
/* At this point the enqueue and dequeue stats should be the same. */
if (ring_stats->enq_success_bulk != ring_stats->deq_success_bulk)
goto fail;
if (ring_stats->enq_success_objs != ring_stats->deq_success_objs)
goto fail;
if (ring_stats->enq_fail_bulk != ring_stats->deq_fail_bulk)
goto fail;
if (ring_stats->enq_fail_objs != ring_stats->deq_fail_objs)
goto fail;
/* Watermark Tests. */
printf("Test the watermark/quota stats.\n");
printf("Verify the initial watermark stats.\n");
/* Watermark stats should be 0 since there is no watermark. */
if (ring_stats->enq_quota_bulk != 0)
goto fail;
if (ring_stats->enq_quota_objs != 0)
goto fail;
/* Set a watermark. */
rte_ring_set_water_mark(r, 16);
/* Reset pointers. */
cur_src = src;
cur_dst = dst;
last_enqueue_ops = ring_stats->enq_success_bulk;
last_enqueue_items = ring_stats->enq_success_objs;
printf("Test stats for SP burst enqueue below watermark.\n");
num_items = 8;
ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
if ((ret & RTE_RING_SZ_MASK) != num_items)
goto fail;
/* Watermark stats should still be 0. */
if (ring_stats->enq_quota_bulk != 0)
goto fail;
if (ring_stats->enq_quota_objs != 0)
goto fail;
/* Success stats should have increased. */
if (ring_stats->enq_success_bulk != last_enqueue_ops + 1)
goto fail;
if (ring_stats->enq_success_objs != last_enqueue_items + num_items)
goto fail;
last_enqueue_ops = ring_stats->enq_success_bulk;
last_enqueue_items = ring_stats->enq_success_objs;
printf("Test stats for SP burst enqueue at watermark.\n");
num_items = 8;
ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
if ((ret & RTE_RING_SZ_MASK) != num_items)
goto fail;
/* Watermark stats should have changed. */
if (ring_stats->enq_quota_bulk != 1)
goto fail;
if (ring_stats->enq_quota_objs != num_items)
goto fail;
last_quota_ops = ring_stats->enq_quota_bulk;
last_quota_items = ring_stats->enq_quota_objs;
printf("Test stats for SP burst enqueue above watermark.\n");
num_items = 1;
ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
if ((ret & RTE_RING_SZ_MASK) != num_items)
goto fail;
/* Watermark stats should have changed. */
if (ring_stats->enq_quota_bulk != last_quota_ops +1)
goto fail;
if (ring_stats->enq_quota_objs != last_quota_items + num_items)
goto fail;
last_quota_ops = ring_stats->enq_quota_bulk;
last_quota_items = ring_stats->enq_quota_objs;
printf("Test stats for MP burst enqueue above watermark.\n");
num_items = 2;
ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
if ((ret & RTE_RING_SZ_MASK) != num_items)
goto fail;
/* Watermark stats should have changed. */
if (ring_stats->enq_quota_bulk != last_quota_ops +1)
goto fail;
if (ring_stats->enq_quota_objs != last_quota_items + num_items)
goto fail;
last_quota_ops = ring_stats->enq_quota_bulk;
last_quota_items = ring_stats->enq_quota_objs;
printf("Test stats for SP bulk enqueue above watermark.\n");
num_items = 4;
ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
if (ret != -EDQUOT)
goto fail;
/* Watermark stats should have changed. */
if (ring_stats->enq_quota_bulk != last_quota_ops +1)
goto fail;
if (ring_stats->enq_quota_objs != last_quota_items + num_items)
goto fail;
last_quota_ops = ring_stats->enq_quota_bulk;
last_quota_items = ring_stats->enq_quota_objs;
printf("Test stats for MP bulk enqueue above watermark.\n");
num_items = 8;
ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
if (ret != -EDQUOT)
goto fail;
/* Watermark stats should have changed. */
if (ring_stats->enq_quota_bulk != last_quota_ops +1)
goto fail;
if (ring_stats->enq_quota_objs != last_quota_items + num_items)
goto fail;
printf("Test watermark success stats.\n");
/* Success stats should be same as last non-watermarked enqueue. */
if (ring_stats->enq_success_bulk != last_enqueue_ops)
goto fail;
if (ring_stats->enq_success_objs != last_enqueue_items)
goto fail;
/* Cleanup. */
/* Empty the ring. */
for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
cur_dst += MAX_BULK;
}
/* Reset the watermark. */
rte_ring_set_water_mark(r, 0);
/* Reset the ring stats. */
memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
/* Free memory before test completed */
free(src);
free(dst);
return 0;
fail:
free(src);
free(dst);
return -1;
#endif
}
/*
* it will always fail to create a ring with an invalid size in this function
*/
@@ -1335,10 +929,6 @@ test_ring(void)
if (test_ring_basic() < 0)
return -1;
/* ring stats */
if (test_ring_stats() < 0)
return -1;
/* basic operations */
if (test_live_watermark_change() < 0)
return -1;