distributor: add unit tests

Add a set of unit tests and a basic performance test for the
distributor library. These tests cover all of the major functionality
of the library, on both the distributor and worker sides.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Tested-by: Waterman Cao <waterman.cao@intel.com>
Author:    Bruce Richardson <bruce.richardson@intel.com>
Date:      2014-05-29 11:12:17 +01:00
Committer: Thomas Monjalon
Parent:    08ccf3faa6
Commit:    c3eabff124
5 changed files, 880 insertions(+), 1 deletion(-)
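
For context, below is a minimal sketch (not part of this commit) of the
distributor/worker pattern the new tests exercise. It uses only the DPDK 1.x
calls that appear in the diffs that follow; the mempool setup, lcore launch,
the stop flag and the per-worker id allocator are assumed for illustration:

#include <rte_distributor.h>
#include <rte_mbuf.h>

static volatile int running = 1;         /* assumed stop flag */
static volatile unsigned next_worker_id; /* assumed per-worker id allocator */

/* worker side: hand back the previous packet, fetch the next one */
static int
worker_fn(void *arg)
{
	struct rte_distributor *d = arg;
	const unsigned id = __sync_fetch_and_add(&next_worker_id, 1);
	struct rte_mbuf *pkt = rte_distributor_get_pkt(d, id, NULL);

	while (running) {
		/* ... do work on pkt ... */
		pkt = rte_distributor_get_pkt(d, id, pkt);
	}
	rte_distributor_return_pkt(d, id, pkt);
	return 0;
}

/* distributor side: feed in a burst, then push out any backlog */
static void
feed_burst(struct rte_distributor *d, struct rte_mbuf **bufs, unsigned n)
{
	rte_distributor_process(d, bufs, n);
	rte_distributor_flush(d);
}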

app/test/Makefile

@@ -93,6 +93,8 @@ SRCS-$(CONFIG_RTE_APP_TEST) += test_power.c
SRCS-$(CONFIG_RTE_APP_TEST) += test_common.c
SRCS-$(CONFIG_RTE_APP_TEST) += test_timer_perf.c
SRCS-$(CONFIG_RTE_APP_TEST) += test_ivshmem.c
SRCS-$(CONFIG_RTE_APP_TEST) += test_distributor.c
SRCS-$(CONFIG_RTE_APP_TEST) += test_distributor_perf.c
SRCS-$(CONFIG_RTE_APP_TEST) += test_devargs.c
ifeq ($(CONFIG_RTE_APP_TEST),y)

app/test/commands.c

@@ -179,6 +179,10 @@ static void cmd_autotest_parsed(void *parsed_result,
ret = test_common();
if (!strcmp(res->autotest, "ivshmem_autotest"))
ret = test_ivshmem();
if (!strcmp(res->autotest, "distributor_autotest"))
ret = test_distributor();
if (!strcmp(res->autotest, "distributor_perf_autotest"))
ret = test_distributor_perf();
if (!strcmp(res->autotest, "devargs_autotest"))
ret = test_devargs();
#ifdef RTE_LIBRTE_PMD_RING
@@ -238,7 +242,8 @@ cmdline_parse_token_string_t cmd_autotest_autotest =
#ifdef RTE_LIBRTE_KVARGS
"kvargs_autotest#"
#endif
"common_autotest");
"common_autotest#"
"distributor_autotest#distributor_perf_autotest");
cmdline_parse_inst_t cmd_autotest = {
.f = cmd_autotest_parsed, /* function to call */
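
With these entries registered, the new tests can be invoked from the test
application's interactive prompt (prompt string as used by the DPDK test app
of this era):

RTE>>distributor_autotest
RTE>>distributor_perf_autotest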

app/test/test.h

@@ -92,6 +92,8 @@ int test_power(void);
int test_common(void);
int test_pmd_ring(void);
int test_ivshmem(void);
int test_distributor(void);
int test_distributor_perf(void);
int test_kvargs(void);
int test_devargs(void);

app/test/test_distributor.c (new file, 595 lines)

@@ -0,0 +1,595 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test.h"
#ifdef RTE_LIBRTE_DISTRIBUTOR
#include <unistd.h>
#include <string.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_distributor.h>
#define ITER_POWER 20 /* log 2 of how many iterations we do when timing. */
#define BURST 32
#define BIG_BATCH 1024
/* statics - all zero-initialized by default */
static volatile int quit; /**< general quit variable for all threads */
static volatile int zero_quit; /**< var for when we just want thr0 to quit */
static volatile unsigned worker_idx;
struct worker_stats {
volatile unsigned handled_packets;
} __rte_cache_aligned;
struct worker_stats worker_stats[RTE_MAX_LCORE];
/* returns the total number of packets handled by the worker
* functions given below.
*/
static inline unsigned
total_packet_count(void)
{
unsigned i, count = 0;
for (i = 0; i < worker_idx; i++)
count += worker_stats[i].handled_packets;
return count;
}
/* resets the packet counts for a new test */
static inline void
clear_packet_count(void)
{
memset(&worker_stats, 0, sizeof(worker_stats));
}
/* this is the basic worker function for sanity test
* it does nothing but return packets and count them.
*/
static int
handle_work(void *arg)
{
struct rte_mbuf *pkt = NULL;
struct rte_distributor *d = arg;
unsigned count = 0;
unsigned id = __sync_fetch_and_add(&worker_idx, 1);
pkt = rte_distributor_get_pkt(d, id, NULL);
while (!quit) {
worker_stats[id].handled_packets++, count++;
pkt = rte_distributor_get_pkt(d, id, pkt);
}
worker_stats[id].handled_packets++, count++;
rte_distributor_return_pkt(d, id, pkt);
return 0;
}
/* do basic sanity testing of the distributor. This test tests the following:
* - send 32 packets through distributor with the same tag and ensure they
* all go to the one worker
* - send 32 packets through the distributor with two different tags and
* verify that they go equally to two different workers.
* - send 32 packets with different tags through the distributor and
* just verify we get all packets back.
* - send 1024 packets through the distributor, gathering the returned packets
* as we go. Then verify that we correctly got all 1024 pointers back again,
* not necessarily in the same order (as different flows).
*/
static int
sanity_test(struct rte_distributor *d, struct rte_mempool *p)
{
struct rte_mbuf *bufs[BURST];
unsigned i;
printf("=== Basic distributor sanity tests ===\n");
clear_packet_count();
if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
printf("line %d: Error getting mbufs from pool\n", __LINE__);
return -1;
}
/* now set all hash values in all buffers to zero, so all pkts go to the
* one worker thread */
for (i = 0; i < BURST; i++)
bufs[i]->pkt.hash.rss = 0;
rte_distributor_process(d, bufs, BURST);
rte_distributor_flush(d);
if (total_packet_count() != BURST) {
printf("Line %d: Error, not all packets flushed. "
"Expected %u, got %u\n",
__LINE__, BURST, total_packet_count());
return -1;
}
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
worker_stats[i].handled_packets);
printf("Sanity test with all zero hashes done.\n");
if (worker_stats[0].handled_packets != BURST)
return -1;
/* pick two flows and check they go correctly */
if (rte_lcore_count() >= 3) {
clear_packet_count();
for (i = 0; i < BURST; i++)
bufs[i]->pkt.hash.rss = (i & 1) << 8;
rte_distributor_process(d, bufs, BURST);
rte_distributor_flush(d);
if (total_packet_count() != BURST) {
printf("Line %d: Error, not all packets flushed. "
"Expected %u, got %u\n",
__LINE__, BURST, total_packet_count());
return -1;
}
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
worker_stats[i].handled_packets);
printf("Sanity test with two hash values done\n");
if (worker_stats[0].handled_packets != 16 ||
worker_stats[1].handled_packets != 16)
return -1;
}
/* give a different hash value to each packet,
* so load gets distributed */
clear_packet_count();
for (i = 0; i < BURST; i++)
bufs[i]->pkt.hash.rss = i;
rte_distributor_process(d, bufs, BURST);
rte_distributor_flush(d);
if (total_packet_count() != BURST) {
printf("Line %d: Error, not all packets flushed. "
"Expected %u, got %u\n",
__LINE__, BURST, total_packet_count());
return -1;
}
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
worker_stats[i].handled_packets);
printf("Sanity test with non-zero hashes done\n");
rte_mempool_put_bulk(p, (void *)bufs, BURST);
/* sanity test with BIG_BATCH packets to ensure they all arrived back
* from the returned packets function */
clear_packet_count();
struct rte_mbuf *many_bufs[BIG_BATCH], *return_bufs[BIG_BATCH];
unsigned num_returned = 0;
/* flush out any remaining packets */
rte_distributor_flush(d);
rte_distributor_clear_returns(d);
if (rte_mempool_get_bulk(p, (void *)many_bufs, BIG_BATCH) != 0) {
printf("line %d: Error getting mbufs from pool\n", __LINE__);
return -1;
}
for (i = 0; i < BIG_BATCH; i++)
many_bufs[i]->pkt.hash.rss = i << 2;
for (i = 0; i < BIG_BATCH/BURST; i++) {
rte_distributor_process(d, &many_bufs[i*BURST], BURST);
num_returned += rte_distributor_returned_pkts(d,
&return_bufs[num_returned],
BIG_BATCH - num_returned);
}
rte_distributor_flush(d);
num_returned += rte_distributor_returned_pkts(d,
&return_bufs[num_returned], BIG_BATCH - num_returned);
if (num_returned != BIG_BATCH) {
printf("line %d: Number returned is not the same as "
"number sent\n", __LINE__);
return -1;
}
/* big check - make sure all packets made it back!! */
for (i = 0; i < BIG_BATCH; i++) {
unsigned j;
struct rte_mbuf *src = many_bufs[i];
for (j = 0; j < BIG_BATCH; j++)
if (return_bufs[j] == src)
break;
if (j == BIG_BATCH) {
printf("Error: could not find source packet #%u\n", i);
return -1;
}
}
printf("Sanity test of returned packets done\n");
rte_mempool_put_bulk(p, (void *)many_bufs, BIG_BATCH);
printf("\n");
return 0;
}
/* to test that the distributor does not lose packets, we use this worker
* function which frees mbufs when it gets them. The distributor thread does
* the mbuf allocation. If distributor drops packets we'll eventually run out
* of mbufs.
*/
static int
handle_work_with_free_mbufs(void *arg)
{
struct rte_mbuf *pkt = NULL;
struct rte_distributor *d = arg;
unsigned count = 0;
unsigned id = __sync_fetch_and_add(&worker_idx, 1);
pkt = rte_distributor_get_pkt(d, id, NULL);
while (!quit) {
worker_stats[id].handled_packets++, count++;
rte_pktmbuf_free(pkt);
pkt = rte_distributor_get_pkt(d, id, pkt);
}
worker_stats[id].handled_packets++, count++;
rte_distributor_return_pkt(d, id, pkt);
return 0;
}
/* Perform a sanity test of the distributor with a large number of packets,
* where we allocate a new set of mbufs for each burst. The workers then
* free the mbufs. This ensures that we don't have any packet leaks in the
* library.
*/
static int
sanity_test_with_mbuf_alloc(struct rte_distributor *d, struct rte_mempool *p)
{
unsigned i;
struct rte_mbuf *bufs[BURST];
printf("=== Sanity test with mbuf alloc/free ===\n");
clear_packet_count();
for (i = 0; i < ((1<<ITER_POWER)); i += BURST) {
unsigned j;
while (rte_mempool_get_bulk(p, (void *)bufs, BURST) < 0)
rte_distributor_process(d, NULL, 0);
for (j = 0; j < BURST; j++) {
bufs[j]->pkt.hash.rss = (i+j) << 1;
bufs[j]->refcnt = 1;
}
rte_distributor_process(d, bufs, BURST);
}
rte_distributor_flush(d);
if (total_packet_count() < (1<<ITER_POWER)) {
printf("Line %u: Packet count is incorrect, %u, expected %u\n",
__LINE__, total_packet_count(),
(1<<ITER_POWER));
return -1;
}
printf("Sanity test with mbuf alloc/free passed\n\n");
return 0;
}
static int
handle_work_for_shutdown_test(void *arg)
{
struct rte_mbuf *pkt = NULL;
struct rte_distributor *d = arg;
unsigned count = 0;
const unsigned id = __sync_fetch_and_add(&worker_idx, 1);
pkt = rte_distributor_get_pkt(d, id, NULL);
/* wait for the quit signal globally, or, for worker zero, for
* zero_quit to be set */
while (!quit && !(id == 0 && zero_quit)) {
worker_stats[id].handled_packets++, count++;
rte_pktmbuf_free(pkt);
pkt = rte_distributor_get_pkt(d, id, NULL);
}
worker_stats[id].handled_packets++, count++;
rte_distributor_return_pkt(d, id, pkt);
if (id == 0) {
/* for worker zero, allow it to restart to pick up last packet
* when all workers are shutting down.
*/
while (zero_quit)
usleep(100);
pkt = rte_distributor_get_pkt(d, id, NULL);
while (!quit) {
worker_stats[id].handled_packets++, count++;
rte_pktmbuf_free(pkt);
pkt = rte_distributor_get_pkt(d, id, NULL);
}
rte_distributor_return_pkt(d, id, pkt);
}
return 0;
}
/* Perform a sanity test of the distributor in which worker zero shuts down
* part-way through processing. This verifies that the packets backlogged for
* that worker are still delivered, so no packets are lost on shutdown.
*/
static int
sanity_test_with_worker_shutdown(struct rte_distributor *d,
struct rte_mempool *p)
{
struct rte_mbuf *bufs[BURST];
unsigned i;
printf("=== Sanity test of worker shutdown ===\n");
clear_packet_count();
if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
printf("line %d: Error getting mbufs from pool\n", __LINE__);
return -1;
}
/* now set all hash values in all buffers to zero, so all pkts go to the
* one worker thread */
for (i = 0; i < BURST; i++)
bufs[i]->pkt.hash.rss = 0;
rte_distributor_process(d, bufs, BURST);
/* at this point, we will have processed some packets and have a full
* backlog for the other ones at worker 0.
*/
/* get more buffers to queue up, again setting them to the same flow */
if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
printf("line %d: Error getting mbufs from pool\n", __LINE__);
return -1;
}
for (i = 0; i < BURST; i++)
bufs[i]->pkt.hash.rss = 0;
/* get worker zero to quit */
zero_quit = 1;
rte_distributor_process(d, bufs, BURST);
/* flush the distributor */
rte_distributor_flush(d);
if (total_packet_count() != BURST * 2) {
printf("Line %d: Error, not all packets flushed. "
"Expected %u, got %u\n",
__LINE__, BURST * 2, total_packet_count());
return -1;
}
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
worker_stats[i].handled_packets);
printf("Sanity test with worker shutdown passed\n\n");
return 0;
}
/* Test that the flush function is able to move packets between workers when
* one worker shuts down.
*/
static int
test_flush_with_worker_shutdown(struct rte_distributor *d,
struct rte_mempool *p)
{
struct rte_mbuf *bufs[BURST];
unsigned i;
printf("=== Test flush fn with worker shutdown ===\n");
clear_packet_count();
if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
printf("line %d: Error getting mbufs from pool\n", __LINE__);
return -1;
}
/* now set all hash values in all buffers to zero, so all pkts go to the
* one worker thread */
for (i = 0; i < BURST; i++)
bufs[i]->pkt.hash.rss = 0;
rte_distributor_process(d, bufs, BURST);
/* at this point, we will have processed some packets and have a full
* backlog for the other ones at worker 0.
*/
/* get worker zero to quit */
zero_quit = 1;
/* flush the distributor */
rte_distributor_flush(d);
zero_quit = 0;
if (total_packet_count() != BURST) {
printf("Line %d: Error, not all packets flushed. "
"Expected %u, got %u\n",
__LINE__, BURST, total_packet_count());
return -1;
}
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
worker_stats[i].handled_packets);
printf("Flush test with worker shutdown passed\n\n");
return 0;
}
static int
test_error_distributor_create_name(void)
{
struct rte_distributor *d = NULL;
char *name = NULL;
d = rte_distributor_create(name, rte_socket_id(),
rte_lcore_count() - 1);
if (d != NULL || rte_errno != EINVAL) {
printf("ERROR: No error on create() with NULL name param\n");
return -1;
}
return 0;
}
static int
test_error_distributor_create_numworkers(void)
{
struct rte_distributor *d = NULL;
d = rte_distributor_create("test_numworkers", rte_socket_id(),
RTE_MAX_LCORE + 10);
if (d != NULL || rte_errno != EINVAL) {
printf("ERROR: No error on create() with num_workers > MAX\n");
return -1;
}
return 0;
}
/* Useful function which ensures that all worker functions terminate */
static void
quit_workers(struct rte_distributor *d, struct rte_mempool *p)
{
const unsigned num_workers = rte_lcore_count() - 1;
unsigned i;
struct rte_mbuf *bufs[RTE_MAX_LCORE];
rte_mempool_get_bulk(p, (void *)bufs, num_workers);
zero_quit = 0;
quit = 1;
for (i = 0; i < num_workers; i++)
bufs[i]->pkt.hash.rss = i << 1;
rte_distributor_process(d, bufs, num_workers);
rte_mempool_put_bulk(p, (void *)bufs, num_workers);
rte_distributor_process(d, NULL, 0);
rte_distributor_flush(d);
rte_eal_mp_wait_lcore();
quit = 0;
worker_idx = 0;
}
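/* buffer size: room for a 2KB payload plus the mbuf struct and headroom */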
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
int
test_distributor(void)
{
static struct rte_distributor *d;
static struct rte_mempool *p;
if (rte_lcore_count() < 2) {
printf("ERROR: not enough cores to test distributor\n");
return -1;
}
if (d == NULL) {
d = rte_distributor_create("Test_distributor", rte_socket_id(),
rte_lcore_count() - 1);
if (d == NULL) {
printf("Error creating distributor\n");
return -1;
}
} else {
rte_distributor_flush(d);
rte_distributor_clear_returns(d);
}
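/* pool sizing: 511 mbufs per lcore, with a (BIG_BATCH * 2) - 1 floor so the
* big-batch test always has enough buffers */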
const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
(BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());
if (p == NULL) {
p = rte_mempool_create("DT_MBUF_POOL", nb_bufs,
MBUF_SIZE, BURST,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
rte_pktmbuf_init, NULL,
rte_socket_id(), 0);
if (p == NULL) {
printf("Error creating mempool\n");
return -1;
}
}
rte_eal_mp_remote_launch(handle_work, d, SKIP_MASTER);
if (sanity_test(d, p) < 0)
goto err;
quit_workers(d, p);
rte_eal_mp_remote_launch(handle_work_with_free_mbufs, d, SKIP_MASTER);
if (sanity_test_with_mbuf_alloc(d, p) < 0)
goto err;
quit_workers(d, p);
if (rte_lcore_count() > 2) {
rte_eal_mp_remote_launch(handle_work_for_shutdown_test, d,
SKIP_MASTER);
if (sanity_test_with_worker_shutdown(d, p) < 0)
goto err;
quit_workers(d, p);
rte_eal_mp_remote_launch(handle_work_for_shutdown_test, d,
SKIP_MASTER);
if (test_flush_with_worker_shutdown(d, p) < 0)
goto err;
quit_workers(d, p);
} else {
printf("Not enough cores to run tests for worker shutdown\n");
}
if (test_error_distributor_create_numworkers() == -1 ||
test_error_distributor_create_name() == -1) {
printf("rte_distributor_create parameter check tests failed");
return -1;
}
return 0;
err:
quit_workers(d, p);
return -1;
}
#else
#include <stdio.h>
int
test_distributor(void)
{
printf("Distributor is not enabled in configuration\n");
return 0;
}
#endif

app/test/test_distributor_perf.c (new file, 275 lines)

@@ -0,0 +1,275 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test.h"
#ifdef RTE_LIBRTE_DISTRIBUTOR
#include <unistd.h>
#include <string.h>
#include <rte_cycles.h>
#include <rte_distributor.h>
#define ITER_POWER 20 /* log 2 of how many iterations we do when timing. */
#define BURST 32
#define BIG_BATCH 1024
/* static vars - zero initialized by default */
static volatile int quit;
static volatile unsigned worker_idx;
struct worker_stats {
volatile unsigned handled_packets;
} __rte_cache_aligned;
struct worker_stats worker_stats[RTE_MAX_LCORE];
/* worker thread used for testing the time to do a round-trip of a cache
* line between two cores and back again
*/
static void
flip_bit(volatile uint64_t *arg)
{
uint64_t old_val = 0;
while (old_val != 2) {
/* spin until the main core sets the flag... */
while (!*arg)
rte_pause();
/* ...then record the value and clear the line; a value of 2 ends the loop */
old_val = *arg;
*arg = 0;
}
}
/* test case to time the number of cycles to round-trip a cache line between
* two cores and back again.
*/
static void
time_cache_line_switch(void)
{
/* allocate three cache lines of space so one full line is exclusively ours;
* only the first 64-bit word is actually used */
uint64_t data[CACHE_LINE_SIZE*3 / sizeof(uint64_t)];
unsigned i, slaveid = rte_get_next_lcore(rte_lcore_id(), 0, 0);
volatile uint64_t *pdata = &data[0];
*pdata = 1;
rte_eal_remote_launch((lcore_function_t *)flip_bit, &data[0], slaveid);
while (*pdata)
rte_pause();
const uint64_t start_time = rte_rdtsc();
for (i = 0; i < (1 << ITER_POWER); i++) {
while (*pdata)
rte_pause();
*pdata = 1;
}
const uint64_t end_time = rte_rdtsc();
while (*pdata)
rte_pause();
*pdata = 2;
rte_eal_wait_lcore(slaveid);
printf("==== Cache line switch test ===\n");
printf("Time for %u iterations = %"PRIu64" ticks\n", (1<<ITER_POWER),
end_time-start_time);
printf("Ticks per iteration = %"PRIu64"\n\n",
(end_time-start_time) >> ITER_POWER);
}
/* returns the total number of packets handled by the worker
* functions given below.
*/
static unsigned
total_packet_count(void)
{
unsigned i, count = 0;
for (i = 0; i < worker_idx; i++)
count += worker_stats[i].handled_packets;
return count;
}
/* resets the packet counts for a new test */
static void
clear_packet_count(void)
{
memset(&worker_stats, 0, sizeof(worker_stats));
}
/* this is the basic worker function for performance tests.
* it does nothing but return packets and count them.
*/
static int
handle_work(void *arg)
{
struct rte_mbuf *pkt = NULL;
struct rte_distributor *d = arg;
unsigned count = 0;
unsigned id = __sync_fetch_and_add(&worker_idx, 1);
pkt = rte_distributor_get_pkt(d, id, NULL);
while (!quit) {
worker_stats[id].handled_packets++, count++;
pkt = rte_distributor_get_pkt(d, id, pkt);
}
worker_stats[id].handled_packets++, count++;
rte_distributor_return_pkt(d, id, pkt);
return 0;
}
/* this basic performance test repeatedly sends 32 packets at a time to the
* distributor, verifies at the end that the workers received them all, and
* reports how long the processing took per packet.
*/
static inline int
perf_test(struct rte_distributor *d, struct rte_mempool *p)
{
unsigned i;
uint64_t start, end;
struct rte_mbuf *bufs[BURST];
clear_packet_count();
if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
printf("Error getting mbufs from pool\n");
return -1;
}
/* ensure we have different hash value for each pkt */
for (i = 0; i < BURST; i++)
bufs[i]->pkt.hash.rss = i;
start = rte_rdtsc();
for (i = 0; i < (1<<ITER_POWER); i++)
rte_distributor_process(d, bufs, BURST);
end = rte_rdtsc();
do {
usleep(100);
rte_distributor_process(d, NULL, 0);
} while (total_packet_count() < (BURST << ITER_POWER));
printf("=== Performance test of distributor ===\n");
printf("Time per burst: %"PRIu64"\n", (end - start) >> ITER_POWER);
printf("Time per packet: %"PRIu64"\n\n",
((end - start) >> ITER_POWER)/BURST);
rte_mempool_put_bulk(p, (void *)bufs, BURST);
for (i = 0; i < rte_lcore_count() - 1; i++)
printf("Worker %u handled %u packets\n", i,
worker_stats[i].handled_packets);
printf("Total packets: %u (%x)\n", total_packet_count(),
total_packet_count());
printf("=== Perf test done ===\n\n");
return 0;
}
/* Useful function which ensures that all worker functions terminate */
static void
quit_workers(struct rte_distributor *d, struct rte_mempool *p)
{
const unsigned num_workers = rte_lcore_count() - 1;
unsigned i;
struct rte_mbuf *bufs[RTE_MAX_LCORE];
rte_mempool_get_bulk(p, (void *)bufs, num_workers);
quit = 1;
for (i = 0; i < num_workers; i++)
bufs[i]->pkt.hash.rss = i << 1;
rte_distributor_process(d, bufs, num_workers);
rte_mempool_put_bulk(p, (void *)bufs, num_workers);
rte_distributor_process(d, NULL, 0);
rte_eal_mp_wait_lcore();
quit = 0;
worker_idx = 0;
}
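/* buffer size: room for a 2KB payload plus the mbuf struct and headroom */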
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
int
test_distributor_perf(void)
{
static struct rte_distributor *d;
static struct rte_mempool *p;
if (rte_lcore_count() < 2) {
printf("ERROR: not enough cores to test distributor\n");
return -1;
}
/* first time how long it takes to round-trip a cache line */
time_cache_line_switch();
if (d == NULL) {
d = rte_distributor_create("Test_perf", rte_socket_id(),
rte_lcore_count() - 1);
if (d == NULL) {
printf("Error creating distributor\n");
return -1;
}
} else {
rte_distributor_flush(d);
rte_distributor_clear_returns(d);
}
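/* pool sizing matches the functional test: 511 mbufs per lcore, with a
* (BIG_BATCH * 2) - 1 floor */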
const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
(BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());
if (p == NULL) {
p = rte_mempool_create("DPT_MBUF_POOL", nb_bufs,
MBUF_SIZE, BURST,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
rte_pktmbuf_init, NULL,
rte_socket_id(), 0);
if (p == NULL) {
printf("Error creating mempool\n");
return -1;
}
}
rte_eal_mp_remote_launch(handle_work, d, SKIP_MASTER);
if (perf_test(d, p) < 0)
return -1;
quit_workers(d, p);
return 0;
}
#else
#include <stdio.h>
int
test_distributor_perf(void)
{
printf("Distributor is not enabled in configuration\n");
return 0;
}
#endif