a9de470cc7
Since all other apps have been moved to the "app" folder, the autotest app
remains alone in the "test" folder. Rather than keeping an entire top-level
folder for it, we can move it back to where it all started in early versions
of DPDK: the "app/" folder. This move has a couple of advantages:

* It reduces clutter at the top level of the project, with one less folder.
* It eliminates the separate build task needed for building the autotests
  with make ("make test-build"), which means that developers are less likely
  to miss something in their own compilation tests.
* It re-aligns the final location of the test binary when building with make
  (in the app folder) with its location in the source tree.

For meson builds, the autotest app is different from the other apps in that
it needs a series of different test cases defined for it for use by
"meson test" (see the sketch below). Therefore, it does not get built as part
of the main loop in the app folder, but gets built separately at the end.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
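As context for the meson part, registering an autotest as a "meson test" case
might look roughly like the sketch below. This is illustrative only: the
test() function with env/timeout keywords is standard meson, and the DPDK test
binary selects a test via the DPDK_TEST environment variable, but the
dpdk_test target name and the exact meson.build contents are assumptions, not
taken from this commit.

    # hypothetical excerpt from app/test/meson.build
    test('latencystats_autotest', dpdk_test,
            env : ['DPDK_TEST=latencystats_autotest'],
            timeout : 300)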
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include <rte_latencystats.h>
#include "rte_lcore.h"
#include "rte_metrics.h"

#include "sample_packet_forward.h"
#include "test.h"

#define NUM_STATS 4
#define LATENCY_NUM_PACKETS 10
#define QUEUE_ID 0

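/* Port and ring shared between the suite setup/teardown hooks
 * and the test cases below.
 */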
uint16_t portid;
struct rte_ring *ring;

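/* Expected metric names registered by the latency stats library */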
struct rte_metric_name lat_stats_strings[] = {
	{"min_latency_ns"},
	{"avg_latency_ns"},
	{"max_latency_ns"},
	{"jitter_ns"},
};

/* Test case for latency init with metrics init */
static int test_latency_init(void)
{
	int ret = 0;

	/* Metrics Initialization */
	rte_metrics_init(rte_socket_id());

	ret = rte_latencystats_init(1, NULL);
	TEST_ASSERT(ret >= 0, "Test Failed: rte_latencystats_init failed");

	return TEST_SUCCESS;
}

/* Test case to update the latency stats */
static int test_latency_update(void)
{
	int ret = 0;

	ret = rte_latencystats_update();
	TEST_ASSERT(ret >= 0, "Test Failed: rte_latencystats_update failed");

	return TEST_SUCCESS;
}

/* Test case to uninit latency stats */
static int test_latency_uninit(void)
{
	int ret = 0;

	ret = rte_latencystats_uninit();
	TEST_ASSERT(ret >= 0, "Test Failed: rte_latencystats_uninit failed");

	return TEST_SUCCESS;
}

/* Test case to get names of latency stats */
static int test_latencystats_get_names(void)
{
	int ret = 0, i = 0;
	int size = 0;
	struct rte_metric_name names[NUM_STATS];
	struct rte_metric_name wrongnames[NUM_STATS - 2];

	size_t m_size = sizeof(struct rte_metric_name);
	for (i = 0; i < NUM_STATS; i++)
		memset(&names[i], 0, m_size);
	for (i = 0; i < NUM_STATS - 2; i++)
		memset(&wrongnames[i], 0, m_size);

	/* Success Test: Valid names and size */
	size = NUM_STATS;
	ret = rte_latencystats_get_names(names, size);
	for (i = 0; i < NUM_STATS; i++) {
		if (strcmp(lat_stats_strings[i].name, names[i].name) == 0)
			printf(" %s\n", names[i].name);
		else
			printf("Failed: Names do not match\n");
	}
	TEST_ASSERT((ret == NUM_STATS), "Test Failed to get metrics names");

	/* Failure Test: Invalid names and valid size */
	ret = rte_latencystats_get_names(NULL, size);
	TEST_ASSERT((ret == NUM_STATS), "Test Failed to get the metrics count, "
			"Actual: %d Expected: %d", ret, NUM_STATS);

	/* Failure Test: Valid names and invalid size */
	size = 0;
	ret = rte_latencystats_get_names(names, size);
	TEST_ASSERT((ret == NUM_STATS), "Test Failed to get the metrics count, "
			"Actual: %d Expected: %d", ret, NUM_STATS);

	/* Failure Test: Invalid names (array smaller than size) */
	size = NUM_STATS + 1;
	ret = rte_latencystats_get_names(wrongnames, size);
	TEST_ASSERT((ret == NUM_STATS), "Test Failed to get metrics names");

	return TEST_SUCCESS;
}

/* Test case to get latency stats values */
static int test_latencystats_get(void)
{
	int ret = 0, i = 0;
	int size = 0;
	struct rte_metric_value values[NUM_STATS];
	struct rte_metric_value wrongvalues[NUM_STATS - 2];

	size_t v_size = sizeof(struct rte_metric_value);
	for (i = 0; i < NUM_STATS; i++)
		memset(&values[i], 0, v_size);
	for (i = 0; i < NUM_STATS - 2; i++)
		memset(&wrongvalues[i], 0, v_size);

	/* Success Test: Valid values and valid size */
	size = NUM_STATS;
	ret = rte_latencystats_get(values, size);
	TEST_ASSERT((ret == NUM_STATS),
			"Test Failed to get latency metrics values");

	/* Failure Test: Invalid values and valid size */
	ret = rte_latencystats_get(NULL, size);
	TEST_ASSERT((ret == NUM_STATS), "Test Failed to get the stats count, "
			"Actual: %d Expected: %d", ret, NUM_STATS);

	/* Failure Test: Valid values and invalid size */
	size = 0;
	ret = rte_latencystats_get(values, size);
	TEST_ASSERT((ret == NUM_STATS), "Test Failed to get the stats count, "
			"Actual: %d Expected: %d", ret, NUM_STATS);

	/* Failure Test: Invalid values (array smaller than size)
	 * and invalid size
	 */
	size = NUM_STATS + 2;
	ret = rte_latencystats_get(wrongvalues, size);
	TEST_ASSERT(ret == NUM_STATS,
			"Test Failed to get latency metrics values");

	return TEST_SUCCESS;
}

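/* Suite setup: create the ring and the ring-based vdev port used
 * as the traffic source/sink for the tests.
 */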
static int test_latency_ring_setup(void)
{
	test_ring_setup(&ring, &portid);

	return TEST_SUCCESS;
}

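/* Suite teardown: free the ring and remove the net_ring vdev
 * created at setup.
 */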
static void test_latency_ring_free(void)
{
	test_ring_free(ring);
	test_vdev_uninit("net_ring_net_ringa");
}

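/* Forward a burst of sample packets on the ring port so the latency
 * stats library has traffic to compute its metrics from.
 */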
static int test_latency_packet_forward(void)
{
	int ret;
	struct rte_mbuf *pbuf[LATENCY_NUM_PACKETS] = { };
	struct rte_mempool *mp;
	char poolname[] = "mbuf_pool";

	ret = test_get_mbuf_from_pool(&mp, pbuf, poolname);
	if (ret < 0) {
		printf("Failed to allocate mbuf pool\n");
		return TEST_FAILED;
	}
	ret = test_packet_forward(pbuf, portid, QUEUE_ID);
	if (ret < 0)
		printf("Failed to send packets\n");
	test_put_mbuf_to_pool(mp, pbuf);

	return TEST_SUCCESS;
}

static struct unit_test_suite latencystats_testsuite = {
	.suite_name = "Latency Stats Unit Test Suite",
	.setup = test_latency_ring_setup,
	.teardown = test_latency_ring_free,
	.unit_test_cases = {

		/* Test Case 1: To check latency init with
		 * metrics init
		 */
		TEST_CASE_ST(NULL, NULL, test_latency_init),

		/* Test Case 2: Do packet forwarding for metrics
		 * calculation and check the latency metrics values
		 * are updated
		 */
		TEST_CASE_ST(test_latency_packet_forward, NULL,
				test_latency_update),

		/* Test Case 3: To check whether latency stats names
		 * are retrieved
		 */
		TEST_CASE_ST(NULL, NULL, test_latencystats_get_names),

		/* Test Case 4: To check whether latency stats
		 * values are retrieved
		 */
		TEST_CASE_ST(NULL, NULL, test_latencystats_get),

		/* Test Case 5: To check uninit of latency stats */
		TEST_CASE_ST(NULL, NULL, test_latency_uninit),

		TEST_CASES_END()
	}
};

static int test_latencystats(void)
{
	return unit_test_suite_runner(&latencystats_testsuite);
}

REGISTER_TEST_COMMAND(latencystats_autotest, test_latencystats);