test/eventdev: use service iter
Use service run iter for event scheduling instead of calling the event
schedule API directly.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Acked-by: Harry van Haaren <harry.van.haaren@intel.com>
parent 57305d794e
commit 87751f82e5
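In short: the sw eventdev's scheduler is now exposed as a DPDK service, so the test looks that service up once and then runs one iteration of it on the application lcore at every point where it previously called rte_event_schedule(). A condensed sketch of the pattern, using only calls that appear in the diff below (setup_sched_service is a hypothetical helper name; the patch itself does this setup inline in test_sw_eventdev()):

	#include <rte_eventdev.h>
	#include <rte_service.h>
	#include <rte_service_component.h>

	/* One-time setup: find the service backing the sw eventdev and
	 * allow it to run without being mapped to a service lcore. */
	static int
	setup_sched_service(uint8_t dev_id, uint32_t *service_id)
	{
		if (rte_event_dev_service_id_get(dev_id, service_id) < 0)
			return -1; /* device is not backed by a service */
		rte_service_runstate_set(*service_id, 1);
		return rte_service_set_runstate_mapped_check(*service_id, 0);
	}

After that setup, each former rte_event_schedule(evdev) call site becomes a single rte_service_run_iter_on_app_lcore(service_id), which runs one scheduling iteration synchronously on the calling lcore.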
@@ -49,6 +49,8 @@
 #include <rte_cycles.h>
 #include <rte_eventdev.h>
 #include <rte_pause.h>
+#include <rte_service.h>
+#include <rte_service_component.h>
 
 #include "test.h"
 
@@ -63,6 +65,7 @@ struct test {
 	uint8_t port[MAX_PORTS];
 	uint8_t qid[MAX_QIDS];
 	int nb_qids;
+	uint32_t service_id;
 };
 
 static struct rte_event release_ev;
@@ -415,7 +418,7 @@ run_prio_packet_test(struct test *t)
 		}
 	}
 
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	struct test_event_dev_stats stats;
 	err = test_event_dev_stats_get(evdev, &stats);
@@ -507,7 +510,7 @@ test_single_directed_packet(struct test *t)
 	}
 
 	/* Run schedule() as dir packets may need to be re-ordered */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	struct test_event_dev_stats stats;
 	err = test_event_dev_stats_get(evdev, &stats);
@@ -574,7 +577,7 @@ test_directed_forward_credits(struct test *t)
 		printf("%d: error failed to enqueue\n", __LINE__);
 		return -1;
 	}
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	uint32_t deq_pkts;
 	deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
@@ -736,7 +739,7 @@ burst_packets(struct test *t)
 			return -1;
 		}
 	}
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/* Check stats for all NUM_PKTS arrived to sched core */
 	struct test_event_dev_stats stats;
@@ -825,7 +828,7 @@ abuse_inflights(struct test *t)
 	}
 
 	/* schedule */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	struct test_event_dev_stats stats;
 
@@ -963,7 +966,7 @@ xstats_tests(struct test *t)
 		}
 	}
 
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/* Device names / values */
 	int num_stats = rte_event_dev_xstats_names_get(evdev,
@@ -1290,7 +1293,7 @@ port_reconfig_credits(struct test *t)
 		}
 	}
 
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	struct rte_event ev[NPKTS];
 	int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
@@ -1516,7 +1519,7 @@ xstats_id_reset_tests(struct test *t)
 		}
 	}
 
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	static const char * const dev_names[] = {
 		"dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
@@ -1907,7 +1910,7 @@ qid_priorities(struct test *t)
 		}
 	}
 
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/* dequeue packets, verify priority was upheld */
 	struct rte_event ev[32];
@@ -1988,7 +1991,7 @@ load_balancing(struct test *t)
 		}
 	}
 
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	struct test_event_dev_stats stats;
 	err = test_event_dev_stats_get(evdev, &stats);
@@ -2088,7 +2091,7 @@ load_balancing_history(struct test *t)
 	}
 
 	/* call the scheduler */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/* Dequeue the flow 0 packet from port 1, so that we can then drop */
 	struct rte_event ev;
@@ -2105,7 +2108,7 @@ load_balancing_history(struct test *t)
 	rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
 
 	/* call the scheduler */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/*
 	 * Set up the next set of flows, first a new flow to fill up
@@ -2138,7 +2141,7 @@ load_balancing_history(struct test *t)
 	}
 
 	/* schedule */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	err = test_event_dev_stats_get(evdev, &stats);
 	if (err) {
@@ -2182,7 +2185,7 @@ load_balancing_history(struct test *t)
 		while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
 			rte_event_enqueue_burst(evdev, i, &release_ev, 1);
 	}
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	cleanup(t);
 	return 0;
@@ -2248,7 +2251,7 @@ invalid_qid(struct test *t)
 	}
 
 	/* call the scheduler */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	err = test_event_dev_stats_get(evdev, &stats);
 	if (err) {
@@ -2333,7 +2336,7 @@ single_packet(struct test *t)
 		return -1;
 	}
 
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	err = test_event_dev_stats_get(evdev, &stats);
 	if (err) {
@@ -2376,7 +2379,7 @@ single_packet(struct test *t)
 		printf("%d: Failed to enqueue\n", __LINE__);
 		return -1;
 	}
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	err = test_event_dev_stats_get(evdev, &stats);
 	if (stats.port_inflight[wrk_enq] != 0) {
@@ -2464,7 +2467,7 @@ inflight_counts(struct test *t)
 	}
 
 	/* schedule */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	err = test_event_dev_stats_get(evdev, &stats);
 	if (err) {
@@ -2520,7 +2523,7 @@ inflight_counts(struct test *t)
 	 * As the scheduler core decrements inflights, it needs to run to
 	 * process packets to act on the drop messages
 	 */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	err = test_event_dev_stats_get(evdev, &stats);
 	if (stats.port_inflight[p1] != 0) {
@@ -2555,7 +2558,7 @@ inflight_counts(struct test *t)
 	 * As the scheduler core decrements inflights, it needs to run to
 	 * process packets to act on the drop messages
 	 */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	err = test_event_dev_stats_get(evdev, &stats);
 	if (stats.port_inflight[p2] != 0) {
@@ -2649,7 +2652,7 @@ parallel_basic(struct test *t, int check_order)
 		}
 	}
 
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/* use extra slot to make logic in loops easier */
 	struct rte_event deq_ev[w3_port + 1];
@@ -2676,7 +2679,7 @@ parallel_basic(struct test *t, int check_order)
 			return -1;
 		}
 	}
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/* dequeue from the tx ports, we should get 3 packets */
 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
@@ -2754,7 +2757,7 @@ holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
 		printf("%d: Error doing first enqueue\n", __LINE__);
 		goto err;
 	}
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
 			!= 1)
@@ -2779,7 +2782,7 @@ holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
 			printf("%d: Error with enqueue\n", __LINE__);
 			goto err;
 		}
-		rte_event_schedule(evdev);
+		rte_service_run_iter_on_app_lcore(t->service_id);
 	} while (rte_event_dev_xstats_by_name_get(evdev,
 			rx_port_free_stat, NULL) != 0);
 
@@ -2789,7 +2792,7 @@ holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
 		printf("%d: Error with enqueue\n", __LINE__);
 		goto err;
 	}
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/* check that the other port still has an empty CQ */
 	if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
@@ -2812,7 +2815,7 @@ holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
 		printf("%d: Error with enqueue\n", __LINE__);
 		goto err;
 	}
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
 			!= 1) {
@@ -3002,7 +3005,7 @@ worker_loopback(struct test *t)
 	while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
 			rte_eal_get_lcore_state(w_lcore) != FINISHED) {
 
-		rte_event_schedule(evdev);
+		rte_service_run_iter_on_app_lcore(t->service_id);
 
 		uint64_t new_cycles = rte_get_timer_cycles();
 
@@ -3029,7 +3032,8 @@ worker_loopback(struct test *t)
 			cycles = new_cycles;
 		}
 	}
-	rte_event_schedule(evdev); /* ensure all completions are flushed */
+	rte_service_run_iter_on_app_lcore(t->service_id);
+	/* ensure all completions are flushed */
 
 	rte_eal_mp_wait_lcore();
 
@@ -3066,6 +3070,14 @@ test_sw_eventdev(void)
 		}
 	}
 
+	if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
+		printf("Failed to get service ID for software event dev\n");
+		return -1;
+	}
+
+	rte_service_runstate_set(t->service_id, 1);
+	rte_service_set_runstate_mapped_check(t->service_id, 0);
+
 	/* Only create mbuf pool once, reuse for each test run */
 	if (!eventdev_func_mempool) {
 		eventdev_func_mempool = rte_pktmbuf_pool_create(
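Design note on the setup added in the last hunk: rte_service_runstate_set(id, 1) marks the service as runnable, and rte_service_set_runstate_mapped_check(id, 0) waives the check that the service is mapped to a service lcore before it may execute. Together these allow rte_service_run_iter_on_app_lcore() to drive the scheduler step by step from the test's own lcore, preserving the deterministic run-one-iteration-then-assert flow the tests rely on.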