app/eventdev: use compiler atomics for shared data sync
Convert rte_atomic usages to compiler atomic built-ins for shared
data sync in eventdev cases.

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
commit 5109487205
parent 45f838d60a
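The change is a mechanical substitution of the legacy rte_atomic64 API with the GCC/Clang __atomic built-ins, with the counter field itself becoming a plain uint64_t. A minimal sketch of the mapping applied throughout the patch, assuming a standalone counter named after t->outstand_pkts (the counter_* helper names are illustrative, not from the patch):

#include <stdint.h>

static uint64_t outstand_pkts;	/* was: rte_atomic64_t outstand_pkts */

static inline void counter_set(uint64_t n)
{
	/* was: rte_atomic64_set(&outstand_pkts, n); */
	__atomic_store_n(&outstand_pkts, n, __ATOMIC_RELAXED);
}

static inline int64_t counter_read(void)
{
	/* was: rte_atomic64_read(&outstand_pkts); */
	return (int64_t)__atomic_load_n(&outstand_pkts, __ATOMIC_RELAXED);
}

static inline void counter_dec(void)
{
	/* was: rte_atomic64_sub(&outstand_pkts, 1); */
	__atomic_sub_fetch(&outstand_pkts, 1, __ATOMIC_RELAXED);
}

Relaxed ordering matches what the patch uses: the value is a self-contained count, so no acquire/release pairing is needed around it.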
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -28,7 +28,7 @@ order_atq_worker(void *arg, const bool flow_id_cap)
 		uint16_t event = rte_event_dequeue_burst(dev_id, port,
 					&ev, 1, 0);
 		if (!event) {
-			if (rte_atomic64_read(outstand_pkts) <= 0)
+			if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
 				break;
 			rte_pause();
 			continue;
@@ -64,7 +64,7 @@ order_atq_worker_burst(void *arg, const bool flow_id_cap)
 				BURST_SIZE, 0);
 
 		if (nb_rx == 0) {
-			if (rte_atomic64_read(outstand_pkts) <= 0)
+			if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
 				break;
 			rte_pause();
 			continue;
--- a/app/test-eventdev/test_order_common.c
+++ b/app/test-eventdev/test_order_common.c
@@ -187,7 +187,7 @@ order_test_setup(struct evt_test *test, struct evt_options *opt)
 		evt_err("failed to allocate t->expected_flow_seq memory");
 		goto exp_nomem;
 	}
-	rte_atomic64_set(&t->outstand_pkts, opt->nb_pkts);
+	__atomic_store_n(&t->outstand_pkts, opt->nb_pkts, __ATOMIC_RELAXED);
 	t->err = false;
 	t->nb_pkts = opt->nb_pkts;
 	t->nb_flows = opt->nb_flows;
@@ -294,7 +294,7 @@ order_launch_lcores(struct evt_test *test, struct evt_options *opt,
 
 	while (t->err == false) {
 		uint64_t new_cycles = rte_get_timer_cycles();
-		int64_t remaining = rte_atomic64_read(&t->outstand_pkts);
+		int64_t remaining = __atomic_load_n(&t->outstand_pkts, __ATOMIC_RELAXED);
 
 		if (remaining <= 0) {
 			t->result = EVT_TEST_SUCCESS;
--- a/app/test-eventdev/test_order_common.h
+++ b/app/test-eventdev/test_order_common.h
@@ -48,7 +48,7 @@ struct test_order {
 	 * The atomic_* is an expensive operation,Since it is a functional test,
 	 * We are using the atomic_ operation to reduce the code complexity.
 	 */
-	rte_atomic64_t outstand_pkts;
+	uint64_t outstand_pkts;
 	enum evt_test_result result;
 	uint32_t nb_flows;
 	uint64_t nb_pkts;
@@ -95,7 +95,7 @@ static __rte_always_inline void
 order_process_stage_1(struct test_order *const t,
 		struct rte_event *const ev, const uint32_t nb_flows,
 		uint32_t *const expected_flow_seq,
-		rte_atomic64_t *const outstand_pkts)
+		uint64_t *const outstand_pkts)
 {
 	const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
 	/* compare the seqn against expected value */
@@ -113,7 +113,7 @@ order_process_stage_1(struct test_order *const t,
 	 */
 	expected_flow_seq[flow]++;
 	rte_pktmbuf_free(ev->mbuf);
-	rte_atomic64_sub(outstand_pkts, 1);
+	__atomic_sub_fetch(outstand_pkts, 1, __ATOMIC_RELAXED);
 }
 
 static __rte_always_inline void
@@ -132,7 +132,7 @@ order_process_stage_invalid(struct test_order *const t,
 	const uint8_t port = w->port_id;\
 	const uint32_t nb_flows = t->nb_flows;\
 	uint32_t *expected_flow_seq = t->expected_flow_seq;\
-	rte_atomic64_t *outstand_pkts = &t->outstand_pkts;\
+	uint64_t *outstand_pkts = &t->outstand_pkts;\
 	if (opt->verbose_level > 1)\
 		printf("%s(): lcore %d dev_id %d port=%d\n",\
 			__func__, rte_lcore_id(), dev_id, port)
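Taken together, the header changes above turn outstand_pkts into a plain uint64_t that is initialized once, decremented once per processed mbuf, and polled until it drains. A self-contained sketch of that lifecycle under the same relaxed ordering (pthread-based; the thread count and packet counts are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

static uint64_t outstand_pkts;	/* shared completion counter */

static void *worker(void *arg)
{
	uint64_t n = (uintptr_t)arg;
	while (n--)	/* one decrement per "processed packet" */
		__atomic_sub_fetch(&outstand_pkts, 1, __ATOMIC_RELAXED);
	return NULL;
}

int main(void)
{
	pthread_t w[2];

	__atomic_store_n(&outstand_pkts, 1000, __ATOMIC_RELAXED);
	for (int i = 0; i < 2; i++)
		pthread_create(&w[i], NULL, worker, (void *)(uintptr_t)500);
	/* poll until the count drains, as order_launch_lcores() does */
	while ((int64_t)__atomic_load_n(&outstand_pkts, __ATOMIC_RELAXED) > 0)
		;
	for (int i = 0; i < 2; i++)
		pthread_join(w[i], NULL);
	puts("all packets accounted for");
	return 0;
}

Relaxed ordering suffices because nothing is published through the counter itself; the ordering checks of the test are made against the per-flow expected_flow_seq values instead.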
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -28,7 +28,7 @@ order_queue_worker(void *arg, const bool flow_id_cap)
 		uint16_t event = rte_event_dequeue_burst(dev_id, port,
 					&ev, 1, 0);
 		if (!event) {
-			if (rte_atomic64_read(outstand_pkts) <= 0)
+			if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
 				break;
 			rte_pause();
 			continue;
@@ -64,7 +64,7 @@ order_queue_worker_burst(void *arg, const bool flow_id_cap)
 				BURST_SIZE, 0);
 
 		if (nb_rx == 0) {
-			if (rte_atomic64_read(outstand_pkts) <= 0)
+			if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
 				break;
 			rte_pause();
 			continue;
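For completeness: the converted paths are exercised by the order_* tests of the dpdk-test-eventdev application. Per the DPDK tools guide, a run against the software event device looks roughly like the following (core assignments and packet count are illustrative):

	./dpdk-test-eventdev --vdev=event_sw0 -- --test=order_queue \
			--plcores 1 --wlcores 2,3 --nb_flows=64 --nb_pkts=1000000

The test reports success once outstand_pkts drains to zero without any out-of-order per-flow sequence number being observed.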