event/dsw: use C11 built-ins for atomics

Use C11-style GCC built-in functions (__atomic_*) for atomic operations, instead of the legacy <rte_atomic.h> API.

Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Author:    Mattias Rönnblom <mattias.ronnblom@ericsson.com>
Date:      2021-01-26 17:36:50 +01:00
Committer: Jerin Jacob
parent 13e06abc9d
commit 8b1af885f6
4 changed files with 28 additions and 24 deletions
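
For readers less familiar with the two APIs: the patch swaps DPDK's legacy rte_atomic wrapper types for plain integers manipulated through GCC's C11-style __atomic built-ins. A minimal before/after sketch, using an illustrative counter rather than the driver's own fields:

    #include <stdint.h>

    /* Before (legacy DPDK API, sketched in a comment):
     *
     *     rte_atomic32_t counter;
     *     rte_atomic32_init(&counter);
     *     int32_t v = rte_atomic32_add_return(&counter, n);
     *
     * After: a plain integer plus C11-style GCC built-ins. */
    static int32_t counter;

    static inline int32_t
    counter_add(int32_t n)
    {
            /* Returns the post-add value, like rte_atomic32_add_return(). */
            return __atomic_add_fetch(&counter, n, __ATOMIC_RELAXED);
    }

    static inline int32_t
    counter_read(void)
    {
            return __atomic_load_n(&counter, __ATOMIC_RELAXED);
    }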

drivers/event/dsw/dsw_evdev.c

@@ -61,9 +61,6 @@ dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
port->in_ring = in_ring;
port->ctl_in_ring = ctl_in_ring;
-rte_atomic16_init(&port->load);
-rte_atomic32_init(&port->immigration_load);
port->load_update_interval =
(DSW_LOAD_UPDATE_INTERVAL * rte_get_timer_hz()) / US_PER_S;
@@ -275,7 +272,7 @@ dsw_start(struct rte_eventdev *dev)
uint16_t i;
uint64_t now;
-rte_atomic32_init(&dsw->credits_on_loan);
+dsw->credits_on_loan = 0;
initial_flow_to_port_assignment(dsw);
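
Note that the removed rte_atomic*_init() calls have no atomic replacement: port setup and device start run in the configuring thread before any data-path lcore can touch these fields, so a plain store is enough. A sketch under that single-threaded-configuration assumption, with illustrative names:

    #include <stdint.h>

    struct dev_state {
            int32_t credits_on_loan;
    };

    static void
    dev_start(struct dev_state *dev)
    {
            /* No worker threads are running yet, so this plain
             * store replaces the old rte_atomic32_init() call. */
            dev->credits_on_loan = 0;
    }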

drivers/event/dsw/dsw_evdev.h

@@ -219,9 +219,9 @@ struct dsw_port {
struct rte_ring *ctl_in_ring __rte_cache_aligned;
/* Estimate of current port load. */
-rte_atomic16_t load __rte_cache_aligned;
+int16_t load __rte_cache_aligned;
/* Estimate of flows currently migrating to this port. */
-rte_atomic32_t immigration_load __rte_cache_aligned;
+int32_t immigration_load __rte_cache_aligned;
} __rte_cache_aligned;
struct dsw_queue {
@@ -241,7 +241,7 @@ struct dsw_evdev {
uint8_t num_queues;
int32_t max_inflight;
-rte_atomic32_t credits_on_loan __rte_cache_aligned;
+int32_t credits_on_loan __rte_cache_aligned;
};
#define DSW_CTL_PAUS_REQ (0)
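
The header changes work because the __atomic built-ins operate on ordinary objects; no wrapper type is needed, and the cache-line alignment is kept so the frequently written fields do not share a line. A sketch of the resulting pattern (the attribute is spelled out literally here as an assumption about what __rte_cache_aligned expands to):

    #include <stdint.h>

    /* Assumption: __rte_cache_aligned is an alignment attribute;
     * 64 bytes is a typical cache-line size. */
    #define CACHE_ALIGNED __attribute__((__aligned__(64)))

    struct port_state {
            /* Plain integers, each on its own cache line; all
             * cross-thread access goes through __atomic built-ins,
             * e.g. __atomic_load_n(&p->load, __ATOMIC_RELAXED). */
            int16_t load CACHE_ALIGNED;
            int32_t immigration_load CACHE_ALIGNED;
    };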

drivers/event/dsw/dsw_event.c

@@ -11,7 +11,6 @@
#include <stdbool.h>
#include <string.h>
-#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_memcpy.h>
#include <rte_random.h>
@@ -32,7 +31,8 @@ dsw_port_acquire_credits(struct dsw_evdev *dsw, struct dsw_port *port,
return true;
}
-total_on_loan = rte_atomic32_read(&dsw->credits_on_loan);
+total_on_loan =
+__atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
available = dsw->max_inflight - total_on_loan;
acquired_credits = RTE_MAX(missing_credits, DSW_PORT_MIN_CREDITS);
@@ -43,12 +43,14 @@ dsw_port_acquire_credits(struct dsw_evdev *dsw, struct dsw_port *port,
* thread can allocate tokens in between the check and the
* allocation.
*/
-new_total_on_loan = rte_atomic32_add_return(&dsw->credits_on_loan,
-acquired_credits);
+new_total_on_loan =
+__atomic_add_fetch(&dsw->credits_on_loan, acquired_credits,
+__ATOMIC_RELAXED);
if (unlikely(new_total_on_loan > dsw->max_inflight)) {
/* Some other port took the last credits */
-rte_atomic32_sub(&dsw->credits_on_loan, acquired_credits);
+__atomic_sub_fetch(&dsw->credits_on_loan, acquired_credits,
+__ATOMIC_RELAXED);
return false;
}
@@ -74,7 +76,8 @@ dsw_port_return_credits(struct dsw_evdev *dsw, struct dsw_port *port,
port->inflight_credits = leave_credits;
-rte_atomic32_sub(&dsw->credits_on_loan, return_credits);
+__atomic_sub_fetch(&dsw->credits_on_loan, return_credits,
+__ATOMIC_RELAXED);
DSW_LOG_DP_PORT(DEBUG, port->id,
"Returned %d tokens to pool.\n",
@@ -152,19 +155,19 @@ dsw_port_load_update(struct dsw_port *port, uint64_t now)
int16_t period_load;
int16_t new_load;
-old_load = rte_atomic16_read(&port->load);
+old_load = __atomic_load_n(&port->load, __ATOMIC_RELAXED);
period_load = dsw_port_load_close_period(port, now);
new_load = (period_load + old_load*DSW_OLD_LOAD_WEIGHT) /
(DSW_OLD_LOAD_WEIGHT+1);
-rte_atomic16_set(&port->load, new_load);
+__atomic_store_n(&port->load, new_load, __ATOMIC_RELAXED);
/* The load of the recently immigrated flows should hopefully
* be reflected in the load estimate by now.
*/
-rte_atomic32_set(&port->immigration_load, 0);
+__atomic_store_n(&port->immigration_load, 0, __ATOMIC_RELAXED);
}
static void
@@ -367,9 +370,11 @@ dsw_retrieve_port_loads(struct dsw_evdev *dsw, int16_t *port_loads,
uint16_t i;
for (i = 0; i < dsw->num_ports; i++) {
-int16_t measured_load = rte_atomic16_read(&dsw->ports[i].load);
+int16_t measured_load =
+__atomic_load_n(&dsw->ports[i].load, __ATOMIC_RELAXED);
int32_t immigration_load =
-rte_atomic32_read(&dsw->ports[i].immigration_load);
+__atomic_load_n(&dsw->ports[i].immigration_load,
+__ATOMIC_RELAXED);
int32_t load = measured_load + immigration_load;
load = RTE_MIN(load, DSW_MAX_LOAD);
@@ -502,8 +507,8 @@ dsw_select_emigration_target(struct dsw_evdev *dsw,
target_qfs[*targets_len] = *candidate_qf;
(*targets_len)++;
-rte_atomic32_add(&dsw->ports[candidate_port_id].immigration_load,
-candidate_flow_load);
+__atomic_add_fetch(&dsw->ports[candidate_port_id].immigration_load,
+candidate_flow_load, __ATOMIC_RELAXED);
return true;
}
@@ -850,7 +855,8 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw,
return;
}
-source_port_load = rte_atomic16_read(&source_port->load);
+source_port_load =
+__atomic_load_n(&source_port->load, __ATOMIC_RELAXED);
if (source_port_load < DSW_MIN_SOURCE_LOAD_FOR_MIGRATION) {
DSW_LOG_DP_PORT(DEBUG, source_port->id,
"Load %d is below threshold level %d.\n",
@@ -1205,7 +1211,8 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port,
* simplicity reasons, we deny the whole burst if the port is
* above the water mark.
*/
-if (unlikely(num_new > 0 && rte_atomic32_read(&dsw->credits_on_loan) >
+if (unlikely(num_new > 0 &&
+__atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED) >
source_port->new_event_threshold))
return 0;
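
The credit-acquisition path above is deliberately optimistic: instead of a racy check-then-add (the hazard the in-tree comment describes), it adds first and rolls back if the pool turns out to be overdrawn. A self-contained sketch of that pattern, with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    static int32_t credits_on_loan;

    static bool
    acquire_credits(int32_t max_inflight, int32_t wanted)
    {
            /* Optimistically take the credits... */
            int32_t new_total = __atomic_add_fetch(&credits_on_loan, wanted,
                                                   __ATOMIC_RELAXED);

            /* ...and undo the addition if other threads drained the
             * pool concurrently, leaving it overdrawn. */
            if (new_total > max_inflight) {
                    __atomic_sub_fetch(&credits_on_loan, wanted,
                                       __ATOMIC_RELAXED);
                    return false;
            }
            return true;
    }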

drivers/event/dsw/dsw_xstats.c

@@ -48,7 +48,7 @@ struct dsw_xstats_port {
static uint64_t
dsw_xstats_dev_credits_on_loan(struct dsw_evdev *dsw)
{
-return rte_atomic32_read(&dsw->credits_on_loan);
+return __atomic_load_n(&dsw->credits_on_loan, __ATOMIC_RELAXED);
}
static struct dsw_xstat_dev dsw_dev_xstats[] = {
@@ -126,7 +126,7 @@ dsw_xstats_port_get_load(struct dsw_evdev *dsw, uint8_t port_id,
{
int16_t load;
-load = rte_atomic16_read(&dsw->ports[port_id].load);
+load = __atomic_load_n(&dsw->ports[port_id].load, __ATOMIC_RELAXED);
return DSW_LOAD_TO_PERCENT(load);
}
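
Finally, the xstats getters need only an atomic snapshot of each counter, not ordering against other memory accesses, which is why relaxed loads suffice throughout. A hedged sketch of such a read-only statistics path (the percentage scaling shown is an assumption, not copied from the driver):

    #include <stdint.h>

    /* Assumption: load is a fixed-point estimate where INT16_MAX
     * represents 100%; the macro below is illustrative only. */
    #define MAX_LOAD INT16_MAX
    #define LOAD_TO_PERCENT(load) \
            ((int)(((int32_t)(load) * 100) / MAX_LOAD))

    static int16_t port_load;

    static int
    read_load_percent(void)
    {
            int16_t load = __atomic_load_n(&port_load, __ATOMIC_RELAXED);

            return LOAD_TO_PERCENT(load);
    }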