0bee070907
A sequence lock (seqlock) is a synchronization primitive which allows for data-race free, low-overhead, high-frequency reads, suitable for data structures shared across many cores and which are updated relatively infrequently. A seqlock permits multiple parallel readers. A spinlock is used to serialize writers. In cases where there is only a single writer, or writer-writer synchronization is done by some external means, the "raw" sequence counter type (and accompanying rte_seqcount_*() functions) may be used instead. To avoid resource reclamation and other issues, the data protected by a seqlock is best off being self-contained (i.e., no pointers [except to constant data]). One way to think about seqlocks is that they provide means to perform atomic operations on data objects larger than what the native atomic machine instructions allow for. DPDK seqlocks (and the underlying sequence counters) are not preemption safe on the writer side. A thread preemption affects performance, not correctness. A seqlock contains a sequence number, which can be thought of as the generation of the data it protects. A reader will 1. Load the sequence number (sn). 2. Load, in arbitrary order, the seqlock-protected data. 3. Load the sn again. 4. Check if the first and second sn are equal, and even numbered. If they are not, discard the loaded data, and restart from 1. The first three steps need to be ordered using suitable memory fences. A writer will 1. Take the spinlock, to serialize writer access. 2. Load the sn. 3. Store the original sn + 1 as the new sn. 4. Perform load and stores to the seqlock-protected data. 5. Store the original sn + 2 as the new sn. 6. Release the spinlock. Proper memory fencing is required to make sure the first sn store, the data stores, and the second sn store appear to the reader in the mentioned order. The sn loads and stores must be atomic, but the data loads and stores need not be. 
The original seqlock design and implementation was done by Stephen Hemminger. This is an independent implementation, using C11 atomics. For more information on seqlocks, see https://en.wikipedia.org/wiki/Seqlock Acked-by: Morten Brørup <mb@smartsharesystems.com> Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com> Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com> Reviewed-by: Chengwen Feng <fengchengwen@huawei.com> Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
191 lines
3.7 KiB
C
191 lines
3.7 KiB
C
/* SPDX-License-Identifier: BSD-3-Clause
|
|
* Copyright(c) 2022 Ericsson AB
|
|
*/
|
|
|
|
#include <rte_seqlock.h>
|
|
|
|
#include <rte_cycles.h>
|
|
#include <rte_malloc.h>
|
|
#include <rte_random.h>
|
|
|
|
#include <inttypes.h>
|
|
|
|
#include "test.h"
|
|
|
|
/*
 * The data protected by the seqlock. The writer stores the same value
 * into all three counters, so a reader observing a != b or b != c has
 * seen a torn (inconsistent) update.
 */
struct data {
	rte_seqlock_t lock;

	uint64_t a;
	/* 'b' and 'c' are placed on their own cache lines, so the three
	 * stores cannot complete as a single cache-line update —
	 * presumably to raise the odds of a reader observing a
	 * partially-applied write (TODO: confirm intent). */
	uint64_t b __rte_cache_aligned;
	uint64_t c __rte_cache_aligned;
} __rte_cache_aligned;
|
|
|
|
/* Per-reader-lcore state. */
struct reader {
	struct data *data; /* shared seqlock-protected data to read */
	uint8_t stop; /* set to 1 (atomically) to ask the reader to exit */
};
|
|
|
|
/* Total time each writer keeps issuing updates. */
#define WRITER_RUNTIME 2.0 /* s */

/* Upper bound for the random pause between consecutive writes. */
#define WRITER_MAX_DELAY 100 /* us */

/* On average every INTERRUPTED_WRITER_FREQUENCY:th write-side critical
 * section is stretched by WRITER_INTERRUPT_TIME, emulating a writer
 * being preempted while holding the lock. */
#define INTERRUPTED_WRITER_FREQUENCY 1000
#define WRITER_INTERRUPT_TIME 1 /* us */
|
|
|
|
static int
|
|
writer_run(void *arg)
|
|
{
|
|
struct data *data = arg;
|
|
uint64_t deadline;
|
|
|
|
deadline = rte_get_timer_cycles() +
|
|
WRITER_RUNTIME * rte_get_timer_hz();
|
|
|
|
while (rte_get_timer_cycles() < deadline) {
|
|
bool interrupted;
|
|
uint64_t new_value;
|
|
unsigned int delay;
|
|
|
|
new_value = rte_rand();
|
|
|
|
interrupted = rte_rand_max(INTERRUPTED_WRITER_FREQUENCY) == 0;
|
|
|
|
rte_seqlock_write_lock(&data->lock);
|
|
|
|
data->c = new_value;
|
|
data->b = new_value;
|
|
|
|
if (interrupted)
|
|
rte_delay_us_block(WRITER_INTERRUPT_TIME);
|
|
|
|
data->a = new_value;
|
|
|
|
rte_seqlock_write_unlock(&data->lock);
|
|
|
|
delay = rte_rand_max(WRITER_MAX_DELAY);
|
|
|
|
rte_delay_us_block(delay);
|
|
}
|
|
|
|
return TEST_SUCCESS;
|
|
}
|
|
|
|
/* On average every INTERRUPTED_READER_FREQUENCY:th read-side critical
 * section is stretched by READER_INTERRUPT_TIME, emulating a reader
 * being preempted mid-read (which forces a retry if a write occurs). */
#define INTERRUPTED_READER_FREQUENCY 1000
#define READER_INTERRUPT_TIME 1000 /* us */
|
|
|
|
/*
 * Reader lcore entry point: repeatedly perform a seqlock-protected
 * read of the three data fields until told to stop via reader->stop.
 * The writer stores the same value in all three fields, so any a/b/c
 * mismatch surviving the retry loop is a seqlock failure.
 *
 * Returns TEST_SUCCESS, or TEST_FAILED on an inconsistent read.
 */
static int
reader_run(void *arg)
{
	struct reader *r = arg;
	int rc = TEST_SUCCESS;

	while (__atomic_load_n(&r->stop, __ATOMIC_RELAXED) == 0 &&
			rc == TEST_SUCCESS) {
		struct data *data = r->data;
		bool interrupted;
		uint32_t sn;
		uint64_t a;
		uint64_t b;
		uint64_t c;

		/* Occasionally dwell inside the read-side critical
		 * section, increasing the chance of overlapping with a
		 * concurrent write. */
		interrupted = rte_rand_max(INTERRUPTED_READER_FREQUENCY) == 0;

		do {
			sn = rte_seqlock_read_begin(&data->lock);

			/* Load in a different order than the writer
			 * stores (a first, then c, b), to improve the
			 * odds of observing a torn update if the
			 * seqlock were broken. */
			a = data->a;
			if (interrupted)
				rte_delay_us_block(READER_INTERRUPT_TIME);
			c = data->c;
			b = data->b;

			/* Retry if a write overlapped this read. */
		} while (rte_seqlock_read_retry(&data->lock, sn));

		/* All fields carry the same value per write, so any
		 * mismatch here means the seqlock let a torn read
		 * through. */
		if (a != b || b != c) {
			printf("Reader observed inconsistent data values "
				"%" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
				a, b, c);
			rc = TEST_FAILED;
		}
	}

	return rc;
}
|
|
|
|
/* Ask a reader lcore to leave its read loop; the flag is polled with
 * relaxed atomics in reader_run(). */
static void
reader_stop(struct reader *reader)
{
	__atomic_store_n(&reader->stop, 1, __ATOMIC_RELAXED);
}
|
|
|
|
/* The main lcore acts as one writer; the first worker lcore is the
 * other. All remaining lcores become readers. */
#define NUM_WRITERS 2 /* main lcore + one worker */
#define MIN_NUM_READERS 2
#define MIN_LCORE_COUNT (NUM_WRITERS + MIN_NUM_READERS)

/* Only a compile-time test of the static initializer macro. */
static rte_seqlock_t __rte_unused static_init_lock = RTE_SEQLOCK_INITIALIZER;
|
|
|
|
static int
|
|
test_seqlock(void)
|
|
{
|
|
struct reader readers[RTE_MAX_LCORE];
|
|
unsigned int num_lcores;
|
|
unsigned int num_readers;
|
|
struct data *data;
|
|
unsigned int i;
|
|
unsigned int lcore_id;
|
|
unsigned int reader_lcore_ids[RTE_MAX_LCORE];
|
|
unsigned int worker_writer_lcore_id = 0;
|
|
int rc = TEST_SUCCESS;
|
|
|
|
num_lcores = rte_lcore_count();
|
|
|
|
if (num_lcores < MIN_LCORE_COUNT) {
|
|
printf("Too few cores to run test. Skipping.\n");
|
|
return TEST_SKIPPED;
|
|
}
|
|
|
|
num_readers = num_lcores - NUM_WRITERS;
|
|
|
|
data = rte_zmalloc(NULL, sizeof(struct data), 0);
|
|
|
|
if (data == NULL) {
|
|
printf("Failed to allocate memory for seqlock data\n");
|
|
return TEST_FAILED;
|
|
}
|
|
|
|
i = 0;
|
|
RTE_LCORE_FOREACH_WORKER(lcore_id) {
|
|
if (i == 0) {
|
|
rte_eal_remote_launch(writer_run, data, lcore_id);
|
|
worker_writer_lcore_id = lcore_id;
|
|
} else {
|
|
unsigned int reader_idx = i - 1;
|
|
struct reader *reader = &readers[reader_idx];
|
|
|
|
reader->data = data;
|
|
reader->stop = 0;
|
|
|
|
rte_eal_remote_launch(reader_run, reader, lcore_id);
|
|
reader_lcore_ids[reader_idx] = lcore_id;
|
|
}
|
|
i++;
|
|
}
|
|
|
|
if (writer_run(data) != 0 ||
|
|
rte_eal_wait_lcore(worker_writer_lcore_id) != 0)
|
|
rc = TEST_FAILED;
|
|
|
|
for (i = 0; i < num_readers; i++) {
|
|
reader_stop(&readers[i]);
|
|
if (rte_eal_wait_lcore(reader_lcore_ids[i]) != 0)
|
|
rc = TEST_FAILED;
|
|
}
|
|
|
|
rte_free(data);
|
|
|
|
return rc;
|
|
}
|
|
|
|
REGISTER_TEST_COMMAND(seqlock_autotest, test_seqlock);
|