commit 1fb62fb074

Import Concurrency Kit into the kernel.

CK is a toolkit providing a variety of lock-free algorithms and data
structures. More information can be found at www.concurrencykit.org.

sys/contrib/ck/include/ck_array.h (new file, 100 lines)
@@ -0,0 +1,100 @@
/*
 * Copyright 2013-2015 Samy Al Bahra
 * Copyright 2013-2014 AppNexus, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_ARRAY_H
#define CK_ARRAY_H

#include <ck_cc.h>
#include <ck_malloc.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

struct _ck_array {
        unsigned int n_committed;
        unsigned int length;
        void *values[];
};

struct ck_array {
        struct ck_malloc *allocator;
        struct _ck_array *active;
        unsigned int n_entries;
        struct _ck_array *transaction;
};
typedef struct ck_array ck_array_t;

struct ck_array_iterator {
        struct _ck_array *snapshot;
};
typedef struct ck_array_iterator ck_array_iterator_t;

#define CK_ARRAY_MODE_SPMC 0U
#define CK_ARRAY_MODE_MPMC (void) /* Unsupported. */

bool ck_array_init(ck_array_t *, unsigned int, struct ck_malloc *, unsigned int);
bool ck_array_commit(ck_array_t *);
bool ck_array_put(ck_array_t *, void *);
int ck_array_put_unique(ck_array_t *, void *);
bool ck_array_remove(ck_array_t *, void *);
void ck_array_deinit(ck_array_t *, bool);

CK_CC_INLINE static unsigned int
ck_array_length(struct ck_array *array)
{
        struct _ck_array *a = ck_pr_load_ptr(&array->active);

        ck_pr_fence_load();
        return ck_pr_load_uint(&a->n_committed);
}

CK_CC_INLINE static void *
ck_array_buffer(struct ck_array *array, unsigned int *length)
{
        struct _ck_array *a = ck_pr_load_ptr(&array->active);

        ck_pr_fence_load();
        *length = ck_pr_load_uint(&a->n_committed);
        return a->values;
}

CK_CC_INLINE static bool
ck_array_initialized(struct ck_array *array)
{

        return ck_pr_load_ptr(&array->active) != NULL;
}

#define CK_ARRAY_FOREACH(a, i, b) \
        (i)->snapshot = ck_pr_load_ptr(&(a)->active); \
        ck_pr_fence_load(); \
        for (unsigned int _ck_i = 0; \
            _ck_i < (a)->active->n_committed && \
            ((*b) = (a)->active->values[_ck_i], 1); \
            _ck_i++)

#endif /* CK_ARRAY_H */
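
Typical SPMC usage pairs a single writer issuing put/commit transactions with
concurrent readers iterating over committed snapshots. A minimal userspace
sketch (the allocator shims and sample values are illustrative assumptions,
not part of this import; struct ck_malloc is declared in ck_malloc.h):

    #include <ck_array.h>
    #include <stdlib.h>

    /* Hypothetical allocator shims; ck_array allocates through ck_malloc. */
    static void *xmalloc(size_t size) { return malloc(size); }
    static void *xrealloc(void *p, size_t os, size_t ns, bool d) { (void)os; (void)d; return realloc(p, ns); }
    static void xfree(void *p, size_t size, bool d) { (void)size; (void)d; free(p); }
    static struct ck_malloc allocator = {
            .malloc = xmalloc, .realloc = xrealloc, .free = xfree
    };

    static int a = 1, b = 2;

    static void
    example(void)
    {
            ck_array_t array;
            ck_array_iterator_t it;
            void *entry;

            ck_array_init(&array, CK_ARRAY_MODE_SPMC, &allocator, 8);
            ck_array_put(&array, &a);
            ck_array_put(&array, &b);
            ck_array_commit(&array);        /* publish pending puts to readers */

            CK_ARRAY_FOREACH(&array, &it, &entry) {
                    /* readers observe the committed snapshot only */
            }

            ck_array_deinit(&array, false);
    }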

sys/contrib/ck/include/ck_backoff.h (new file, 57 lines)
@@ -0,0 +1,57 @@
/*
 * Copyright 2009-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_BACKOFF_H
#define CK_BACKOFF_H

#include <ck_cc.h>
#include <ck_pr.h>

#ifndef CK_BACKOFF_CEILING
#define CK_BACKOFF_CEILING ((1 << 20) - 1)
#endif

#define CK_BACKOFF_INITIALIZER (1 << 9)

typedef unsigned int ck_backoff_t;

/*
 * This is an exponential back-off implementation.
 */
CK_CC_INLINE static void
ck_backoff_eb(unsigned int *c)
{
        unsigned int i, ceiling;

        ceiling = *c;
        for (i = 0; i < ceiling; i++)
                ck_pr_barrier();

        *c = ceiling <<= ceiling < CK_BACKOFF_CEILING;
        return;
}

#endif /* CK_BACKOFF_H */
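
The counter doubles after every call until it saturates at CK_BACKOFF_CEILING,
so a contended retry loop waits progressively longer between attempts. A
sketch of the intended pattern (lock_word is a hypothetical spinlock word;
ck_pr_cas_uint comes from ck_pr.h):

    static unsigned int lock_word;      /* hypothetical lock state, 0 == free */

    static void
    acquire(void)
    {
            ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;

            while (ck_pr_cas_uint(&lock_word, 0, 1) == false)
                    ck_backoff_eb(&backoff);  /* spin roughly twice as long each failure */
    }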

sys/contrib/ck/include/ck_barrier.h (new file, 164 lines)
@@ -0,0 +1,164 @@
/*
 * Copyright 2011-2015 Samy Al Bahra.
 * Copyright 2011 David Joseph.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_BARRIER_H
#define CK_BARRIER_H

#include <ck_spinlock.h>

struct ck_barrier_centralized {
        unsigned int value;
        unsigned int sense;
};
typedef struct ck_barrier_centralized ck_barrier_centralized_t;

struct ck_barrier_centralized_state {
        unsigned int sense;
};
typedef struct ck_barrier_centralized_state ck_barrier_centralized_state_t;

#define CK_BARRIER_CENTRALIZED_INITIALIZER {0, 0}
#define CK_BARRIER_CENTRALIZED_STATE_INITIALIZER {0}

void ck_barrier_centralized(ck_barrier_centralized_t *,
    ck_barrier_centralized_state_t *, unsigned int);

struct ck_barrier_combining_group {
        unsigned int k;
        unsigned int count;
        unsigned int sense;
        struct ck_barrier_combining_group *parent;
        struct ck_barrier_combining_group *left;
        struct ck_barrier_combining_group *right;
        struct ck_barrier_combining_group *next;
} CK_CC_CACHELINE;
typedef struct ck_barrier_combining_group ck_barrier_combining_group_t;

struct ck_barrier_combining_state {
        unsigned int sense;
};
typedef struct ck_barrier_combining_state ck_barrier_combining_state_t;

#define CK_BARRIER_COMBINING_STATE_INITIALIZER {~0}

struct ck_barrier_combining {
        struct ck_barrier_combining_group *root;
        ck_spinlock_fas_t mutex;
};
typedef struct ck_barrier_combining ck_barrier_combining_t;

void ck_barrier_combining_init(ck_barrier_combining_t *, ck_barrier_combining_group_t *);

void ck_barrier_combining_group_init(ck_barrier_combining_t *,
    ck_barrier_combining_group_t *, unsigned int);

void ck_barrier_combining(ck_barrier_combining_t *,
    ck_barrier_combining_group_t *,
    ck_barrier_combining_state_t *);

struct ck_barrier_dissemination_flag {
        unsigned int tflag;
        unsigned int *pflag;
};
typedef struct ck_barrier_dissemination_flag ck_barrier_dissemination_flag_t;

struct ck_barrier_dissemination {
        unsigned int nthr;
        unsigned int size;
        unsigned int tid;
        struct ck_barrier_dissemination_flag *flags[2];
};
typedef struct ck_barrier_dissemination ck_barrier_dissemination_t;

struct ck_barrier_dissemination_state {
        int parity;
        unsigned int sense;
        unsigned int tid;
};
typedef struct ck_barrier_dissemination_state ck_barrier_dissemination_state_t;

void ck_barrier_dissemination_init(ck_barrier_dissemination_t *,
    ck_barrier_dissemination_flag_t **, unsigned int);

void ck_barrier_dissemination_subscribe(ck_barrier_dissemination_t *,
    ck_barrier_dissemination_state_t *);

unsigned int ck_barrier_dissemination_size(unsigned int);

void ck_barrier_dissemination(ck_barrier_dissemination_t *,
    ck_barrier_dissemination_state_t *);

struct ck_barrier_tournament_round {
        int role;
        unsigned int *opponent;
        unsigned int flag;
};
typedef struct ck_barrier_tournament_round ck_barrier_tournament_round_t;

struct ck_barrier_tournament {
        unsigned int tid;
        unsigned int size;
        struct ck_barrier_tournament_round **rounds;
};
typedef struct ck_barrier_tournament ck_barrier_tournament_t;

struct ck_barrier_tournament_state {
        unsigned int sense;
        unsigned int vpid;
};
typedef struct ck_barrier_tournament_state ck_barrier_tournament_state_t;

void ck_barrier_tournament_subscribe(ck_barrier_tournament_t *,
    ck_barrier_tournament_state_t *);
void ck_barrier_tournament_init(ck_barrier_tournament_t *,
    ck_barrier_tournament_round_t **,
    unsigned int);
unsigned int ck_barrier_tournament_size(unsigned int);
void ck_barrier_tournament(ck_barrier_tournament_t *, ck_barrier_tournament_state_t *);

struct ck_barrier_mcs {
        unsigned int tid;
        unsigned int *children[2];
        unsigned int childnotready[4];
        unsigned int dummy;
        unsigned int havechild[4];
        unsigned int *parent;
        unsigned int parentsense;
};
typedef struct ck_barrier_mcs ck_barrier_mcs_t;

struct ck_barrier_mcs_state {
        unsigned int sense;
        unsigned int vpid;
};
typedef struct ck_barrier_mcs_state ck_barrier_mcs_state_t;

void ck_barrier_mcs_init(ck_barrier_mcs_t *, unsigned int);
void ck_barrier_mcs_subscribe(ck_barrier_mcs_t *, ck_barrier_mcs_state_t *);
void ck_barrier_mcs(ck_barrier_mcs_t *, ck_barrier_mcs_state_t *);

#endif /* CK_BARRIER_H */
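
All of the barrier flavors share the same subscribe/wait shape; the
centralized variant is the simplest. A sketch with an assumed worker count
N_THREADS (the worker function and thread creation are illustrative):

    #define N_THREADS 4         /* assumed number of participating threads */

    static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;

    static void *
    worker(void *arg)
    {
            ck_barrier_centralized_state_t state =
                CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;

            /* ... phase 1 work ... */
            ck_barrier_centralized(&barrier, &state, N_THREADS);
            /* no thread reaches this point until all N_THREADS arrive */
            return arg;
    }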

sys/contrib/ck/include/ck_bitmap.h (new file, 515 lines)
@@ -0,0 +1,515 @@
/*
 * Copyright 2012-2015 Samy Al Bahra.
 * Copyright 2012-2014 AppNexus, Inc.
 * Copyright 2014 Paul Khuong.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_BITMAP_H
#define CK_BITMAP_H

#include <ck_cc.h>
#include <ck_limits.h>
#include <ck_pr.h>
#include <ck_stdint.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>
#include <ck_string.h>

#if !defined(CK_F_PR_LOAD_UINT) || !defined(CK_F_PR_STORE_UINT) || \
    !defined(CK_F_PR_AND_UINT) || !defined(CK_F_PR_OR_UINT) || \
    !defined(CK_F_CC_CTZ)
#error "ck_bitmap is not supported on your platform."
#endif

#define CK_BITMAP_BLOCK (sizeof(unsigned int) * CHAR_BIT)
#define CK_BITMAP_OFFSET(i) ((i) % CK_BITMAP_BLOCK)
#define CK_BITMAP_BIT(i) (1U << CK_BITMAP_OFFSET(i))
#define CK_BITMAP_PTR(x, i) ((x) + ((i) / CK_BITMAP_BLOCK))
#define CK_BITMAP_BLOCKS(n) (((n) + CK_BITMAP_BLOCK - 1) / CK_BITMAP_BLOCK)

#define CK_BITMAP_INSTANCE(n_entries) \
        union { \
                struct { \
                        unsigned int n_bits; \
                        unsigned int map[CK_BITMAP_BLOCKS(n_entries)]; \
                } content; \
                struct ck_bitmap bitmap; \
        }

#define CK_BITMAP_ITERATOR_INIT(a, b) \
        ck_bitmap_iterator_init((a), &(b)->bitmap)

#define CK_BITMAP_INIT(a, b, c) \
        ck_bitmap_init(&(a)->bitmap, (b), (c))

#define CK_BITMAP_NEXT(a, b, c) \
        ck_bitmap_next(&(a)->bitmap, (b), (c))

#define CK_BITMAP_SET(a, b) \
        ck_bitmap_set(&(a)->bitmap, (b))

#define CK_BITMAP_BTS(a, b) \
        ck_bitmap_bts(&(a)->bitmap, (b))

#define CK_BITMAP_RESET(a, b) \
        ck_bitmap_reset(&(a)->bitmap, (b))

#define CK_BITMAP_TEST(a, b) \
        ck_bitmap_test(&(a)->bitmap, (b))

#define CK_BITMAP_UNION(a, b) \
        ck_bitmap_union(&(a)->bitmap, &(b)->bitmap)

#define CK_BITMAP_INTERSECTION(a, b) \
        ck_bitmap_intersection(&(a)->bitmap, &(b)->bitmap)

#define CK_BITMAP_INTERSECTION_NEGATE(a, b) \
        ck_bitmap_intersection_negate(&(a)->bitmap, &(b)->bitmap)

#define CK_BITMAP_CLEAR(a) \
        ck_bitmap_clear(&(a)->bitmap)

#define CK_BITMAP_EMPTY(a, b) \
        ck_bitmap_empty(&(a)->bitmap, b)

#define CK_BITMAP_FULL(a, b) \
        ck_bitmap_full(&(a)->bitmap, b)

#define CK_BITMAP_COUNT(a, b) \
        ck_bitmap_count(&(a)->bitmap, b)

#define CK_BITMAP_COUNT_INTERSECT(a, b, c) \
        ck_bitmap_count_intersect(&(a)->bitmap, b, c)

#define CK_BITMAP_BITS(a) \
        ck_bitmap_bits(&(a)->bitmap)

#define CK_BITMAP_BUFFER(a) \
        ck_bitmap_buffer(&(a)->bitmap)

#define CK_BITMAP(a) \
        (&(a)->bitmap)

struct ck_bitmap {
        unsigned int n_bits;
        unsigned int map[];
};
typedef struct ck_bitmap ck_bitmap_t;

struct ck_bitmap_iterator {
        unsigned int cache;
        unsigned int n_block;
        unsigned int n_limit;
};
typedef struct ck_bitmap_iterator ck_bitmap_iterator_t;

CK_CC_INLINE static unsigned int
ck_bitmap_base(unsigned int n_bits)
{

        return CK_BITMAP_BLOCKS(n_bits) * sizeof(unsigned int);
}

/*
 * Returns the required number of bytes for a ck_bitmap_t object supporting the
 * specified number of bits.
 */
CK_CC_INLINE static unsigned int
ck_bitmap_size(unsigned int n_bits)
{

        return ck_bitmap_base(n_bits) + sizeof(struct ck_bitmap);
}

/*
 * Returns total number of bits in specified bitmap.
 */
CK_CC_INLINE static unsigned int
ck_bitmap_bits(const struct ck_bitmap *bitmap)
{

        return bitmap->n_bits;
}

/*
 * Returns a pointer to the bit buffer associated
 * with the specified bitmap.
 */
CK_CC_INLINE static void *
ck_bitmap_buffer(struct ck_bitmap *bitmap)
{

        return bitmap->map;
}

/*
 * Sets the bit at the offset specified in the second argument.
 */
CK_CC_INLINE static void
ck_bitmap_set(struct ck_bitmap *bitmap, unsigned int n)
{

        ck_pr_or_uint(CK_BITMAP_PTR(bitmap->map, n), CK_BITMAP_BIT(n));
        return;
}

/*
 * Performs a test-and-set operation at the offset specified in the
 * second argument.
 * Returns true if the bit at the specified offset was already set,
 * false otherwise.
 */
CK_CC_INLINE static bool
ck_bitmap_bts(struct ck_bitmap *bitmap, unsigned int n)
{

        return ck_pr_bts_uint(CK_BITMAP_PTR(bitmap->map, n),
            CK_BITMAP_OFFSET(n));
}

/*
 * Resets the bit at the offset specified in the second argument.
 */
CK_CC_INLINE static void
ck_bitmap_reset(struct ck_bitmap *bitmap, unsigned int n)
{

        ck_pr_and_uint(CK_BITMAP_PTR(bitmap->map, n), ~CK_BITMAP_BIT(n));
        return;
}

/*
 * Determines whether the bit at offset specified in the
 * second argument is set.
 */
CK_CC_INLINE static bool
ck_bitmap_test(const struct ck_bitmap *bitmap, unsigned int n)
{
        unsigned int block;

        block = ck_pr_load_uint(CK_BITMAP_PTR(bitmap->map, n));
        return block & CK_BITMAP_BIT(n);
}

/*
 * Combines bits from second bitmap into the first bitmap. This is not a
 * linearized operation with respect to the complete bitmap.
 */
CK_CC_INLINE static void
ck_bitmap_union(struct ck_bitmap *dst, const struct ck_bitmap *src)
{
        unsigned int n;
        unsigned int n_buckets = dst->n_bits;

        if (src->n_bits < dst->n_bits)
                n_buckets = src->n_bits;

        n_buckets = CK_BITMAP_BLOCKS(n_buckets);
        for (n = 0; n < n_buckets; n++) {
                ck_pr_or_uint(&dst->map[n],
                    ck_pr_load_uint(&src->map[n]));
        }

        return;
}

/*
 * Intersects bits from second bitmap into the first bitmap. This is
 * not a linearized operation with respect to the complete bitmap.
 * Any trailing bit in dst is cleared.
 */
CK_CC_INLINE static void
ck_bitmap_intersection(struct ck_bitmap *dst, const struct ck_bitmap *src)
{
        unsigned int n;
        unsigned int n_buckets = dst->n_bits;
        unsigned int n_intersect = n_buckets;

        if (src->n_bits < n_intersect)
                n_intersect = src->n_bits;

        n_buckets = CK_BITMAP_BLOCKS(n_buckets);
        n_intersect = CK_BITMAP_BLOCKS(n_intersect);
        for (n = 0; n < n_intersect; n++) {
                ck_pr_and_uint(&dst->map[n],
                    ck_pr_load_uint(&src->map[n]));
        }

        for (; n < n_buckets; n++)
                ck_pr_store_uint(&dst->map[n], 0);

        return;
}

/*
 * Intersects the complement of bits from second bitmap into the first
 * bitmap. This is not a linearized operation with respect to the
 * complete bitmap. Any trailing bit in dst is left as is.
 */
CK_CC_INLINE static void
ck_bitmap_intersection_negate(struct ck_bitmap *dst,
    const struct ck_bitmap *src)
{
        unsigned int n;
        unsigned int n_intersect = dst->n_bits;

        if (src->n_bits < n_intersect)
                n_intersect = src->n_bits;

        n_intersect = CK_BITMAP_BLOCKS(n_intersect);
        for (n = 0; n < n_intersect; n++) {
                ck_pr_and_uint(&dst->map[n],
                    (~ck_pr_load_uint(&src->map[n])));
        }

        return;
}

/*
 * Resets all bits in the provided bitmap. This is not a linearized
 * operation in ck_bitmap.
 */
CK_CC_INLINE static void
ck_bitmap_clear(struct ck_bitmap *bitmap)
{
        unsigned int i;
        unsigned int n_buckets = ck_bitmap_base(bitmap->n_bits) /
            sizeof(unsigned int);

        for (i = 0; i < n_buckets; i++)
                ck_pr_store_uint(&bitmap->map[i], 0);

        return;
}

/*
 * Returns true if the first limit bits in bitmap are cleared. If
 * limit is greater than the bitmap size, limit is truncated to that
 * size.
 */
CK_CC_INLINE static bool
ck_bitmap_empty(const ck_bitmap_t *bitmap, unsigned int limit)
{
        unsigned int i, words, slop;

        if (limit > bitmap->n_bits)
                limit = bitmap->n_bits;

        words = limit / CK_BITMAP_BLOCK;
        slop = limit % CK_BITMAP_BLOCK;
        for (i = 0; i < words; i++) {
                if (ck_pr_load_uint(&bitmap->map[i]) != 0) {
                        return false;
                }
        }

        if (slop > 0) {
                unsigned int word;

                word = ck_pr_load_uint(&bitmap->map[i]);
                if ((word & ((1U << slop) - 1)) != 0)
                        return false;
        }

        return true;
}

/*
 * Returns true if the first limit bits in bitmap are set. If limit
 * is greater than the bitmap size, limit is truncated to that size.
 */
CK_CC_UNUSED static bool
ck_bitmap_full(const ck_bitmap_t *bitmap, unsigned int limit)
{
        unsigned int i, slop, words;

        if (limit > bitmap->n_bits) {
                limit = bitmap->n_bits;
        }

        words = limit / CK_BITMAP_BLOCK;
        slop = limit % CK_BITMAP_BLOCK;
        for (i = 0; i < words; i++) {
                if (ck_pr_load_uint(&bitmap->map[i]) != -1U)
                        return false;
        }

        if (slop > 0) {
                unsigned int word;

                word = ~ck_pr_load_uint(&bitmap->map[i]);
                if ((word & ((1U << slop) - 1)) != 0)
                        return false;
        }
        return true;
}

/*
 * Returns the number of set bits in the bitmap, up to (and excluding)
 * limit. If limit is greater than the bitmap size, it is truncated
 * to that size.
 */
CK_CC_INLINE static unsigned int
ck_bitmap_count(const ck_bitmap_t *bitmap, unsigned int limit)
{
        unsigned int count, i, slop, words;

        if (limit > bitmap->n_bits)
                limit = bitmap->n_bits;

        words = limit / CK_BITMAP_BLOCK;
        slop = limit % CK_BITMAP_BLOCK;
        for (i = 0, count = 0; i < words; i++)
                count += ck_cc_popcount(ck_pr_load_uint(&bitmap->map[i]));

        if (slop > 0) {
                unsigned int word;

                word = ck_pr_load_uint(&bitmap->map[i]);
                count += ck_cc_popcount(word & ((1U << slop) - 1));
        }
        return count;
}

/*
 * Returns the number of set bits in the intersection of two bitmaps,
 * up to (and excluding) limit. If limit is greater than either bitmap
 * size, it is truncated to the smallest.
 */
CK_CC_INLINE static unsigned int
ck_bitmap_count_intersect(const ck_bitmap_t *x, const ck_bitmap_t *y,
    unsigned int limit)
{
        unsigned int count, i, slop, words;

        if (limit > x->n_bits)
                limit = x->n_bits;

        if (limit > y->n_bits)
                limit = y->n_bits;

        words = limit / CK_BITMAP_BLOCK;
        slop = limit % CK_BITMAP_BLOCK;
        for (i = 0, count = 0; i < words; i++) {
                unsigned int xi, yi;

                xi = ck_pr_load_uint(&x->map[i]);
                yi = ck_pr_load_uint(&y->map[i]);
                count += ck_cc_popcount(xi & yi);
        }

        if (slop > 0) {
                unsigned int word, xi, yi;

                xi = ck_pr_load_uint(&x->map[i]);
                yi = ck_pr_load_uint(&y->map[i]);
                word = xi & yi;
                count += ck_cc_popcount(word & ((1U << slop) - 1));
        }
        return count;
}

/*
 * Initializes a ck_bitmap pointing to a region of memory with
 * ck_bitmap_size(n_bits) bytes. Third argument determines whether
 * default bit value is 1 (true) or 0 (false).
 */
CK_CC_INLINE static void
ck_bitmap_init(struct ck_bitmap *bitmap,
    unsigned int n_bits,
    bool set)
{
        unsigned int base = ck_bitmap_base(n_bits);

        bitmap->n_bits = n_bits;
        memset(bitmap->map, -(int)set, base);

        if (set == true) {
                unsigned int b = n_bits % CK_BITMAP_BLOCK;

                if (b == 0)
                        return;

                *CK_BITMAP_PTR(bitmap->map, n_bits - 1) &= (1U << b) - 1U;
        }

        return;
}

/*
 * Initialize iterator for use with provided bitmap.
 */
CK_CC_INLINE static void
ck_bitmap_iterator_init(struct ck_bitmap_iterator *i,
    const struct ck_bitmap *bitmap)
{

        i->n_block = 0;
        i->n_limit = CK_BITMAP_BLOCKS(bitmap->n_bits);
        if (i->n_limit > 0) {
                i->cache = ck_pr_load_uint(&bitmap->map[0]);
        } else {
                i->cache = 0;
        }
        return;
}

/*
 * Iterate to next bit.
 */
CK_CC_INLINE static bool
ck_bitmap_next(const struct ck_bitmap *bitmap,
    struct ck_bitmap_iterator *i,
    unsigned int *bit)
{
        unsigned int cache = i->cache;
        unsigned int n_block = i->n_block;
        unsigned int n_limit = i->n_limit;

        if (cache == 0) {
                if (n_block >= n_limit)
                        return false;

                for (n_block++; n_block < n_limit; n_block++) {
                        cache = ck_pr_load_uint(&bitmap->map[n_block]);
                        if (cache != 0)
                                goto non_zero;
                }

                i->cache = 0;
                i->n_block = n_block;
                return false;
        }

non_zero:
        *bit = CK_BITMAP_BLOCK * n_block + ck_cc_ctz(cache);
        i->cache = cache & (cache - 1);
        i->n_block = n_block;
        return true;
}

#endif /* CK_BITMAP_H */
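
CK_BITMAP_INSTANCE statically sizes the map for a fixed bit count, and the
iterator walks set bits in ascending order. A minimal sketch using only the
macros declared above:

    static CK_BITMAP_INSTANCE(64) map;  /* storage for 64 bits */

    static void
    example(void)
    {
            ck_bitmap_iterator_t it;
            unsigned int bit;

            CK_BITMAP_INIT(&map, 64, false);        /* all bits initially clear */
            CK_BITMAP_SET(&map, 3);
            CK_BITMAP_SET(&map, 17);

            CK_BITMAP_ITERATOR_INIT(&it, &map);
            while (CK_BITMAP_NEXT(&map, &it, &bit) == true) {
                    /* visits bit == 3, then bit == 17 */
            }
    }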

sys/contrib/ck/include/ck_brlock.h (new file, 279 lines)
@@ -0,0 +1,279 @@
/*
 * Copyright 2011-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_BRLOCK_H
#define CK_BRLOCK_H

/*
 * Big reader spinlocks provide cache-local contention-free read
 * lock acquisition in the absence of writers. This comes at the
 * cost of O(n) write lock acquisition. They were first implemented
 * in the Linux kernel by Ingo Molnar and David S. Miller around the
 * year 2000.
 *
 * This implementation is thread-agnostic, which comes at the cost
 * of larger reader objects due to necessary linkage overhead. In
 * order to cut down on TLB pressure, it is recommended to allocate
 * these objects on the same page.
 */

#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

struct ck_brlock_reader {
        unsigned int n_readers;
        struct ck_brlock_reader *previous;
        struct ck_brlock_reader *next;
};
typedef struct ck_brlock_reader ck_brlock_reader_t;

#define CK_BRLOCK_READER_INITIALIZER {0}

struct ck_brlock {
        struct ck_brlock_reader *readers;
        unsigned int writer;
};
typedef struct ck_brlock ck_brlock_t;

#define CK_BRLOCK_INITIALIZER {NULL, false}

CK_CC_INLINE static void
ck_brlock_init(struct ck_brlock *br)
{

        br->readers = NULL;
        br->writer = false;
        ck_pr_barrier();
        return;
}

CK_CC_INLINE static void
ck_brlock_write_lock(struct ck_brlock *br)
{
        struct ck_brlock_reader *cursor;

        /*
         * As the frequency of write acquisitions should be low,
         * there is no point to more advanced contention avoidance.
         */
        while (ck_pr_fas_uint(&br->writer, true) == true)
                ck_pr_stall();

        ck_pr_fence_atomic_load();

        /* The reader list is protected under the writer br. */
        for (cursor = br->readers; cursor != NULL; cursor = cursor->next) {
                while (ck_pr_load_uint(&cursor->n_readers) != 0)
                        ck_pr_stall();
        }

        ck_pr_fence_lock();
        return;
}

CK_CC_INLINE static void
ck_brlock_write_unlock(struct ck_brlock *br)
{

        ck_pr_fence_unlock();
        ck_pr_store_uint(&br->writer, false);
        return;
}

CK_CC_INLINE static bool
ck_brlock_write_trylock(struct ck_brlock *br, unsigned int factor)
{
        struct ck_brlock_reader *cursor;
        unsigned int steps = 0;

        while (ck_pr_fas_uint(&br->writer, true) == true) {
                if (++steps >= factor)
                        return false;

                ck_pr_stall();
        }

        /*
         * We do not require a strict fence here as atomic RMW operations
         * are serializing.
         */
        ck_pr_fence_atomic_load();

        for (cursor = br->readers; cursor != NULL; cursor = cursor->next) {
                while (ck_pr_load_uint(&cursor->n_readers) != 0) {
                        if (++steps >= factor) {
                                ck_brlock_write_unlock(br);
                                return false;
                        }

                        ck_pr_stall();
                }
        }

        ck_pr_fence_lock();
        return true;
}

CK_CC_INLINE static void
ck_brlock_read_register(struct ck_brlock *br, struct ck_brlock_reader *reader)
{

        reader->n_readers = 0;
        reader->previous = NULL;

        /* Implicit compiler barrier. */
        ck_brlock_write_lock(br);

        reader->next = ck_pr_load_ptr(&br->readers);
        if (reader->next != NULL)
                reader->next->previous = reader;
        ck_pr_store_ptr(&br->readers, reader);

        ck_brlock_write_unlock(br);
        return;
}

CK_CC_INLINE static void
ck_brlock_read_unregister(struct ck_brlock *br, struct ck_brlock_reader *reader)
{

        ck_brlock_write_lock(br);

        if (reader->next != NULL)
                reader->next->previous = reader->previous;

        if (reader->previous != NULL)
                reader->previous->next = reader->next;
        else
                br->readers = reader->next;

        ck_brlock_write_unlock(br);
        return;
}

CK_CC_INLINE static void
ck_brlock_read_lock(struct ck_brlock *br, struct ck_brlock_reader *reader)
{

        if (reader->n_readers >= 1) {
                ck_pr_store_uint(&reader->n_readers, reader->n_readers + 1);
                return;
        }

        for (;;) {
                while (ck_pr_load_uint(&br->writer) == true)
                        ck_pr_stall();

#if defined(__x86__) || defined(__x86_64__)
                ck_pr_fas_uint(&reader->n_readers, 1);

                /*
                 * Serialize reader counter update with respect to load of
                 * writer.
                 */
                ck_pr_fence_atomic_load();
#else
                ck_pr_store_uint(&reader->n_readers, 1);

                /*
                 * Serialize reader counter update with respect to load of
                 * writer.
                 */
                ck_pr_fence_store_load();
#endif

                if (ck_pr_load_uint(&br->writer) == false)
                        break;

                ck_pr_store_uint(&reader->n_readers, 0);
        }

        ck_pr_fence_lock();
        return;
}

CK_CC_INLINE static bool
ck_brlock_read_trylock(struct ck_brlock *br,
    struct ck_brlock_reader *reader,
    unsigned int factor)
{
        unsigned int steps = 0;

        if (reader->n_readers >= 1) {
                ck_pr_store_uint(&reader->n_readers, reader->n_readers + 1);
                return true;
        }

        for (;;) {
                while (ck_pr_load_uint(&br->writer) == true) {
                        if (++steps >= factor)
                                return false;

                        ck_pr_stall();
                }

#if defined(__x86__) || defined(__x86_64__)
                ck_pr_fas_uint(&reader->n_readers, 1);

                /*
                 * Serialize reader counter update with respect to load of
                 * writer.
                 */
                ck_pr_fence_atomic_load();
#else
                ck_pr_store_uint(&reader->n_readers, 1);

                /*
                 * Serialize reader counter update with respect to load of
                 * writer.
                 */
                ck_pr_fence_store_load();
#endif

                if (ck_pr_load_uint(&br->writer) == false)
                        break;

                ck_pr_store_uint(&reader->n_readers, 0);

                if (++steps >= factor)
                        return false;
        }

        ck_pr_fence_lock();
        return true;
}

CK_CC_INLINE static void
ck_brlock_read_unlock(struct ck_brlock_reader *reader)
{

        ck_pr_fence_unlock();
        ck_pr_store_uint(&reader->n_readers, reader->n_readers - 1);
        return;
}

#endif /* CK_BRLOCK_H */
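
Each reader thread registers a reader object once and then takes the read
path without touching shared cache lines beyond its own counter. A sketch
(in real use the reader object stays registered for the thread's lifetime;
per-call registration here is only for brevity):

    static ck_brlock_t lock = CK_BRLOCK_INITIALIZER;

    static void
    reader_thread(void)
    {
            ck_brlock_reader_t self = CK_BRLOCK_READER_INITIALIZER;

            ck_brlock_read_register(&lock, &self);  /* once per thread */
            ck_brlock_read_lock(&lock, &self);
            /* read-side critical section */
            ck_brlock_read_unlock(&self);
            ck_brlock_read_unregister(&lock, &self);
    }

    static void
    writer_thread(void)
    {
            ck_brlock_write_lock(&lock);    /* waits for every registered reader */
            /* exclusive critical section */
            ck_brlock_write_unlock(&lock);
    }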

sys/contrib/ck/include/ck_bytelock.h (new file, 196 lines)
@@ -0,0 +1,196 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_BYTELOCK_H
#define CK_BYTELOCK_H

/*
 * The implementations here are derived from the work described in:
 *     Dice, D. and Shavit, N. 2010. TLRW: return of the read-write lock.
 *     In Proceedings of the 22nd ACM Symposium on Parallelism in Algorithms
 *     and Architectures (Thira, Santorini, Greece, June 13 - 15, 2010).
 *     SPAA '10. ACM, New York, NY, 284-293.
 */

#include <ck_cc.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>
#include <ck_limits.h>

struct ck_bytelock {
        unsigned int owner;
        unsigned int n_readers;
        uint8_t readers[CK_MD_CACHELINE - sizeof(unsigned int) * 2] CK_CC_ALIGN(8);
};
typedef struct ck_bytelock ck_bytelock_t;

#define CK_BYTELOCK_INITIALIZER { 0, 0, {0} }
#define CK_BYTELOCK_UNSLOTTED UINT_MAX

CK_CC_INLINE static void
ck_bytelock_init(struct ck_bytelock *bytelock)
{
        unsigned int i;

        bytelock->owner = 0;
        bytelock->n_readers = 0;
        for (i = 0; i < sizeof bytelock->readers; i++)
                bytelock->readers[i] = false;

        ck_pr_barrier();
        return;
}

#ifdef CK_F_PR_LOAD_64
#define CK_BYTELOCK_LENGTH sizeof(uint64_t)
#define CK_BYTELOCK_LOAD ck_pr_load_64
#define CK_BYTELOCK_TYPE uint64_t
#elif defined(CK_F_PR_LOAD_32)
#define CK_BYTELOCK_LENGTH sizeof(uint32_t)
#define CK_BYTELOCK_LOAD ck_pr_load_32
#define CK_BYTELOCK_TYPE uint32_t
#else
#error Unsupported platform.
#endif

CK_CC_INLINE static void
ck_bytelock_write_lock(struct ck_bytelock *bytelock, unsigned int slot)
{
        CK_BYTELOCK_TYPE *readers = (void *)bytelock->readers;
        unsigned int i;

        /* Announce upcoming writer acquisition. */
        while (ck_pr_cas_uint(&bytelock->owner, 0, slot) == false)
                ck_pr_stall();

        /* If we are slotted, we might be upgrading from a read lock. */
        if (slot <= sizeof bytelock->readers)
                ck_pr_store_8(&bytelock->readers[slot - 1], false);

        /*
         * Wait for slotted readers to drain out. This also provides the
         * lock acquire semantics.
         */
        ck_pr_fence_atomic_load();

        for (i = 0; i < sizeof(bytelock->readers) / CK_BYTELOCK_LENGTH; i++) {
                while (CK_BYTELOCK_LOAD(&readers[i]) != false)
                        ck_pr_stall();
        }

        /* Wait for unslotted readers to drain out. */
        while (ck_pr_load_uint(&bytelock->n_readers) != 0)
                ck_pr_stall();

        ck_pr_fence_lock();
        return;
}

#undef CK_BYTELOCK_LENGTH
#undef CK_BYTELOCK_LOAD
#undef CK_BYTELOCK_TYPE

CK_CC_INLINE static void
ck_bytelock_write_unlock(struct ck_bytelock *bytelock)
{

        ck_pr_fence_unlock();
        ck_pr_store_uint(&bytelock->owner, 0);
        return;
}

CK_CC_INLINE static void
ck_bytelock_read_lock(struct ck_bytelock *bytelock, unsigned int slot)
{

        if (ck_pr_load_uint(&bytelock->owner) == slot) {
                ck_pr_store_8(&bytelock->readers[slot - 1], true);
                ck_pr_fence_strict_store();
                ck_pr_store_uint(&bytelock->owner, 0);
                return;
        }

        /* Unslotted threads will have to use the readers counter. */
        if (slot > sizeof bytelock->readers) {
                for (;;) {
                        ck_pr_inc_uint(&bytelock->n_readers);
                        ck_pr_fence_atomic_load();
                        if (ck_pr_load_uint(&bytelock->owner) == 0)
                                break;
                        ck_pr_dec_uint(&bytelock->n_readers);

                        while (ck_pr_load_uint(&bytelock->owner) != 0)
                                ck_pr_stall();
                }

                ck_pr_fence_lock();
                return;
        }

        slot -= 1;
        for (;;) {
#ifdef CK_F_PR_FAA_8
                ck_pr_fas_8(&bytelock->readers[slot], true);
                ck_pr_fence_atomic_load();
#else
                ck_pr_store_8(&bytelock->readers[slot], true);
                ck_pr_fence_store_load();
#endif

                /*
                 * If there is no owner at this point, our slot has
                 * already been published and it is guaranteed no
                 * write acquisition will succeed until we drain out.
                 */
                if (ck_pr_load_uint(&bytelock->owner) == 0)
                        break;

                ck_pr_store_8(&bytelock->readers[slot], false);
                while (ck_pr_load_uint(&bytelock->owner) != 0)
                        ck_pr_stall();
        }

        ck_pr_fence_lock();
        return;
}

CK_CC_INLINE static void
ck_bytelock_read_unlock(struct ck_bytelock *bytelock, unsigned int slot)
{

        ck_pr_fence_unlock();

        if (slot > sizeof bytelock->readers)
                ck_pr_dec_uint(&bytelock->n_readers);
        else
                ck_pr_store_8(&bytelock->readers[slot - 1], false);

        return;
}

#endif /* CK_BYTELOCK_H */
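
Slots are 1-based and must be unique per participating thread; threads
without a reserved slot pass CK_BYTELOCK_UNSLOTTED and fall back to the
shared reader counter. A sketch (the reader/writer wrappers are illustrative):

    static ck_bytelock_t lock = CK_BYTELOCK_INITIALIZER;

    static void
    reader(unsigned int slot)   /* unique, 1-based; or CK_BYTELOCK_UNSLOTTED */
    {
            ck_bytelock_read_lock(&lock, slot);
            /* read-side critical section */
            ck_bytelock_read_unlock(&lock, slot);
    }

    static void
    writer(unsigned int slot)
    {
            ck_bytelock_write_lock(&lock, slot);
            /* exclusive critical section */
            ck_bytelock_write_unlock(&lock);
    }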

sys/contrib/ck/include/ck_cc.h (new file, 180 lines)
@@ -0,0 +1,180 @@
/*
 * Copyright 2009-2015 Samy Al Bahra.
 * Copyright 2014 Paul Khuong.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_CC_H
#define CK_CC_H

#if defined(__GNUC__) || defined(__SUNPRO_C)
#include "gcc/ck_cc.h"
#endif

#ifndef CK_CC_RESTRICT
#define CK_CC_RESTRICT
#endif

#ifndef CK_CC_INLINE
#define CK_CC_INLINE inline
#endif

#ifndef CK_CC_FORCE_INLINE
#define CK_CC_FORCE_INLINE inline
#endif

#define CK_CC_DECONST_PTR(X) ((void *)(uintptr_t)(X))

/*
 * Container function.
 * This relies on (compiler) implementation-defined behavior.
 */
#define CK_CC_CONTAINER(F, T, M, N) \
        CK_CC_INLINE static T * \
        N(F *p) \
        { \
                F *n = p; \
                return (T *)(void *)(((char *)n) - ((size_t)&((T *)0)->M)); \
        }

#define CK_CC_PAD(x) union { char pad[x]; }

#ifndef CK_CC_ALIASED
#define CK_CC_ALIASED
#endif

#ifndef CK_CC_UNUSED
#define CK_CC_UNUSED
#endif

#ifndef CK_CC_USED
#define CK_CC_USED
#endif

#ifndef CK_CC_IMM
#define CK_CC_IMM
#endif

#ifndef CK_CC_PACKED
#define CK_CC_PACKED
#endif

#ifndef CK_CC_WEAKREF
#define CK_CC_WEAKREF
#endif

#ifndef CK_CC_ALIGN
#define CK_CC_ALIGN(X)
#endif

#ifndef CK_CC_CACHELINE
#define CK_CC_CACHELINE
#endif

#ifndef CK_CC_LIKELY
#define CK_CC_LIKELY(x) x
#endif

#ifndef CK_CC_UNLIKELY
#define CK_CC_UNLIKELY(x) x
#endif

#ifndef CK_CC_TYPEOF
#define CK_CC_TYPEOF(X, DEFAULT) (DEFAULT)
#endif

#ifndef CK_F_CC_FFS
#define CK_F_CC_FFS
CK_CC_INLINE static int
ck_cc_ffs(unsigned int x)
{
        unsigned int i;

        if (x == 0)
                return 0;

        for (i = 1; (x & 1) == 0; i++, x >>= 1);

        return i;
}
#endif

#ifndef CK_F_CC_CLZ
#define CK_F_CC_CLZ
#include <ck_limits.h>

CK_CC_INLINE static int
ck_cc_clz(unsigned int x)
{
        unsigned int count, i;

        for (count = 0, i = sizeof(unsigned int) * CHAR_BIT; i > 0; count++) {
                unsigned int bit = 1U << --i;

                if (x & bit)
                        break;
        }

        return count;
}
#endif

#ifndef CK_F_CC_CTZ
#define CK_F_CC_CTZ
CK_CC_INLINE static int
ck_cc_ctz(unsigned int x)
{
        unsigned int i;

        if (x == 0)
                return 0;

        for (i = 0; (x & 1) == 0; i++, x >>= 1);

        return i;
}
#endif

#ifndef CK_F_CC_POPCOUNT
#define CK_F_CC_POPCOUNT
CK_CC_INLINE static int
ck_cc_popcount(unsigned int x)
{
        unsigned int acc;

        for (acc = 0; x != 0; x >>= 1)
                acc += x & 1;

        return acc;
}
#endif

#ifdef __cplusplus
#define CK_CPP_CAST(type, arg) static_cast<type>(arg)
#else
#define CK_CPP_CAST(type, arg) arg
#endif

#endif /* CK_CC_H */
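
CK_CC_CONTAINER generates a container_of-style accessor: given a pointer to a
member, it recovers the enclosing structure. A sketch with hypothetical types
(size_t is assumed to be in scope, e.g. via ck_stddef.h):

    struct node {
            struct node *next;
    };

    struct entry {
            int value;
            struct node link;
    };

    /* Defines: struct entry *entry_container(struct node *) */
    CK_CC_CONTAINER(struct node, struct entry, link, entry_container)

    /* entry_container(&e.link) == &e for any struct entry e. */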

sys/contrib/ck/include/ck_cohort.h (new file, 161 lines)
@@ -0,0 +1,161 @@
/*
 * Copyright 2013-2015 Samy Al Bahra.
 * Copyright 2013 Brendon Scheinman.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_COHORT_H
#define CK_COHORT_H

/*
 * This is an implementation of lock cohorts as described in:
 *     Dice, D.; Marathe, V.; and Shavit, N. 2012.
 *     Lock Cohorting: A General Technique for Designing NUMA Locks
 */

#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stddef.h>

enum ck_cohort_state {
        CK_COHORT_STATE_GLOBAL = 0,
        CK_COHORT_STATE_LOCAL = 1
};

#define CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT 10

#define CK_COHORT_NAME(N) ck_cohort_##N
#define CK_COHORT_INSTANCE(N) struct CK_COHORT_NAME(N)
#define CK_COHORT_INIT(N, C, GL, LL, P) ck_cohort_##N##_init(C, GL, LL, P)
#define CK_COHORT_LOCK(N, C, GC, LC) ck_cohort_##N##_lock(C, GC, LC)
#define CK_COHORT_UNLOCK(N, C, GC, LC) ck_cohort_##N##_unlock(C, GC, LC)
#define CK_COHORT_TRYLOCK(N, C, GLC, LLC, LUC) ck_cohort_##N##_trylock(C, GLC, LLC, LUC)
#define CK_COHORT_LOCKED(N, C, GC, LC) ck_cohort_##N##_locked(C, GC, LC)

#define CK_COHORT_PROTOTYPE(N, GL, GU, GI, LL, LU, LI) \
        CK_COHORT_INSTANCE(N) { \
                void *global_lock; \
                void *local_lock; \
                enum ck_cohort_state release_state; \
                unsigned int waiting_threads; \
                unsigned int acquire_count; \
                unsigned int local_pass_limit; \
        }; \
        \
        CK_CC_INLINE static void \
        ck_cohort_##N##_init(struct ck_cohort_##N *cohort, \
            void *global_lock, void *local_lock, unsigned int pass_limit) \
        { \
                cohort->global_lock = global_lock; \
                cohort->local_lock = local_lock; \
                cohort->release_state = CK_COHORT_STATE_GLOBAL; \
                cohort->waiting_threads = 0; \
                cohort->acquire_count = 0; \
                cohort->local_pass_limit = pass_limit; \
                ck_pr_barrier(); \
                return; \
        } \
        \
        CK_CC_INLINE static void \
        ck_cohort_##N##_lock(CK_COHORT_INSTANCE(N) *cohort, \
            void *global_context, void *local_context) \
        { \
        \
                ck_pr_inc_uint(&cohort->waiting_threads); \
                LL(cohort->local_lock, local_context); \
                ck_pr_dec_uint(&cohort->waiting_threads); \
        \
                if (cohort->release_state == CK_COHORT_STATE_GLOBAL) { \
                        GL(cohort->global_lock, global_context); \
                } \
        \
                ++cohort->acquire_count; \
                return; \
        } \
        \
        CK_CC_INLINE static void \
        ck_cohort_##N##_unlock(CK_COHORT_INSTANCE(N) *cohort, \
            void *global_context, void *local_context) \
        { \
        \
                if (ck_pr_load_uint(&cohort->waiting_threads) > 0 \
                    && cohort->acquire_count < cohort->local_pass_limit) { \
                        cohort->release_state = CK_COHORT_STATE_LOCAL; \
                } else { \
                        GU(cohort->global_lock, global_context); \
                        cohort->release_state = CK_COHORT_STATE_GLOBAL; \
                        cohort->acquire_count = 0; \
                } \
        \
                ck_pr_fence_release(); \
                LU(cohort->local_lock, local_context); \
        \
                return; \
        } \
        \
        CK_CC_INLINE static bool \
        ck_cohort_##N##_locked(CK_COHORT_INSTANCE(N) *cohort, \
            void *global_context, void *local_context) \
        { \
                return GI(cohort->global_lock, global_context) || \
                    LI(cohort->local_lock, local_context); \
        }

#define CK_COHORT_TRYLOCK_PROTOTYPE(N, GL, GU, GI, GTL, LL, LU, LI, LTL) \
        CK_COHORT_PROTOTYPE(N, GL, GU, GI, LL, LU, LI) \
        CK_CC_INLINE static bool \
        ck_cohort_##N##_trylock(CK_COHORT_INSTANCE(N) *cohort, \
            void *global_context, void *local_context, \
            void *local_unlock_context) \
        { \
        \
                bool trylock_result; \
        \
                ck_pr_inc_uint(&cohort->waiting_threads); \
                trylock_result = LTL(cohort->local_lock, local_context); \
                ck_pr_dec_uint(&cohort->waiting_threads); \
                if (trylock_result == false) { \
                        return false; \
                } \
        \
                if (cohort->release_state == CK_COHORT_STATE_GLOBAL && \
                    GTL(cohort->global_lock, global_context) == false) { \
                        LU(cohort->local_lock, local_unlock_context); \
                        return false; \
                } \
        \
                ++cohort->acquire_count; \
                return true; \
        }

#define CK_COHORT_INITIALIZER { \
        .global_lock = NULL, \
        .local_lock = NULL, \
        .release_state = CK_COHORT_STATE_GLOBAL, \
        .waiting_threads = 0, \
        .acquire_count = 0, \
        .local_pass_limit = CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT \
}

#endif /* CK_COHORT_H */
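
The prototype macro is instantiated per lock pairing: callers supply
lock/unlock/locked operations for a global lock and a per-node local lock. A
sketch using ck_spinlock for both tiers (the wrapper names and single local
lock are illustrative assumptions; a real NUMA setup uses one local lock per
node):

    #include <ck_cohort.h>
    #include <ck_spinlock.h>

    /* Adapt ck_spinlock to the (void *lock, void *context) shape expected. */
    static void lock_fn(void *lock, void *context) { (void)context; ck_spinlock_lock(lock); }
    static void unlock_fn(void *lock, void *context) { (void)context; ck_spinlock_unlock(lock); }
    static bool locked_fn(void *lock, void *context) { (void)context; return ck_spinlock_locked(lock); }

    CK_COHORT_PROTOTYPE(numa, lock_fn, unlock_fn, locked_fn,
        lock_fn, unlock_fn, locked_fn)

    static ck_spinlock_t global = CK_SPINLOCK_INITIALIZER;
    static ck_spinlock_t local = CK_SPINLOCK_INITIALIZER;
    static CK_COHORT_INSTANCE(numa) cohort;

    static void
    setup(void)
    {
            CK_COHORT_INIT(numa, &cohort, &global, &local,
                CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
    }

    static void
    critical(void)
    {
            CK_COHORT_LOCK(numa, &cohort, NULL, NULL);
            /* critical section */
            CK_COHORT_UNLOCK(numa, &cohort, NULL, NULL);
    }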

sys/contrib/ck/include/ck_elide.h (new file, 321 lines)
@@ -0,0 +1,321 @@
/*
 * Copyright 2013-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_ELIDE_H
#define CK_ELIDE_H

/*
 * As RTM is currently only supported on TSO x86 architectures,
 * fences have been omitted. They will be necessary for other
 * non-TSO architectures with TM support.
 */

#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_string.h>

/*
 * skip_-prefixed counters represent the number of consecutive
 * elisions to forfeit. retry_-prefixed counters represent the
 * number of elision retries to attempt before forfeit.
 *
 * _busy: Lock was busy
 * _other: Unknown explicit abort
 * _conflict: Data conflict in elision section
 */
struct ck_elide_config {
	unsigned short skip_busy;
	short retry_busy;
	unsigned short skip_other;
	short retry_other;
	unsigned short skip_conflict;
	short retry_conflict;
};

#define CK_ELIDE_CONFIG_DEFAULT_INITIALIZER {	\
	.skip_busy = 5,				\
	.retry_busy = 256,			\
	.skip_other = 3,			\
	.retry_other = 3,			\
	.skip_conflict = 2,			\
	.retry_conflict = 5			\
}

struct ck_elide_stat {
	unsigned int n_fallback;
	unsigned int n_elide;
	unsigned short skip;
};
typedef struct ck_elide_stat ck_elide_stat_t;

#define CK_ELIDE_STAT_INITIALIZER { 0, 0, 0 }

CK_CC_INLINE static void
ck_elide_stat_init(ck_elide_stat_t *st)
{

	memset(st, 0, sizeof(*st));
	return;
}

#ifdef CK_F_PR_RTM
enum _ck_elide_hint {
	CK_ELIDE_HINT_RETRY = 0,
	CK_ELIDE_HINT_SPIN,
	CK_ELIDE_HINT_STOP
};

#define CK_ELIDE_LOCK_BUSY 0xFF

static enum _ck_elide_hint
_ck_elide_fallback(int *retry,
    struct ck_elide_stat *st,
    struct ck_elide_config *c,
    unsigned int status)
{

	st->n_fallback++;
	if (*retry > 0)
		return CK_ELIDE_HINT_RETRY;

	if (st->skip != 0)
		return CK_ELIDE_HINT_STOP;

	if (status & CK_PR_RTM_EXPLICIT) {
		if (CK_PR_RTM_CODE(status) == CK_ELIDE_LOCK_BUSY) {
			st->skip = c->skip_busy;
			*retry = c->retry_busy;
			return CK_ELIDE_HINT_SPIN;
		}

		st->skip = c->skip_other;
		return CK_ELIDE_HINT_STOP;
	}

	if ((status & CK_PR_RTM_RETRY) &&
	    (status & CK_PR_RTM_CONFLICT)) {
		st->skip = c->skip_conflict;
		*retry = c->retry_conflict;
		return CK_ELIDE_HINT_RETRY;
	}

	/*
	 * Capacity, debug and nesting aborts are likely to be invariant
	 * conditions for this acquisition, so take the regular path
	 * instead. If the retry bit is not set, then take the hint.
	 */
	st->skip = USHRT_MAX;
	return CK_ELIDE_HINT_STOP;
}

/*
 * Defines an elision implementation according to the following variables:
 *   N   - Namespace of the elision implementation.
 *   T   - Typename of the mutex.
 *   L_P - Lock predicate; returns false if the resource is available.
 *   L   - Function to call if the resource is unavailable or the
 *         transaction aborts.
 *   U_P - Unlock predicate; returns false if elision failed.
 *   U   - Function to call if the transaction failed.
 */
#define CK_ELIDE_PROTOTYPE(N, T, L_P, L, U_P, U)			\
	CK_CC_INLINE static void					\
	ck_elide_##N##_lock_adaptive(T *lock,				\
	    struct ck_elide_stat *st,					\
	    struct ck_elide_config *c)					\
	{								\
		enum _ck_elide_hint hint;				\
		int retry;						\
									\
		if (CK_CC_UNLIKELY(st->skip != 0)) {			\
			st->skip--;					\
			goto acquire;					\
		}							\
									\
		retry = c->retry_conflict;				\
		do {							\
			unsigned int status = ck_pr_rtm_begin();	\
			if (status == CK_PR_RTM_STARTED) {		\
				if (L_P(lock) == true)			\
					ck_pr_rtm_abort(CK_ELIDE_LOCK_BUSY); \
									\
				return;					\
			}						\
									\
			hint = _ck_elide_fallback(&retry, st, c, status); \
			if (hint == CK_ELIDE_HINT_RETRY)		\
				continue;				\
									\
			if (hint == CK_ELIDE_HINT_SPIN) {		\
				while (--retry != 0) {			\
					if (L_P(lock) == false)		\
						break;			\
									\
					ck_pr_stall();			\
				}					\
									\
				continue;				\
			}						\
									\
			if (hint == CK_ELIDE_HINT_STOP)			\
				break;					\
		} while (CK_CC_LIKELY(--retry > 0));			\
									\
	acquire:							\
		L(lock);						\
		return;							\
	}								\
	CK_CC_INLINE static void					\
	ck_elide_##N##_unlock_adaptive(struct ck_elide_stat *st, T *lock) \
	{								\
									\
		if (U_P(lock) == false) {				\
			ck_pr_rtm_end();				\
			st->skip = 0;					\
			st->n_elide++;					\
		} else {						\
			U(lock);					\
		}							\
									\
		return;							\
	}								\
	CK_CC_INLINE static void					\
	ck_elide_##N##_lock(T *lock)					\
	{								\
									\
		if (ck_pr_rtm_begin() != CK_PR_RTM_STARTED) {		\
			L(lock);					\
			return;						\
		}							\
									\
		if (L_P(lock) == true)					\
			ck_pr_rtm_abort(CK_ELIDE_LOCK_BUSY);		\
									\
		return;							\
	}								\
	CK_CC_INLINE static void					\
	ck_elide_##N##_unlock(T *lock)					\
	{								\
									\
		if (U_P(lock) == false) {				\
			ck_pr_rtm_end();				\
		} else {						\
			U(lock);					\
		}							\
									\
		return;							\
	}

#define CK_ELIDE_TRYLOCK_PROTOTYPE(N, T, TL_P, TL)			\
	CK_CC_INLINE static bool					\
	ck_elide_##N##_trylock(T *lock)					\
	{								\
									\
		if (ck_pr_rtm_begin() != CK_PR_RTM_STARTED)		\
			return false;					\
									\
		if (TL_P(lock) == true)					\
			ck_pr_rtm_abort(CK_ELIDE_LOCK_BUSY);		\
									\
		return true;						\
	}
#else
/*
 * If RTM is not enabled on the target platform (CK_F_PR_RTM), then these
 * elision wrappers call directly into the user-specified lock operations.
 * Unfortunately, the storage costs of both ck_elide_config and ck_elide_stat
 * are still paid (typically a storage cost that is a function of lock objects
 * and thread count).
 */
#define CK_ELIDE_PROTOTYPE(N, T, L_P, L, U_P, U)			\
	CK_CC_INLINE static void					\
	ck_elide_##N##_lock_adaptive(T *lock,				\
	    struct ck_elide_stat *st,					\
	    struct ck_elide_config *c)					\
	{								\
									\
		(void)st;						\
		(void)c;						\
		L(lock);						\
		return;							\
	}								\
	CK_CC_INLINE static void					\
	ck_elide_##N##_unlock_adaptive(struct ck_elide_stat *st,	\
	    T *lock)							\
	{								\
									\
		(void)st;						\
		U(lock);						\
		return;							\
	}								\
	CK_CC_INLINE static void					\
	ck_elide_##N##_lock(T *lock)					\
	{								\
									\
		L(lock);						\
		return;							\
	}								\
	CK_CC_INLINE static void					\
	ck_elide_##N##_unlock(T *lock)					\
	{								\
									\
		U(lock);						\
		return;							\
	}

#define CK_ELIDE_TRYLOCK_PROTOTYPE(N, T, TL_P, TL)			\
	CK_CC_INLINE static bool					\
	ck_elide_##N##_trylock(T *lock)					\
	{								\
									\
		return TL_P(lock);					\
	}
#endif /* !CK_F_PR_RTM */

/*
 * Best-effort elision lock operations. The first argument is the name (N)
 * associated with the implementation and the second is a pointer to the
 * type specified above (T).
 *
 * Unlike the adaptive variant, this interface does not have any retry
 * semantics. In environments where jitter is low, this may yield a tighter
 * fast path.
 */
#define CK_ELIDE_LOCK(NAME, LOCK) ck_elide_##NAME##_lock(LOCK)
#define CK_ELIDE_UNLOCK(NAME, LOCK) ck_elide_##NAME##_unlock(LOCK)
#define CK_ELIDE_TRYLOCK(NAME, LOCK) ck_elide_##NAME##_trylock(LOCK)

/*
 * Adaptive elision lock operations. In addition to the name and a pointer
 * to the lock, you must pass in a pointer to an initialized
 * ck_elide_config structure along with a per-thread stat structure.
 */
#define CK_ELIDE_LOCK_ADAPTIVE(NAME, STAT, CONFIG, LOCK) \
	ck_elide_##NAME##_lock_adaptive(LOCK, STAT, CONFIG)

#define CK_ELIDE_UNLOCK_ADAPTIVE(NAME, STAT, LOCK) \
	ck_elide_##NAME##_unlock_adaptive(STAT, LOCK)

#endif /* CK_ELIDE_H */
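
These macros are easiest to read through a concrete instantiation. The sketch below mirrors how CK wires elision up for its own spinlock; the "skiplock" namespace and the surrounding driver code are illustrative assumptions.

#include <ck_elide.h>
#include <ck_spinlock.h>

/*
 * L_P/U_P is ck_spinlock_locked: under RTM the lock word is read inside
 * the transaction, so a real lock holder aborts elided sections.
 */
CK_ELIDE_PROTOTYPE(skiplock, ck_spinlock_t,
    ck_spinlock_locked, ck_spinlock_lock,
    ck_spinlock_locked, ck_spinlock_unlock)

static ck_spinlock_t lock = CK_SPINLOCK_INITIALIZER;
static struct ck_elide_config config = CK_ELIDE_CONFIG_DEFAULT_INITIALIZER;

static void
critical_section(void)
{
	/* One stat structure per thread; static here is a single-thread shortcut. */
	static ck_elide_stat_t st = CK_ELIDE_STAT_INITIALIZER;

	CK_ELIDE_LOCK_ADAPTIVE(skiplock, &st, &config, &lock);
	/* ... protected operations ... */
	CK_ELIDE_UNLOCK_ADAPTIVE(skiplock, &st, &lock);
}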
207
sys/contrib/ck/include/ck_epoch.h
Normal file
@ -0,0 +1,207 @@
/*
 * Copyright 2011-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_EPOCH_H
#define CK_EPOCH_H

/*
 * The implementation here is inspired by the work described in:
 *   Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *   of Cambridge Computing Laboratory.
 */

#include <ck_cc.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stack.h>
#include <ck_stdbool.h>

#ifndef CK_EPOCH_LENGTH
#define CK_EPOCH_LENGTH 4
#endif

/*
 * This is used for sense detection with respect to concurrent
 * epoch sections.
 */
#define CK_EPOCH_SENSE (2)

struct ck_epoch_entry;
typedef struct ck_epoch_entry ck_epoch_entry_t;
typedef void ck_epoch_cb_t(ck_epoch_entry_t *);

/*
 * This should be embedded into objects you wish to be the target of
 * ck_epoch_cb_t functions (with ck_epoch_call).
 */
struct ck_epoch_entry {
	ck_epoch_cb_t *function;
	ck_stack_entry_t stack_entry;
};

/*
 * A section object may be passed to every begin-end pair to allow for
 * forward progress guarantees within prolonged active sections.
 */
struct ck_epoch_section {
	unsigned int bucket;
};
typedef struct ck_epoch_section ck_epoch_section_t;

/*
 * Return pointer to ck_epoch_entry container object.
 */
#define CK_EPOCH_CONTAINER(T, M, N) \
	CK_CC_CONTAINER(struct ck_epoch_entry, T, M, N)

struct ck_epoch_ref {
	unsigned int epoch;
	unsigned int count;
};

struct ck_epoch_record {
	struct ck_epoch *global;
	unsigned int state;
	unsigned int epoch;
	unsigned int active;
	struct {
		struct ck_epoch_ref bucket[CK_EPOCH_SENSE];
	} local CK_CC_CACHELINE;
	unsigned int n_pending;
	unsigned int n_peak;
	unsigned long n_dispatch;
	ck_stack_t pending[CK_EPOCH_LENGTH];
	ck_stack_entry_t record_next;
} CK_CC_CACHELINE;
typedef struct ck_epoch_record ck_epoch_record_t;

struct ck_epoch {
	unsigned int epoch;
	char pad[CK_MD_CACHELINE - sizeof(unsigned int)];
	ck_stack_t records;
	unsigned int n_free;
};
typedef struct ck_epoch ck_epoch_t;

/*
 * Internal functions.
 */
void _ck_epoch_addref(ck_epoch_record_t *, ck_epoch_section_t *);
void _ck_epoch_delref(ck_epoch_record_t *, ck_epoch_section_t *);

/*
 * Marks the beginning of an epoch-protected section.
 */
CK_CC_FORCE_INLINE static void
ck_epoch_begin(ck_epoch_record_t *record, ck_epoch_section_t *section)
{
	struct ck_epoch *epoch = record->global;

	/*
	 * Only observe a new epoch if the thread is not recursing into
	 * a read section.
	 */
	if (record->active == 0) {
		unsigned int g_epoch;

		/*
		 * It is possible for loads to be re-ordered before the store
		 * is committed into the caller's epoch and active fields.
		 * For this reason, store-to-load serialization is necessary.
		 */
#if defined(CK_MD_TSO)
		ck_pr_fas_uint(&record->active, 1);
		ck_pr_fence_atomic_load();
#else
		ck_pr_store_uint(&record->active, 1);
		ck_pr_fence_memory();
#endif

		/*
		 * This load is allowed to be re-ordered prior to setting the
		 * active flag due to the monotonic nature of the global
		 * epoch. However, stale values lead to measurable performance
		 * degradation in some torture tests, so we disallow early
		 * loads of the global epoch.
		 */
		g_epoch = ck_pr_load_uint(&epoch->epoch);
		ck_pr_store_uint(&record->epoch, g_epoch);
	} else {
		ck_pr_store_uint(&record->active, record->active + 1);
	}

	if (section != NULL)
		_ck_epoch_addref(record, section);

	return;
}

/*
 * Marks the end of an epoch-protected section.
 */
CK_CC_FORCE_INLINE static void
ck_epoch_end(ck_epoch_record_t *record, ck_epoch_section_t *section)
{

	ck_pr_fence_release();
	ck_pr_store_uint(&record->active, record->active - 1);

	if (section != NULL)
		_ck_epoch_delref(record, section);

	return;
}

/*
 * Defers the execution of the function pointed to by the "function"
 * argument until the epoch counter has completed a loop. This allows
 * for a non-blocking deferral.
 */
CK_CC_FORCE_INLINE static void
ck_epoch_call(ck_epoch_record_t *record,
    ck_epoch_entry_t *entry,
    ck_epoch_cb_t *function)
{
	struct ck_epoch *epoch = record->global;
	unsigned int e = ck_pr_load_uint(&epoch->epoch);
	unsigned int offset = e & (CK_EPOCH_LENGTH - 1);

	record->n_pending++;
	entry->function = function;
	ck_stack_push_spnc(&record->pending[offset], &entry->stack_entry);
	return;
}

void ck_epoch_init(ck_epoch_t *);
ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *);
void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *);
void ck_epoch_unregister(ck_epoch_record_t *);
bool ck_epoch_poll(ck_epoch_record_t *);
void ck_epoch_synchronize(ck_epoch_record_t *);
void ck_epoch_barrier(ck_epoch_record_t *);
void ck_epoch_reclaim(ck_epoch_record_t *);

#endif /* CK_EPOCH_H */
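
To make the lifecycle concrete, here is a sketch of the usual pattern: register a record per thread, bracket read-side sections with begin/end, and retire objects through ck_epoch_call. The struct node type and its destructor are illustrative assumptions.

#include <ck_epoch.h>
#include <stdlib.h>

struct node {
	int value;
	ck_epoch_entry_t epoch_entry;
};

/* Generates node_container(), mapping an epoch entry back to its node. */
CK_EPOCH_CONTAINER(struct node, epoch_entry, node_container)

static void
node_destroy(ck_epoch_entry_t *e)
{

	free(node_container(e));
}

static ck_epoch_t epoch;
static ck_epoch_record_t record;	/* one per participating thread */

static void
setup(void)
{

	ck_epoch_init(&epoch);
	ck_epoch_register(&epoch, &record);
}

static int
read_value(struct node *n)
{
	int v;

	ck_epoch_begin(&record, NULL);
	v = n->value;	/* n cannot be reclaimed inside the section */
	ck_epoch_end(&record, NULL);
	return v;
}

static void
retire(struct node *n)
{

	/* Deferred, non-blocking reclamation once a grace period elapses. */
	ck_epoch_call(&record, &n->epoch_entry, node_destroy);
	ck_epoch_poll(&record);
}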
478
sys/contrib/ck/include/ck_fifo.h
Normal file
@ -0,0 +1,478 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * Copyright 2011 David Joseph.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_FIFO_H
#define CK_FIFO_H

#include <ck_cc.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_spinlock.h>
#include <ck_stddef.h>

#ifndef CK_F_FIFO_SPSC
#define CK_F_FIFO_SPSC
struct ck_fifo_spsc_entry {
	void *value;
	struct ck_fifo_spsc_entry *next;
};
typedef struct ck_fifo_spsc_entry ck_fifo_spsc_entry_t;

struct ck_fifo_spsc {
	ck_spinlock_t m_head;
	struct ck_fifo_spsc_entry *head;
	char pad[CK_MD_CACHELINE - sizeof(struct ck_fifo_spsc_entry *) - sizeof(ck_spinlock_t)];
	ck_spinlock_t m_tail;
	struct ck_fifo_spsc_entry *tail;
	struct ck_fifo_spsc_entry *head_snapshot;
	struct ck_fifo_spsc_entry *garbage;
};
typedef struct ck_fifo_spsc ck_fifo_spsc_t;

CK_CC_INLINE static bool
ck_fifo_spsc_enqueue_trylock(struct ck_fifo_spsc *fifo)
{

	return ck_spinlock_trylock(&fifo->m_tail);
}

CK_CC_INLINE static void
ck_fifo_spsc_enqueue_lock(struct ck_fifo_spsc *fifo)
{

	ck_spinlock_lock(&fifo->m_tail);
	return;
}

CK_CC_INLINE static void
ck_fifo_spsc_enqueue_unlock(struct ck_fifo_spsc *fifo)
{

	ck_spinlock_unlock(&fifo->m_tail);
	return;
}

CK_CC_INLINE static bool
ck_fifo_spsc_dequeue_trylock(struct ck_fifo_spsc *fifo)
{

	return ck_spinlock_trylock(&fifo->m_head);
}

CK_CC_INLINE static void
ck_fifo_spsc_dequeue_lock(struct ck_fifo_spsc *fifo)
{

	ck_spinlock_lock(&fifo->m_head);
	return;
}

CK_CC_INLINE static void
ck_fifo_spsc_dequeue_unlock(struct ck_fifo_spsc *fifo)
{

	ck_spinlock_unlock(&fifo->m_head);
	return;
}

CK_CC_INLINE static void
ck_fifo_spsc_init(struct ck_fifo_spsc *fifo, struct ck_fifo_spsc_entry *stub)
{

	ck_spinlock_init(&fifo->m_head);
	ck_spinlock_init(&fifo->m_tail);

	stub->next = NULL;
	fifo->head = fifo->tail = fifo->head_snapshot = fifo->garbage = stub;
	return;
}

CK_CC_INLINE static void
ck_fifo_spsc_deinit(struct ck_fifo_spsc *fifo, struct ck_fifo_spsc_entry **garbage)
{

	*garbage = fifo->head;
	fifo->head = fifo->tail = NULL;
	return;
}

CK_CC_INLINE static void
ck_fifo_spsc_enqueue(struct ck_fifo_spsc *fifo,
    struct ck_fifo_spsc_entry *entry,
    void *value)
{

	entry->value = value;
	entry->next = NULL;

	/* If stub->next is visible, guarantee that entry is consistent. */
	ck_pr_fence_store();
	ck_pr_store_ptr(&fifo->tail->next, entry);
	fifo->tail = entry;
	return;
}

CK_CC_INLINE static bool
ck_fifo_spsc_dequeue(struct ck_fifo_spsc *fifo, void *value)
{
	struct ck_fifo_spsc_entry *entry;

	/*
	 * The head pointer is guaranteed to always point to a stub entry.
	 * If the stub entry does not point to an entry, then the queue is
	 * empty.
	 */
	entry = ck_pr_load_ptr(&fifo->head->next);
	if (entry == NULL)
		return false;

	/* If entry is visible, guarantee store to value is visible. */
	ck_pr_store_ptr_unsafe(value, entry->value);
	ck_pr_fence_store();
	ck_pr_store_ptr(&fifo->head, entry);
	return true;
}

/*
 * Recycle a node. This technique for recycling nodes is based on
 * Dmitriy Vyukov's work.
 */
CK_CC_INLINE static struct ck_fifo_spsc_entry *
ck_fifo_spsc_recycle(struct ck_fifo_spsc *fifo)
{
	struct ck_fifo_spsc_entry *garbage;

	if (fifo->head_snapshot == fifo->garbage) {
		fifo->head_snapshot = ck_pr_load_ptr(&fifo->head);
		if (fifo->head_snapshot == fifo->garbage)
			return NULL;
	}

	garbage = fifo->garbage;
	fifo->garbage = garbage->next;
	return garbage;
}

CK_CC_INLINE static bool
ck_fifo_spsc_isempty(struct ck_fifo_spsc *fifo)
{
	struct ck_fifo_spsc_entry *head = ck_pr_load_ptr(&fifo->head);
	return ck_pr_load_ptr(&head->next) == NULL;
}

#define CK_FIFO_SPSC_ISEMPTY(f) ((f)->head->next == NULL)
#define CK_FIFO_SPSC_FIRST(f)   ((f)->head->next)
#define CK_FIFO_SPSC_NEXT(m)    ((m)->next)
#define CK_FIFO_SPSC_SPARE(f)   ((f)->head)
#define CK_FIFO_SPSC_FOREACH(fifo, entry)			\
	for ((entry) = CK_FIFO_SPSC_FIRST(fifo);		\
	    (entry) != NULL;					\
	    (entry) = CK_FIFO_SPSC_NEXT(entry))
#define CK_FIFO_SPSC_FOREACH_SAFE(fifo, entry, T)		\
	for ((entry) = CK_FIFO_SPSC_FIRST(fifo);		\
	    (entry) != NULL && ((T) = (entry)->next, 1);	\
	    (entry) = (T))

#endif /* CK_F_FIFO_SPSC */

#ifdef CK_F_PR_CAS_PTR_2
#ifndef CK_F_FIFO_MPMC
#define CK_F_FIFO_MPMC
struct ck_fifo_mpmc_entry;
struct ck_fifo_mpmc_pointer {
	struct ck_fifo_mpmc_entry *pointer;
	char *generation CK_CC_PACKED;
} CK_CC_ALIGN(16);

struct ck_fifo_mpmc_entry {
	void *value;
	struct ck_fifo_mpmc_pointer next;
};
typedef struct ck_fifo_mpmc_entry ck_fifo_mpmc_entry_t;

struct ck_fifo_mpmc {
	struct ck_fifo_mpmc_pointer head;
	char pad[CK_MD_CACHELINE - sizeof(struct ck_fifo_mpmc_pointer)];
	struct ck_fifo_mpmc_pointer tail;
};
typedef struct ck_fifo_mpmc ck_fifo_mpmc_t;

CK_CC_INLINE static void
ck_fifo_mpmc_init(struct ck_fifo_mpmc *fifo, struct ck_fifo_mpmc_entry *stub)
{

	stub->next.pointer = NULL;
	stub->next.generation = NULL;
	fifo->head.pointer = fifo->tail.pointer = stub;
	fifo->head.generation = fifo->tail.generation = NULL;
	return;
}

CK_CC_INLINE static void
ck_fifo_mpmc_deinit(struct ck_fifo_mpmc *fifo, struct ck_fifo_mpmc_entry **garbage)
{

	*garbage = fifo->head.pointer;
	fifo->head.pointer = fifo->tail.pointer = NULL;
	return;
}

CK_CC_INLINE static void
ck_fifo_mpmc_enqueue(struct ck_fifo_mpmc *fifo,
    struct ck_fifo_mpmc_entry *entry,
    void *value)
{
	struct ck_fifo_mpmc_pointer tail, next, update;

	/*
	 * Prepare the upcoming node and make sure to commit the updates
	 * before publishing.
	 */
	entry->value = value;
	entry->next.pointer = NULL;
	entry->next.generation = 0;
	ck_pr_fence_store_atomic();

	for (;;) {
		tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
		ck_pr_fence_load();
		tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
		next.generation = ck_pr_load_ptr(&tail.pointer->next.generation);
		ck_pr_fence_load();
		next.pointer = ck_pr_load_ptr(&tail.pointer->next.pointer);

		if (ck_pr_load_ptr(&fifo->tail.generation) != tail.generation)
			continue;

		if (next.pointer != NULL) {
			/*
			 * If the tail pointer has an entry following it then
			 * it needs to be forwarded to the next entry. This
			 * helps us guarantee we are always operating on the
			 * last entry.
			 */
			update.pointer = next.pointer;
			update.generation = tail.generation + 1;
			ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
		} else {
			/*
			 * Attempt to commit new entry to the end of the
			 * current tail.
			 */
			update.pointer = entry;
			update.generation = next.generation + 1;
			if (ck_pr_cas_ptr_2(&tail.pointer->next, &next, &update) == true)
				break;
		}
	}

	ck_pr_fence_atomic();

	/* After a successful insert, forward the tail to the new entry. */
	update.generation = tail.generation + 1;
	ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
	return;
}

CK_CC_INLINE static bool
ck_fifo_mpmc_tryenqueue(struct ck_fifo_mpmc *fifo,
    struct ck_fifo_mpmc_entry *entry,
    void *value)
{
	struct ck_fifo_mpmc_pointer tail, next, update;

	entry->value = value;
	entry->next.pointer = NULL;
	entry->next.generation = 0;

	ck_pr_fence_store_atomic();

	tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
	ck_pr_fence_load();
	tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
	next.generation = ck_pr_load_ptr(&tail.pointer->next.generation);
	ck_pr_fence_load();
	next.pointer = ck_pr_load_ptr(&tail.pointer->next.pointer);

	if (ck_pr_load_ptr(&fifo->tail.generation) != tail.generation)
		return false;

	if (next.pointer != NULL) {
		/*
		 * If the tail pointer has an entry following it then
		 * it needs to be forwarded to the next entry. This
		 * helps us guarantee we are always operating on the
		 * last entry.
		 */
		update.pointer = next.pointer;
		update.generation = tail.generation + 1;
		ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
		return false;
	} else {
		/*
		 * Attempt to commit new entry to the end of the
		 * current tail.
		 */
		update.pointer = entry;
		update.generation = next.generation + 1;
		if (ck_pr_cas_ptr_2(&tail.pointer->next, &next, &update) == false)
			return false;
	}

	ck_pr_fence_atomic();

	/* After a successful insert, forward the tail to the new entry. */
	update.generation = tail.generation + 1;
	ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
	return true;
}

CK_CC_INLINE static bool
ck_fifo_mpmc_dequeue(struct ck_fifo_mpmc *fifo,
    void *value,
    struct ck_fifo_mpmc_entry **garbage)
{
	struct ck_fifo_mpmc_pointer head, tail, next, update;

	for (;;) {
		head.generation = ck_pr_load_ptr(&fifo->head.generation);
		ck_pr_fence_load();
		head.pointer = ck_pr_load_ptr(&fifo->head.pointer);
		tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
		ck_pr_fence_load();
		tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);

		next.generation = ck_pr_load_ptr(&head.pointer->next.generation);
		ck_pr_fence_load();
		next.pointer = ck_pr_load_ptr(&head.pointer->next.pointer);

		update.pointer = next.pointer;
		if (head.pointer == tail.pointer) {
			/*
			 * The head is guaranteed to always point at a stub
			 * entry. If the stub entry has no references then the
			 * queue is empty.
			 */
			if (next.pointer == NULL)
				return false;

			/* Forward the tail pointer if necessary. */
			update.generation = tail.generation + 1;
			ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
		} else {
			/*
			 * It is possible for the head snapshot to have been
			 * re-used. Avoid dereferencing it during enqueue
			 * re-use.
			 */
			if (next.pointer == NULL)
				continue;

			/* Save value before commit. */
			*(void **)value = ck_pr_load_ptr(&next.pointer->value);

			/* Forward the head pointer to the next entry. */
			update.generation = head.generation + 1;
			if (ck_pr_cas_ptr_2(&fifo->head, &head, &update) == true)
				break;
		}
	}

	*garbage = head.pointer;
	return true;
}

CK_CC_INLINE static bool
ck_fifo_mpmc_trydequeue(struct ck_fifo_mpmc *fifo,
    void *value,
    struct ck_fifo_mpmc_entry **garbage)
{
	struct ck_fifo_mpmc_pointer head, tail, next, update;

	head.generation = ck_pr_load_ptr(&fifo->head.generation);
	ck_pr_fence_load();
	head.pointer = ck_pr_load_ptr(&fifo->head.pointer);

	tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
	ck_pr_fence_load();
	tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);

	next.generation = ck_pr_load_ptr(&head.pointer->next.generation);
	ck_pr_fence_load();
	next.pointer = ck_pr_load_ptr(&head.pointer->next.pointer);

	update.pointer = next.pointer;
	if (head.pointer == tail.pointer) {
		/*
		 * The head is guaranteed to always point at a stub
		 * entry. If the stub entry has no references then the
		 * queue is empty.
		 */
		if (next.pointer == NULL)
			return false;

		/* Forward the tail pointer if necessary. */
		update.generation = tail.generation + 1;
		ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
		return false;
	} else {
		/*
		 * It is possible for the head snapshot to have been
		 * re-used. Avoid dereferencing it during enqueue.
		 */
		if (next.pointer == NULL)
			return false;

		/* Save value before commit. */
		*(void **)value = ck_pr_load_ptr(&next.pointer->value);

		/* Forward the head pointer to the next entry. */
		update.generation = head.generation + 1;
		if (ck_pr_cas_ptr_2(&fifo->head, &head, &update) == false)
			return false;
	}

	*garbage = head.pointer;
	return true;
}

#define CK_FIFO_MPMC_ISEMPTY(f) ((f)->head.pointer->next.pointer == NULL)
#define CK_FIFO_MPMC_FIRST(f)   ((f)->head.pointer->next.pointer)
#define CK_FIFO_MPMC_NEXT(m)    ((m)->next.pointer)
#define CK_FIFO_MPMC_FOREACH(fifo, entry)				\
	for ((entry) = CK_FIFO_MPMC_FIRST(fifo);			\
	    (entry) != NULL;						\
	    (entry) = CK_FIFO_MPMC_NEXT(entry))
#define CK_FIFO_MPMC_FOREACH_SAFE(fifo, entry, T)			\
	for ((entry) = CK_FIFO_MPMC_FIRST(fifo);			\
	    (entry) != NULL && ((T) = (entry)->next.pointer, 1);	\
	    (entry) = (T))

#endif /* CK_F_FIFO_MPMC */
#endif /* CK_F_PR_CAS_PTR_2 */

#endif /* CK_FIFO_H */
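
A minimal single-producer/single-consumer usage sketch follows; the allocation strategy and the helper names are illustrative assumptions. Note that the enqueue/dequeue locks above are only needed when several threads share one end of the queue; a strict SPSC pairing needs none.

#include <ck_fifo.h>
#include <stdlib.h>

static ck_fifo_spsc_t fifo;

static void
fifo_setup(void)
{
	ck_fifo_spsc_entry_t *stub = malloc(sizeof *stub);

	ck_fifo_spsc_init(&fifo, stub);
}

/* Producer side: prefer recycling retired nodes over fresh allocation. */
static void
produce(void *value)
{
	ck_fifo_spsc_entry_t *entry = ck_fifo_spsc_recycle(&fifo);

	if (entry == NULL)
		entry = malloc(sizeof *entry);

	ck_fifo_spsc_enqueue(&fifo, entry, value);
}

/* Consumer side: returns false when the queue is empty. */
static bool
consume(void **value)
{

	return ck_fifo_spsc_dequeue(&fifo, value);
}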
121
sys/contrib/ck/include/ck_hp.h
Normal file
@ -0,0 +1,121 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_HP_H
#define CK_HP_H

#include <ck_cc.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stack.h>

#ifndef CK_HP_CACHE
#define CK_HP_CACHE 512
#endif

struct ck_hp_hazard;
typedef void (*ck_hp_destructor_t)(void *);

struct ck_hp {
	ck_stack_t subscribers;
	unsigned int n_subscribers;
	unsigned int n_free;
	unsigned int threshold;
	unsigned int degree;
	ck_hp_destructor_t destroy;
};
typedef struct ck_hp ck_hp_t;

struct ck_hp_hazard {
	void *pointer;
	void *data;
	ck_stack_entry_t pending_entry;
};
typedef struct ck_hp_hazard ck_hp_hazard_t;

enum {
	CK_HP_USED = 0,
	CK_HP_FREE = 1
};

struct ck_hp_record {
	int state;
	void **pointers;
	void *cache[CK_HP_CACHE];
	struct ck_hp *global;
	ck_stack_t pending;
	unsigned int n_pending;
	ck_stack_entry_t global_entry;
	unsigned int n_peak;
	uint64_t n_reclamations;
} CK_CC_CACHELINE;
typedef struct ck_hp_record ck_hp_record_t;

CK_CC_INLINE static void
ck_hp_set(struct ck_hp_record *record, unsigned int i, void *pointer)
{

	ck_pr_store_ptr(&record->pointers[i], pointer);
	return;
}

CK_CC_INLINE static void
ck_hp_set_fence(struct ck_hp_record *record, unsigned int i, void *pointer)
{

#ifdef CK_MD_TSO
	ck_pr_fas_ptr(&record->pointers[i], pointer);
#else
	ck_pr_store_ptr(&record->pointers[i], pointer);
	ck_pr_fence_memory();
#endif

	return;
}

CK_CC_INLINE static void
ck_hp_clear(struct ck_hp_record *record)
{
	void **pointers = record->pointers;
	unsigned int i;

	for (i = 0; i < record->global->degree; i++)
		*pointers++ = NULL;

	return;
}

void ck_hp_init(ck_hp_t *, unsigned int, unsigned int, ck_hp_destructor_t);
void ck_hp_set_threshold(ck_hp_t *, unsigned int);
void ck_hp_register(ck_hp_t *, ck_hp_record_t *, void **);
void ck_hp_unregister(ck_hp_record_t *);
ck_hp_record_t *ck_hp_recycle(ck_hp_t *);
void ck_hp_reclaim(ck_hp_record_t *);
void ck_hp_free(ck_hp_record_t *, ck_hp_hazard_t *, void *, void *);
void ck_hp_retire(ck_hp_record_t *, ck_hp_hazard_t *, void *, void *);
void ck_hp_purge(ck_hp_record_t *);

#endif /* CK_HP_H */
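
The hazard-pointer API is easiest to see end to end: a process-wide ck_hp state, one record per thread, and a publish-then-revalidate loop around each protected load. The slot count, scan threshold, and use of malloc/free below are illustrative assumptions.

#include <ck_hp.h>
#include <stdlib.h>

#define HP_SLOTS 2	/* e.g. ck_hp_fifo needs two slots per thread */

static ck_hp_t hp;

static void
hp_global_init(void)
{

	/* degree, scan threshold, destructor for reclaimed nodes. */
	ck_hp_init(&hp, HP_SLOTS, 64, free);
}

static void
hp_thread_init(ck_hp_record_t *record)
{
	void **slots = malloc(sizeof(void *) * HP_SLOTS);

	ck_hp_register(&hp, record, slots);
}

/* Publish a hazard pointer, then revalidate the source still points at it. */
static void *
hp_protect(ck_hp_record_t *record, void **source)
{
	void *p;

	do {
		p = ck_pr_load_ptr(source);
		ck_hp_set_fence(record, 0, p);
	} while (p != ck_pr_load_ptr(source));

	return p;
}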
215
sys/contrib/ck/include/ck_hp_fifo.h
Normal file
@ -0,0 +1,215 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * Copyright 2011 David Joseph.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_HP_FIFO_H
#define CK_HP_FIFO_H

#include <ck_cc.h>
#include <ck_hp.h>
#include <ck_pr.h>
#include <ck_stddef.h>

#define CK_HP_FIFO_SLOTS_COUNT (2)
#define CK_HP_FIFO_SLOTS_SIZE (sizeof(void *) * CK_HP_FIFO_SLOTS_COUNT)

/*
 * Though it is possible to embed the hazard-pointer state in the data
 * structure itself, the cost of doing so needs to be measured. If it
 * were embedded, every deferred reclamation would also incur a cache
 * invalidation when linking into the hazard-pointer pending queue,
 * which may lead to terrible cache-line bouncing.
 */
struct ck_hp_fifo_entry {
	void *value;
	ck_hp_hazard_t hazard;
	struct ck_hp_fifo_entry *next;
};
typedef struct ck_hp_fifo_entry ck_hp_fifo_entry_t;

struct ck_hp_fifo {
	struct ck_hp_fifo_entry *head;
	struct ck_hp_fifo_entry *tail;
};
typedef struct ck_hp_fifo ck_hp_fifo_t;

CK_CC_INLINE static void
ck_hp_fifo_init(struct ck_hp_fifo *fifo, struct ck_hp_fifo_entry *stub)
{

	fifo->head = fifo->tail = stub;
	stub->next = NULL;
	return;
}

CK_CC_INLINE static void
ck_hp_fifo_deinit(struct ck_hp_fifo *fifo, struct ck_hp_fifo_entry **stub)
{

	*stub = fifo->head;
	fifo->head = fifo->tail = NULL;
	return;
}

CK_CC_INLINE static void
ck_hp_fifo_enqueue_mpmc(ck_hp_record_t *record,
    struct ck_hp_fifo *fifo,
    struct ck_hp_fifo_entry *entry,
    void *value)
{
	struct ck_hp_fifo_entry *tail, *next;

	entry->value = value;
	entry->next = NULL;
	ck_pr_fence_store_atomic();

	for (;;) {
		tail = ck_pr_load_ptr(&fifo->tail);
		ck_hp_set_fence(record, 0, tail);
		if (tail != ck_pr_load_ptr(&fifo->tail))
			continue;

		next = ck_pr_load_ptr(&tail->next);
		if (next != NULL) {
			ck_pr_cas_ptr(&fifo->tail, tail, next);
			continue;
		} else if (ck_pr_cas_ptr(&fifo->tail->next, next, entry) == true)
			break;
	}

	ck_pr_fence_atomic();
	ck_pr_cas_ptr(&fifo->tail, tail, entry);
	return;
}

CK_CC_INLINE static bool
ck_hp_fifo_tryenqueue_mpmc(ck_hp_record_t *record,
    struct ck_hp_fifo *fifo,
    struct ck_hp_fifo_entry *entry,
    void *value)
{
	struct ck_hp_fifo_entry *tail, *next;

	entry->value = value;
	entry->next = NULL;
	ck_pr_fence_store_atomic();

	tail = ck_pr_load_ptr(&fifo->tail);
	ck_hp_set_fence(record, 0, tail);
	if (tail != ck_pr_load_ptr(&fifo->tail))
		return false;

	next = ck_pr_load_ptr(&tail->next);
	if (next != NULL) {
		ck_pr_cas_ptr(&fifo->tail, tail, next);
		return false;
	} else if (ck_pr_cas_ptr(&fifo->tail->next, next, entry) == false)
		return false;

	ck_pr_fence_atomic();
	ck_pr_cas_ptr(&fifo->tail, tail, entry);
	return true;
}

CK_CC_INLINE static struct ck_hp_fifo_entry *
ck_hp_fifo_dequeue_mpmc(ck_hp_record_t *record,
    struct ck_hp_fifo *fifo,
    void *value)
{
	struct ck_hp_fifo_entry *head, *tail, *next;

	for (;;) {
		head = ck_pr_load_ptr(&fifo->head);
		ck_pr_fence_load();
		tail = ck_pr_load_ptr(&fifo->tail);
		ck_hp_set_fence(record, 0, head);
		if (head != ck_pr_load_ptr(&fifo->head))
			continue;

		next = ck_pr_load_ptr(&head->next);
		ck_hp_set_fence(record, 1, next);
		if (head != ck_pr_load_ptr(&fifo->head))
			continue;

		if (head == tail) {
			if (next == NULL)
				return NULL;

			ck_pr_cas_ptr(&fifo->tail, tail, next);
			continue;
		} else if (ck_pr_cas_ptr(&fifo->head, head, next) == true)
			break;
	}

	ck_pr_store_ptr_unsafe(value, next->value);
	return head;
}

CK_CC_INLINE static struct ck_hp_fifo_entry *
ck_hp_fifo_trydequeue_mpmc(ck_hp_record_t *record,
    struct ck_hp_fifo *fifo,
    void *value)
{
	struct ck_hp_fifo_entry *head, *tail, *next;

	head = ck_pr_load_ptr(&fifo->head);
	ck_pr_fence_load();
	tail = ck_pr_load_ptr(&fifo->tail);
	ck_hp_set_fence(record, 0, head);
	if (head != ck_pr_load_ptr(&fifo->head))
		return NULL;

	next = ck_pr_load_ptr(&head->next);
	ck_hp_set_fence(record, 1, next);
	if (head != ck_pr_load_ptr(&fifo->head))
		return NULL;

	if (head == tail) {
		if (next == NULL)
			return NULL;

		ck_pr_cas_ptr(&fifo->tail, tail, next);
		return NULL;
	} else if (ck_pr_cas_ptr(&fifo->head, head, next) == false)
		return NULL;

	ck_pr_store_ptr_unsafe(value, next->value);
	return head;
}

#define CK_HP_FIFO_ISEMPTY(f) ((f)->head->next == NULL)
#define CK_HP_FIFO_FIRST(f)   ((f)->head->next)
#define CK_HP_FIFO_NEXT(m)    ((m)->next)
#define CK_HP_FIFO_FOREACH(fifo, entry)				\
	for ((entry) = CK_HP_FIFO_FIRST(fifo);			\
	    (entry) != NULL;					\
	    (entry) = CK_HP_FIFO_NEXT(entry))
#define CK_HP_FIFO_FOREACH_SAFE(fifo, entry, T)			\
	for ((entry) = CK_HP_FIFO_FIRST(fifo);			\
	    (entry) != NULL && ((T) = (entry)->next, 1);	\
	    (entry) = (T))

#endif /* CK_HP_FIFO_H */
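
Putting this header together with ck_hp.h, the sketch below shows enqueue/dequeue against a record registered with CK_HP_FIFO_SLOTS_COUNT slots; the allocation strategy and helper names are illustrative assumptions, and the hazard-pointer setup from the previous header is presumed done.

#include <ck_hp_fifo.h>
#include <stdlib.h>

static ck_hp_fifo_t fifo;

static void
fifo_setup(void)
{
	ck_hp_fifo_entry_t *stub = malloc(sizeof *stub);

	ck_hp_fifo_init(&fifo, stub);
}

static void
produce(ck_hp_record_t *record, void *value)
{
	ck_hp_fifo_entry_t *entry = malloc(sizeof *entry);

	ck_hp_fifo_enqueue_mpmc(record, &fifo, entry, value);
}

static void *
consume(ck_hp_record_t *record)
{
	ck_hp_fifo_entry_t *garbage;
	void *value;

	garbage = ck_hp_fifo_dequeue_mpmc(record, &fifo, &value);
	if (garbage == NULL)
		return NULL;

	/*
	 * Retire the displaced node; it is destroyed once no hazard
	 * pointer still references it.
	 */
	ck_hp_free(record, &garbage->hazard, garbage, garbage);
	return value;
}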
110
sys/contrib/ck/include/ck_hp_stack.h
Normal file
@ -0,0 +1,110 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_HP_STACK_H
#define CK_HP_STACK_H

#include <ck_cc.h>
#include <ck_hp.h>
#include <ck_pr.h>
#include <ck_stack.h>
#include <ck_stddef.h>

#define CK_HP_STACK_SLOTS_COUNT 1
#define CK_HP_STACK_SLOTS_SIZE sizeof(void *)

CK_CC_INLINE static void
ck_hp_stack_push_mpmc(struct ck_stack *target, struct ck_stack_entry *entry)
{

	ck_stack_push_upmc(target, entry);
	return;
}

CK_CC_INLINE static bool
ck_hp_stack_trypush_mpmc(struct ck_stack *target, struct ck_stack_entry *entry)
{

	return ck_stack_trypush_upmc(target, entry);
}

CK_CC_INLINE static struct ck_stack_entry *
ck_hp_stack_pop_mpmc(ck_hp_record_t *record, struct ck_stack *target)
{
	struct ck_stack_entry *entry, *update;

	do {
		entry = ck_pr_load_ptr(&target->head);
		if (entry == NULL)
			return NULL;

		ck_hp_set_fence(record, 0, entry);
	} while (entry != ck_pr_load_ptr(&target->head));

	while (ck_pr_cas_ptr_value(&target->head, entry, entry->next, &entry) == false) {
		if (entry == NULL)
			return NULL;

		ck_hp_set_fence(record, 0, entry);

		update = ck_pr_load_ptr(&target->head);
		while (entry != update) {
			ck_hp_set_fence(record, 0, update);
			entry = update;
			update = ck_pr_load_ptr(&target->head);
			if (update == NULL)
				return NULL;
		}
	}

	return entry;
}

CK_CC_INLINE static bool
ck_hp_stack_trypop_mpmc(ck_hp_record_t *record, struct ck_stack *target, struct ck_stack_entry **r)
{
	struct ck_stack_entry *entry;

	entry = ck_pr_load_ptr(&target->head);
	if (entry == NULL)
		return false;

	ck_hp_set_fence(record, 0, entry);
	if (entry != ck_pr_load_ptr(&target->head))
		goto leave;

	if (ck_pr_cas_ptr_value(&target->head, entry, entry->next, &entry) == false)
		goto leave;

	*r = entry;
	return true;

leave:
	ck_hp_set(record, 0, NULL);
	return false;
}

#endif /* CK_HP_STACK_H */
134
sys/contrib/ck/include/ck_hs.h
Normal file
@ -0,0 +1,134 @@
/*
 * Copyright 2012-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_HS_H
#define CK_HS_H

#include <ck_cc.h>
#include <ck_malloc.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stdint.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

/*
 * Indicates a single-writer many-reader workload. Mutually
 * exclusive with CK_HS_MODE_MPMC.
 */
#define CK_HS_MODE_SPMC 1

/*
 * Indicates that the values to be stored are not pointers but
 * values. Allows for full precision. Mutually exclusive
 * with CK_HS_MODE_OBJECT.
 */
#define CK_HS_MODE_DIRECT 2

/*
 * Indicates that the values to be stored are pointers.
 * Allows for space optimizations in the presence of pointer
 * packing. Mutually exclusive with CK_HS_MODE_DIRECT.
 */
#define CK_HS_MODE_OBJECT 8

/*
 * Indicates a delete-heavy workload. This will reduce the
 * need for garbage collection at the cost of approximately
 * 12% to 20% increased memory usage.
 */
#define CK_HS_MODE_DELETE 16

/* Currently unsupported. */
#define CK_HS_MODE_MPMC (void)

/*
 * Hash callback function.
 */
typedef unsigned long ck_hs_hash_cb_t(const void *, unsigned long);

/*
 * Returns true if the two objects are equivalent.
 */
typedef bool ck_hs_compare_cb_t(const void *, const void *);

#if defined(CK_MD_POINTER_PACK_ENABLE) && defined(CK_MD_VMA_BITS)
#define CK_HS_PP
#define CK_HS_KEY_MASK ((1U << ((sizeof(void *) * 8) - CK_MD_VMA_BITS)) - 1)
#endif

struct ck_hs_map;
struct ck_hs {
	struct ck_malloc *m;
	struct ck_hs_map *map;
	unsigned int mode;
	unsigned long seed;
	ck_hs_hash_cb_t *hf;
	ck_hs_compare_cb_t *compare;
};
typedef struct ck_hs ck_hs_t;

struct ck_hs_stat {
	unsigned long tombstones;
	unsigned long n_entries;
	unsigned int probe_maximum;
};

struct ck_hs_iterator {
	void **cursor;
	unsigned long offset;
};
typedef struct ck_hs_iterator ck_hs_iterator_t;

#define CK_HS_ITERATOR_INITIALIZER { NULL, 0 }

/* Convenience wrapper to table hash function. */
#define CK_HS_HASH(T, F, K) F((K), (T)->seed)

typedef void *ck_hs_apply_fn_t(void *, void *);
bool ck_hs_apply(ck_hs_t *, unsigned long, const void *, ck_hs_apply_fn_t *, void *);
void ck_hs_iterator_init(ck_hs_iterator_t *);
bool ck_hs_next(ck_hs_t *, ck_hs_iterator_t *, void **);
bool ck_hs_move(ck_hs_t *, ck_hs_t *, ck_hs_hash_cb_t *,
    ck_hs_compare_cb_t *, struct ck_malloc *);
bool ck_hs_init(ck_hs_t *, unsigned int, ck_hs_hash_cb_t *,
    ck_hs_compare_cb_t *, struct ck_malloc *, unsigned long, unsigned long);
void ck_hs_destroy(ck_hs_t *);
void *ck_hs_get(ck_hs_t *, unsigned long, const void *);
bool ck_hs_put(ck_hs_t *, unsigned long, const void *);
bool ck_hs_put_unique(ck_hs_t *, unsigned long, const void *);
bool ck_hs_set(ck_hs_t *, unsigned long, const void *, void **);
bool ck_hs_fas(ck_hs_t *, unsigned long, const void *, void **);
void *ck_hs_remove(ck_hs_t *, unsigned long, const void *);
bool ck_hs_grow(ck_hs_t *, unsigned long);
bool ck_hs_rebuild(ck_hs_t *);
bool ck_hs_gc(ck_hs_t *, unsigned long, unsigned long);
unsigned long ck_hs_count(ck_hs_t *);
bool ck_hs_reset(ck_hs_t *);
bool ck_hs_reset_size(ck_hs_t *, unsigned long);
void ck_hs_stat(ck_hs_t *, struct ck_hs_stat *);

#endif /* CK_HS_H */
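
A sketch of bringing a set up for string keys in SPMC mode follows; the hash function, the allocator shim, and the seed are illustrative assumptions (the allocator's realloc hook is deliberately left NULL here).

#include <ck_hs.h>
#include <stdlib.h>
#include <string.h>

static void *
hs_malloc(size_t r)
{

	return malloc(r);
}

static void
hs_free(void *p, size_t b, bool r)
{

	(void)b;
	(void)r;
	free(p);
}

static struct ck_malloc allocator = {
	.malloc = hs_malloc,
	.free = hs_free
};

/* Simple multiplicative string hash; any well-distributed hash will do. */
static unsigned long
hs_hash(const void *object, unsigned long seed)
{
	const char *s = object;
	unsigned long h = seed;

	while (*s != '\0')
		h = h * 131 + (unsigned char)*s++;

	return h;
}

static bool
hs_compare(const void *previous, const void *compare)
{

	return strcmp(previous, compare) == 0;
}

static bool
set_setup(ck_hs_t *hs)
{

	return ck_hs_init(hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC,
	    hs_hash, hs_compare, &allocator, 1024, 6602834);
}

static bool
set_insert(ck_hs_t *hs, const char *key)
{

	return ck_hs_put(hs, CK_HS_HASH(hs, hs_hash, key), key);
}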
271
sys/contrib/ck/include/ck_ht.h
Normal file
@ -0,0 +1,271 @@
/*
 * Copyright 2012-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_HT_H
#define CK_HT_H

#include <ck_pr.h>

#define CK_F_HT
#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_STORE_64)
#define CK_HT_TYPE uint64_t
#define CK_HT_TYPE_LOAD ck_pr_load_64
#define CK_HT_TYPE_STORE ck_pr_store_64
#define CK_HT_TYPE_MAX UINT64_MAX
#else
#define CK_HT_TYPE uint32_t
#define CK_HT_TYPE_LOAD ck_pr_load_32
#define CK_HT_TYPE_STORE ck_pr_store_32
#define CK_HT_TYPE_MAX UINT32_MAX
#endif

#include <ck_cc.h>
#include <ck_malloc.h>
#include <ck_md.h>
#include <ck_stdint.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

struct ck_ht_hash {
	uint64_t value;
};
typedef struct ck_ht_hash ck_ht_hash_t;

#define CK_HT_MODE_DIRECT 1U
#define CK_HT_MODE_BYTESTRING 2U
#define CK_HT_WORKLOAD_DELETE 4U

#if defined(CK_MD_POINTER_PACK_ENABLE) && defined(CK_MD_VMA_BITS)
#define CK_HT_PP
#define CK_HT_KEY_LENGTH ((sizeof(void *) * 8) - CK_MD_VMA_BITS)
#define CK_HT_KEY_MASK ((1U << CK_HT_KEY_LENGTH) - 1)
#else
#define CK_HT_KEY_LENGTH 65535U
#endif

struct ck_ht_entry {
#ifdef CK_HT_PP
	uintptr_t key;
	uintptr_t value CK_CC_PACKED;
} CK_CC_ALIGN(16);
#else
	uintptr_t key;
	uintptr_t value;
	CK_HT_TYPE key_length;
	CK_HT_TYPE hash;
} CK_CC_ALIGN(32);
#endif
typedef struct ck_ht_entry ck_ht_entry_t;

/*
 * The user is free to define their own stub values.
 */
#ifndef CK_HT_KEY_EMPTY
#define CK_HT_KEY_EMPTY ((uintptr_t)0)
#endif

#ifndef CK_HT_KEY_TOMBSTONE
#define CK_HT_KEY_TOMBSTONE (~CK_HT_KEY_EMPTY)
#endif

/*
 * Hash callback function. First argument is updated to contain a hash value,
 * second argument is the key, third argument is key length and final argument
 * is the hash table seed value.
 */
typedef void ck_ht_hash_cb_t(ck_ht_hash_t *, const void *, size_t, uint64_t);

struct ck_ht_map;
struct ck_ht {
	struct ck_malloc *m;
	struct ck_ht_map *map;
	unsigned int mode;
	uint64_t seed;
	ck_ht_hash_cb_t *h;
};
typedef struct ck_ht ck_ht_t;

struct ck_ht_stat {
	uint64_t probe_maximum;
	uint64_t n_entries;
};

struct ck_ht_iterator {
	struct ck_ht_entry *current;
	uint64_t offset;
};
typedef struct ck_ht_iterator ck_ht_iterator_t;

#define CK_HT_ITERATOR_INITIALIZER { NULL, 0 }

CK_CC_INLINE static void
ck_ht_iterator_init(struct ck_ht_iterator *iterator)
{

	iterator->current = NULL;
	iterator->offset = 0;
	return;
}

CK_CC_INLINE static bool
ck_ht_entry_empty(ck_ht_entry_t *entry)
{

	return entry->key == CK_HT_KEY_EMPTY;
}

CK_CC_INLINE static void
ck_ht_entry_key_set_direct(ck_ht_entry_t *entry, uintptr_t key)
{

	entry->key = key;
	return;
}

CK_CC_INLINE static void
ck_ht_entry_key_set(ck_ht_entry_t *entry, const void *key, uint16_t key_length)
{

#ifdef CK_HT_PP
	entry->key = (uintptr_t)key | ((uintptr_t)key_length << CK_MD_VMA_BITS);
#else
	entry->key = (uintptr_t)key;
	entry->key_length = key_length;
#endif

	return;
}

CK_CC_INLINE static void *
ck_ht_entry_key(ck_ht_entry_t *entry)
{

#ifdef CK_HT_PP
	return (void *)(entry->key & (((uintptr_t)1 << CK_MD_VMA_BITS) - 1));
#else
	return (void *)entry->key;
#endif
}

CK_CC_INLINE static uint16_t
ck_ht_entry_key_length(ck_ht_entry_t *entry)
{

#ifdef CK_HT_PP
	return entry->key >> CK_MD_VMA_BITS;
#else
	return entry->key_length;
#endif
}

CK_CC_INLINE static void *
ck_ht_entry_value(ck_ht_entry_t *entry)
{

#ifdef CK_HT_PP
	return (void *)(entry->value & (((uintptr_t)1 << CK_MD_VMA_BITS) - 1));
#else
	return (void *)entry->value;
#endif
}

CK_CC_INLINE static void
ck_ht_entry_set(struct ck_ht_entry *entry,
    ck_ht_hash_t h,
    const void *key,
    uint16_t key_length,
    const void *value)
{

#ifdef CK_HT_PP
	entry->key = (uintptr_t)key | ((uintptr_t)key_length << CK_MD_VMA_BITS);
	entry->value = (uintptr_t)value | ((uintptr_t)(h.value >> 32) << CK_MD_VMA_BITS);
#else
	entry->key = (uintptr_t)key;
	entry->value = (uintptr_t)value;
	entry->key_length = key_length;
	entry->hash = h.value;
#endif

	return;
}

CK_CC_INLINE static void
ck_ht_entry_set_direct(struct ck_ht_entry *entry,
    ck_ht_hash_t h,
    uintptr_t key,
    uintptr_t value)
{

	entry->key = key;
	entry->value = value;

#ifndef CK_HT_PP
	entry->hash = h.value;
#else
	(void)h;
#endif
	return;
}

CK_CC_INLINE static uintptr_t
ck_ht_entry_key_direct(ck_ht_entry_t *entry)
{

	return entry->key;
}

CK_CC_INLINE static uintptr_t
ck_ht_entry_value_direct(ck_ht_entry_t *entry)
{

	return entry->value;
}

/*
 * Iteration must occur without any concurrent mutations on
 * the hash table.
 */
bool ck_ht_next(ck_ht_t *, ck_ht_iterator_t *, ck_ht_entry_t **entry);

void ck_ht_stat(ck_ht_t *, struct ck_ht_stat *);
void ck_ht_hash(ck_ht_hash_t *, ck_ht_t *, const void *, uint16_t);
void ck_ht_hash_direct(ck_ht_hash_t *, ck_ht_t *, uintptr_t);
bool ck_ht_init(ck_ht_t *, unsigned int, ck_ht_hash_cb_t *,
    struct ck_malloc *, CK_HT_TYPE, uint64_t);
void ck_ht_destroy(ck_ht_t *);
bool ck_ht_set_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
bool ck_ht_put_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
bool ck_ht_get_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
bool ck_ht_gc(struct ck_ht *, unsigned long, unsigned long);
|
||||
bool ck_ht_grow_spmc(ck_ht_t *, CK_HT_TYPE);
|
||||
bool ck_ht_remove_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
|
||||
bool ck_ht_reset_spmc(ck_ht_t *);
|
||||
bool ck_ht_reset_size_spmc(ck_ht_t *, CK_HT_TYPE);
|
||||
CK_HT_TYPE ck_ht_count(ck_ht_t *);
|
||||
|
||||
#endif /* CK_HT_H */
|
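A minimal userspace sketch of the single-writer ck_ht interface above. The allocator wrappers (ht_malloc, ht_realloc, ht_free), key and value are illustrative, not part of CK, and the example assumes the table accepts a NULL hash callback to select its built-in default hash; only functions declared in the header are used.

#include <ck_ht.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *
ht_malloc(size_t r)
{
	return malloc(r);
}

static void *
ht_realloc(void *p, size_t old_size, size_t new_size, bool defer)
{
	(void)old_size; (void)defer;	/* immediate reclamation only */
	return realloc(p, new_size);
}

static void
ht_free(void *p, size_t b, bool r)
{
	(void)b; (void)r;
	free(p);
}

static struct ck_malloc ht_allocator = {
	.malloc = ht_malloc,
	.realloc = ht_realloc,
	.free = ht_free
};

int
main(void)
{
	ck_ht_t ht;
	ck_ht_entry_t entry;
	ck_ht_hash_t h;
	const char *key = "example", *value = "payload";

	/* 8 initial slots, default hash (NULL callback), arbitrary seed. */
	if (ck_ht_init(&ht, CK_HT_MODE_BYTESTRING, NULL, &ht_allocator,
	    8, 6602834) == false)
		return 1;

	/* Single-writer insertion: hash the key, fill an entry, put it. */
	ck_ht_hash(&h, &ht, key, strlen(key));
	ck_ht_entry_set(&entry, h, key, strlen(key), value);
	ck_ht_put_spmc(&ht, h, &entry);

	/* Lookup: set the key on the entry, then get fills in the value. */
	ck_ht_entry_key_set(&entry, key, strlen(key));
	if (ck_ht_get_spmc(&ht, h, &entry) == true)
		printf("%s -> %s\n", key, (char *)ck_ht_entry_value(&entry));

	ck_ht_destroy(&ht);
	return 0;
}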
48
sys/contrib/ck/include/ck_limits.h
Normal file
@@ -0,0 +1,48 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(__linux__) && defined(__KERNEL__)
#include <linux/kernel.h>

#ifndef UINT8_MAX
#define UINT8_MAX ((u8)(~0U))
#endif
#ifndef UINT16_MAX
#define UINT16_MAX USHRT_MAX
#endif
#ifndef UINT32_MAX
#define UINT32_MAX UINT_MAX
#endif
#ifndef UINT64_MAX
#define UINT64_MAX ULLONG_MAX
#endif

#elif defined(__FreeBSD__) && defined(_KERNEL)
#include <sys/stdint.h>
#include <sys/limits.h>
#else
#include <limits.h>
#endif /* __linux__ && __KERNEL__ */
39
sys/contrib/ck/include/ck_malloc.h
Normal file
@@ -0,0 +1,39 @@
/*
 * Copyright 2012-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_MALLOC_H
#define CK_MALLOC_H

#include <ck_stdbool.h>
#include <sys/types.h>

struct ck_malloc {
	void *(*malloc)(size_t);
	void *(*realloc)(void *, size_t, size_t, bool);
	void (*free)(void *, size_t, bool);
};

#endif /* CK_MALLOC_H */
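The explicit size arguments in struct ck_malloc let an allocator do its own accounting, which plain free(3) cannot, and the bool requests deferred reclamation where the caller can honor it. A hedged sketch of an adapter that tracks outstanding bytes; all names here are illustrative and the accounting is single-threaded for brevity.

#include <ck_malloc.h>
#include <stdlib.h>

static size_t bytes_live;	/* simple single-threaded accounting */

static void *
acct_malloc(size_t size)
{
	bytes_live += size;
	return malloc(size);
}

static void *
acct_realloc(void *p, size_t old_size, size_t new_size, bool defer)
{
	(void)defer;		/* this adapter reclaims immediately */
	bytes_live += new_size - old_size;
	return realloc(p, new_size);
}

static void
acct_free(void *p, size_t size, bool defer)
{
	(void)defer;
	bytes_live -= size;
	free(p);
}

static struct ck_malloc acct_allocator = {
	.malloc = acct_malloc,
	.realloc = acct_realloc,
	.free = acct_free
};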
142
sys/contrib/ck/include/ck_pflock.h
Normal file
@@ -0,0 +1,142 @@
/*
 * Copyright 2013 John Wittrock.
 * Copyright 2013-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_PFLOCK_H
#define CK_PFLOCK_H

/*
 * This is an implementation of phase-fair locks derived from the work
 * described in:
 *    Brandenburg, B. and Anderson, J. 2010. Spin-Based
 *    Reader-Writer Synchronization for Multiprocessor Real-Time Systems
 */

#include <ck_cc.h>
#include <ck_pr.h>

struct ck_pflock {
	uint32_t rin;
	uint32_t rout;
	uint32_t win;
	uint32_t wout;
};
typedef struct ck_pflock ck_pflock_t;

#define CK_PFLOCK_LSB   0xFFFFFFF0
#define CK_PFLOCK_RINC  0x100	/* Reader increment value. */
#define CK_PFLOCK_WBITS 0x3	/* Writer bits in reader. */
#define CK_PFLOCK_PRES  0x2	/* Writer present bit. */
#define CK_PFLOCK_PHID  0x1	/* Phase ID bit. */

#define CK_PFLOCK_INITIALIZER {0, 0, 0, 0}

CK_CC_INLINE static void
ck_pflock_init(struct ck_pflock *pf)
{

	pf->rin = 0;
	pf->rout = 0;
	pf->win = 0;
	pf->wout = 0;
	ck_pr_barrier();

	return;
}

CK_CC_INLINE static void
ck_pflock_write_unlock(ck_pflock_t *pf)
{

	ck_pr_fence_unlock();

	/* Migrate from write phase to read phase. */
	ck_pr_and_32(&pf->rin, CK_PFLOCK_LSB);

	/* Allow other writers to continue. */
	ck_pr_faa_32(&pf->wout, 1);
	return;
}

CK_CC_INLINE static void
ck_pflock_write_lock(ck_pflock_t *pf)
{
	uint32_t ticket;

	/* Acquire ownership of write-phase. */
	ticket = ck_pr_faa_32(&pf->win, 1);
	while (ck_pr_load_32(&pf->wout) != ticket)
		ck_pr_stall();

	/*
	 * Acquire ticket on read-side in order to allow them
	 * to flush. Indicates to any incoming reader that a
	 * write-phase is pending.
	 */
	ticket = ck_pr_faa_32(&pf->rin,
	    (ticket & CK_PFLOCK_PHID) | CK_PFLOCK_PRES);

	/* Wait for any pending readers to flush. */
	while (ck_pr_load_32(&pf->rout) != ticket)
		ck_pr_stall();

	ck_pr_fence_lock();
	return;
}

CK_CC_INLINE static void
ck_pflock_read_unlock(ck_pflock_t *pf)
{

	ck_pr_fence_unlock();
	ck_pr_faa_32(&pf->rout, CK_PFLOCK_RINC);
	return;
}

CK_CC_INLINE static void
ck_pflock_read_lock(ck_pflock_t *pf)
{
	uint32_t w;

	/*
	 * If no writer is present, then the operation has completed
	 * successfully.
	 */
	w = ck_pr_faa_32(&pf->rin, CK_PFLOCK_RINC) & CK_PFLOCK_WBITS;
	if (w == 0)
		goto leave;

	/* Wait for current write phase to complete. */
	while ((ck_pr_load_32(&pf->rin) & CK_PFLOCK_WBITS) == w)
		ck_pr_stall();

leave:
	/* Acquire semantics with respect to readers. */
	ck_pr_fence_lock();
	return;
}

#endif /* CK_PFLOCK_H */
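A hedged sketch of the phase-fair lock above guarding a shared counter; the variables and function names are illustrative. Readers admitted in the same phase proceed together, while an arriving writer waits only for the readers of the current phase, which is what gives the lock its fairness property.

#include <ck_pflock.h>

static ck_pflock_t lock = CK_PFLOCK_INITIALIZER;
static unsigned long shared_value;

/* Reader side: many readers may hold the lock concurrently. */
static unsigned long
reader(void)
{
	unsigned long snapshot;

	ck_pflock_read_lock(&lock);
	snapshot = shared_value;
	ck_pflock_read_unlock(&lock);
	return snapshot;
}

/* Writer side: writers are serialized by the win/wout ticket pair. */
static void
writer(void)
{
	ck_pflock_write_lock(&lock);
	shared_value++;
	ck_pflock_write_unlock(&lock);
}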
1211
sys/contrib/ck/include/ck_pr.h
Normal file
File diff suppressed because it is too large
428
sys/contrib/ck/include/ck_queue.h
Normal file
@@ -0,0 +1,428 @@
/*
 * Copyright 2012-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)queue.h	8.5 (Berkeley) 8/20/94
 * $FreeBSD$
 */

#ifndef CK_QUEUE_H
#define CK_QUEUE_H

#include <ck_pr.h>

/*
 * This file defines three types of data structures: singly-linked lists,
 * singly-linked tail queues and lists.
 *
 * A singly-linked list is headed by a single forward pointer. The elements
 * are singly linked for minimum space and pointer manipulation overhead at
 * the expense of O(n) removal for arbitrary elements. New elements can be
 * added to the list after an existing element or at the head of the list.
 * Elements being removed from the head of the list should use the explicit
 * macro for this purpose for optimum efficiency. A singly-linked list may
 * only be traversed in the forward direction. Singly-linked lists are ideal
 * for applications with large datasets and few or no removals or for
 * implementing a LIFO queue.
 *
 * A singly-linked tail queue is headed by a pair of pointers, one to the
 * head of the list and the other to the tail of the list. The elements are
 * singly linked for minimum space and pointer manipulation overhead at the
 * expense of O(n) removal for arbitrary elements. New elements can be added
 * to the list after an existing element, at the head of the list, or at the
 * end of the list. Elements being removed from the head of the tail queue
 * should use the explicit macro for this purpose for optimum efficiency.
 * A singly-linked tail queue may only be traversed in the forward direction.
 * Singly-linked tail queues are ideal for applications with large datasets
 * and few or no removals or for implementing a FIFO queue.
 *
 * A list is headed by a single forward pointer (or an array of forward
 * pointers for a hash table header). The elements are doubly linked
 * so that an arbitrary element can be removed without a need to
 * traverse the list. New elements can be added to the list before
 * or after an existing element or at the head of the list. A list
 * may only be traversed in the forward direction.
 *
 * It is safe to use _FOREACH/_FOREACH_SAFE in the presence of concurrent
 * modifications to the list. Writers to these lists must, on the other hand,
 * implement writer-side synchronization. The _SWAP operations are not atomic.
 * This facility is currently unsupported on architectures such as the Alpha
 * which require load-depend memory fences.
 *
 *                           CK_SLIST   CK_LIST   CK_STAILQ
 * _HEAD                     +          +         +
 * _HEAD_INITIALIZER         +          +         +
 * _ENTRY                    +          +         +
 * _INIT                     +          +         +
 * _EMPTY                    +          +         +
 * _FIRST                    +          +         +
 * _NEXT                     +          +         +
 * _FOREACH                  +          +         +
 * _FOREACH_SAFE             +          +         +
 * _INSERT_HEAD              +          +         +
 * _INSERT_BEFORE            -          +         -
 * _INSERT_AFTER             +          +         +
 * _INSERT_TAIL              -          -         +
 * _REMOVE_AFTER             +          -         +
 * _REMOVE_HEAD              +          -         +
 * _REMOVE                   +          +         +
 * _SWAP                     +          +         +
 * _MOVE                     +          +         +
 */

/*
 * Singly-linked List declarations.
 */
#define CK_SLIST_HEAD(name, type) \
struct name { \
	struct type *slh_first; /* first element */ \
}

#define CK_SLIST_HEAD_INITIALIZER(head) \
	{ NULL }

#define CK_SLIST_ENTRY(type) \
struct { \
	struct type *sle_next; /* next element */ \
}

/*
 * Singly-linked List functions.
 */
#define CK_SLIST_EMPTY(head) \
	(ck_pr_load_ptr(&(head)->slh_first) == NULL)

#define CK_SLIST_FIRST(head) \
	(ck_pr_load_ptr(&(head)->slh_first))

#define CK_SLIST_NEXT(elm, field) \
	ck_pr_load_ptr(&((elm)->field.sle_next))

#define CK_SLIST_FOREACH(var, head, field) \
	for ((var) = CK_SLIST_FIRST((head)); \
	    (var) && (ck_pr_fence_load(), 1); \
	    (var) = CK_SLIST_NEXT((var), field))

#define CK_SLIST_FOREACH_SAFE(var, head, field, tvar) \
	for ((var) = CK_SLIST_FIRST(head); \
	    (var) && (ck_pr_fence_load(), (tvar) = CK_SLIST_NEXT(var, field), 1); \
	    (var) = (tvar))

#define CK_SLIST_FOREACH_PREVPTR(var, varp, head, field) \
	for ((varp) = &(head)->slh_first; \
	    ((var) = ck_pr_load_ptr(varp)) != NULL && (ck_pr_fence_load(), 1); \
	    (varp) = &(var)->field.sle_next)

#define CK_SLIST_INIT(head) do { \
	ck_pr_store_ptr(&(head)->slh_first, NULL); \
	ck_pr_fence_store(); \
} while (0)

#define CK_SLIST_INSERT_AFTER(a, b, field) do { \
	(b)->field.sle_next = (a)->field.sle_next; \
	ck_pr_fence_store(); \
	ck_pr_store_ptr(&(a)->field.sle_next, b); \
} while (0)

#define CK_SLIST_INSERT_HEAD(head, elm, field) do { \
	(elm)->field.sle_next = (head)->slh_first; \
	ck_pr_fence_store(); \
	ck_pr_store_ptr(&(head)->slh_first, elm); \
} while (0)

#define CK_SLIST_REMOVE_AFTER(elm, field) do { \
	ck_pr_store_ptr(&(elm)->field.sle_next, \
	    (elm)->field.sle_next->field.sle_next); \
} while (0)

#define CK_SLIST_REMOVE(head, elm, type, field) do { \
	if ((head)->slh_first == (elm)) { \
		CK_SLIST_REMOVE_HEAD((head), field); \
	} else { \
		struct type *curelm = (head)->slh_first; \
		while (curelm->field.sle_next != (elm)) \
			curelm = curelm->field.sle_next; \
		CK_SLIST_REMOVE_AFTER(curelm, field); \
	} \
} while (0)

#define CK_SLIST_REMOVE_HEAD(head, field) do { \
	ck_pr_store_ptr(&(head)->slh_first, \
	    (head)->slh_first->field.sle_next); \
} while (0)

#define CK_SLIST_MOVE(head1, head2, field) do { \
	ck_pr_store_ptr(&(head1)->slh_first, (head2)->slh_first); \
} while (0)

/*
 * This operation is not applied atomically.
 */
#define CK_SLIST_SWAP(a, b, type) do { \
	struct type *swap_first = (a)->slh_first; \
	(a)->slh_first = (b)->slh_first; \
	(b)->slh_first = swap_first; \
} while (0)

/*
 * Singly-linked Tail queue declarations.
 */
#define CK_STAILQ_HEAD(name, type) \
struct name { \
	struct type *stqh_first; /* first element */ \
	struct type **stqh_last; /* addr of last next element */ \
}

#define CK_STAILQ_HEAD_INITIALIZER(head) \
	{ NULL, &(head).stqh_first }

#define CK_STAILQ_ENTRY(type) \
struct { \
	struct type *stqe_next; /* next element */ \
}

/*
 * Singly-linked Tail queue functions.
 */
#define CK_STAILQ_CONCAT(head1, head2) do { \
	if ((head2)->stqh_first != NULL) { \
		ck_pr_store_ptr((head1)->stqh_last, (head2)->stqh_first); \
		ck_pr_fence_store(); \
		(head1)->stqh_last = (head2)->stqh_last; \
		CK_STAILQ_INIT((head2)); \
	} \
} while (0)

#define CK_STAILQ_EMPTY(head) (ck_pr_load_ptr(&(head)->stqh_first) == NULL)

#define CK_STAILQ_FIRST(head) (ck_pr_load_ptr(&(head)->stqh_first))

#define CK_STAILQ_FOREACH(var, head, field) \
	for ((var) = CK_STAILQ_FIRST((head)); \
	    (var) && (ck_pr_fence_load(), 1); \
	    (var) = CK_STAILQ_NEXT((var), field))

#define CK_STAILQ_FOREACH_SAFE(var, head, field, tvar) \
	for ((var) = CK_STAILQ_FIRST((head)); \
	    (var) && (ck_pr_fence_load(), (tvar) = \
	    CK_STAILQ_NEXT((var), field), 1); \
	    (var) = (tvar))

#define CK_STAILQ_INIT(head) do { \
	ck_pr_store_ptr(&(head)->stqh_first, NULL); \
	ck_pr_fence_store(); \
	(head)->stqh_last = &(head)->stqh_first; \
} while (0)

#define CK_STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
	(elm)->field.stqe_next = (tqelm)->field.stqe_next; \
	ck_pr_fence_store(); \
	ck_pr_store_ptr(&(tqelm)->field.stqe_next, elm); \
	if ((elm)->field.stqe_next == NULL) \
		(head)->stqh_last = &(elm)->field.stqe_next; \
} while (0)

#define CK_STAILQ_INSERT_HEAD(head, elm, field) do { \
	(elm)->field.stqe_next = (head)->stqh_first; \
	ck_pr_fence_store(); \
	ck_pr_store_ptr(&(head)->stqh_first, elm); \
	if ((elm)->field.stqe_next == NULL) \
		(head)->stqh_last = &(elm)->field.stqe_next; \
} while (0)

#define CK_STAILQ_INSERT_TAIL(head, elm, field) do { \
	(elm)->field.stqe_next = NULL; \
	ck_pr_fence_store(); \
	ck_pr_store_ptr((head)->stqh_last, (elm)); \
	(head)->stqh_last = &(elm)->field.stqe_next; \
} while (0)

#define CK_STAILQ_NEXT(elm, field) \
	(ck_pr_load_ptr(&(elm)->field.stqe_next))

#define CK_STAILQ_REMOVE(head, elm, type, field) do { \
	if ((head)->stqh_first == (elm)) { \
		CK_STAILQ_REMOVE_HEAD((head), field); \
	} else { \
		struct type *curelm = (head)->stqh_first; \
		while (curelm->field.stqe_next != (elm)) \
			curelm = curelm->field.stqe_next; \
		CK_STAILQ_REMOVE_AFTER(head, curelm, field); \
	} \
} while (0)

#define CK_STAILQ_REMOVE_AFTER(head, elm, field) do { \
	ck_pr_store_ptr(&(elm)->field.stqe_next, \
	    (elm)->field.stqe_next->field.stqe_next); \
	if ((elm)->field.stqe_next == NULL) \
		(head)->stqh_last = &(elm)->field.stqe_next; \
} while (0)

#define CK_STAILQ_REMOVE_HEAD(head, field) do { \
	ck_pr_store_ptr(&(head)->stqh_first, \
	    (head)->stqh_first->field.stqe_next); \
	if ((head)->stqh_first == NULL) \
		(head)->stqh_last = &(head)->stqh_first; \
} while (0)

#define CK_STAILQ_MOVE(head1, head2, field) do { \
	ck_pr_store_ptr(&(head1)->stqh_first, (head2)->stqh_first); \
	(head1)->stqh_last = (head2)->stqh_last; \
	if ((head2)->stqh_last == &(head2)->stqh_first) \
		(head1)->stqh_last = &(head1)->stqh_first; \
} while (0)

/*
 * This operation is not applied atomically.
 */
#define CK_STAILQ_SWAP(head1, head2, type) do { \
	struct type *swap_first = CK_STAILQ_FIRST(head1); \
	struct type **swap_last = (head1)->stqh_last; \
	CK_STAILQ_FIRST(head1) = CK_STAILQ_FIRST(head2); \
	(head1)->stqh_last = (head2)->stqh_last; \
	CK_STAILQ_FIRST(head2) = swap_first; \
	(head2)->stqh_last = swap_last; \
	if (CK_STAILQ_EMPTY(head1)) \
		(head1)->stqh_last = &(head1)->stqh_first; \
	if (CK_STAILQ_EMPTY(head2)) \
		(head2)->stqh_last = &(head2)->stqh_first; \
} while (0)

/*
 * List declarations.
 */
#define CK_LIST_HEAD(name, type) \
struct name { \
	struct type *lh_first; /* first element */ \
}

#define CK_LIST_HEAD_INITIALIZER(head) \
	{ NULL }

#define CK_LIST_ENTRY(type) \
struct { \
	struct type *le_next;  /* next element */ \
	struct type **le_prev; /* address of previous next element */ \
}

#define CK_LIST_FIRST(head)		ck_pr_load_ptr(&(head)->lh_first)
#define CK_LIST_EMPTY(head)		(CK_LIST_FIRST(head) == NULL)
#define CK_LIST_NEXT(elm, field)	ck_pr_load_ptr(&(elm)->field.le_next)

#define CK_LIST_FOREACH(var, head, field) \
	for ((var) = CK_LIST_FIRST((head)); \
	    (var) && (ck_pr_fence_load(), 1); \
	    (var) = CK_LIST_NEXT((var), field))

#define CK_LIST_FOREACH_SAFE(var, head, field, tvar) \
	for ((var) = CK_LIST_FIRST((head)); \
	    (var) && (ck_pr_fence_load(), (tvar) = CK_LIST_NEXT((var), field), 1); \
	    (var) = (tvar))

#define CK_LIST_INIT(head) do { \
	ck_pr_store_ptr(&(head)->lh_first, NULL); \
	ck_pr_fence_store(); \
} while (0)

#define CK_LIST_INSERT_AFTER(listelm, elm, field) do { \
	(elm)->field.le_next = (listelm)->field.le_next; \
	(elm)->field.le_prev = &(listelm)->field.le_next; \
	ck_pr_fence_store(); \
	if ((listelm)->field.le_next != NULL) \
		(listelm)->field.le_next->field.le_prev = &(elm)->field.le_next; \
	ck_pr_store_ptr(&(listelm)->field.le_next, elm); \
} while (0)

#define CK_LIST_INSERT_BEFORE(listelm, elm, field) do { \
	(elm)->field.le_prev = (listelm)->field.le_prev; \
	(elm)->field.le_next = (listelm); \
	ck_pr_fence_store(); \
	ck_pr_store_ptr((listelm)->field.le_prev, (elm)); \
	(listelm)->field.le_prev = &(elm)->field.le_next; \
} while (0)

#define CK_LIST_INSERT_HEAD(head, elm, field) do { \
	(elm)->field.le_next = (head)->lh_first; \
	ck_pr_fence_store(); \
	if ((elm)->field.le_next != NULL) \
		(head)->lh_first->field.le_prev = &(elm)->field.le_next; \
	ck_pr_store_ptr(&(head)->lh_first, elm); \
	(elm)->field.le_prev = &(head)->lh_first; \
} while (0)

#define CK_LIST_REMOVE(elm, field) do { \
	ck_pr_store_ptr((elm)->field.le_prev, (elm)->field.le_next); \
	if ((elm)->field.le_next != NULL) \
		(elm)->field.le_next->field.le_prev = (elm)->field.le_prev; \
} while (0)

#define CK_LIST_MOVE(head1, head2, field) do { \
	ck_pr_store_ptr(&(head1)->lh_first, (head2)->lh_first); \
	if ((head1)->lh_first != NULL) \
		(head1)->lh_first->field.le_prev = &(head1)->lh_first; \
} while (0)

/*
 * This operation is not applied atomically.
 */
#define CK_LIST_SWAP(head1, head2, type, field) do { \
	struct type *swap_tmp = (head1)->lh_first; \
	(head1)->lh_first = (head2)->lh_first; \
	(head2)->lh_first = swap_tmp; \
	if ((swap_tmp = (head1)->lh_first) != NULL) \
		swap_tmp->field.le_prev = &(head1)->lh_first; \
	if ((swap_tmp = (head2)->lh_first) != NULL) \
		swap_tmp->field.le_prev = &(head2)->lh_first; \
} while (0)

#endif /* CK_QUEUE_H */
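An illustrative sketch of the concurrency contract described in the header comment: one writer inserts under its own serialization while readers traverse concurrently with CK_SLIST_FOREACH. The node type, list name and functions are hypothetical, not part of CK.

#include <ck_queue.h>

struct node {
	int value;
	CK_SLIST_ENTRY(node) link;
};
static CK_SLIST_HEAD(node_list, node) head = CK_SLIST_HEAD_INITIALIZER(head);

/* Writer side: must be externally serialized against other writers. */
static void
insert(struct node *n)
{

	CK_SLIST_INSERT_HEAD(&head, n, link);
}

/* Reader side: safe against one concurrent writer, no lock required. */
static int
sum(void)
{
	struct node *cursor;
	int total = 0;

	CK_SLIST_FOREACH(cursor, &head, link)
		total += cursor->value;
	return total;
}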
134
sys/contrib/ck/include/ck_rhs.h
Normal file
@@ -0,0 +1,134 @@
/*
 * Copyright 2012-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_RHS_H
#define CK_RHS_H

#include <ck_cc.h>
#include <ck_malloc.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stdint.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

/*
 * Indicates a single-writer many-reader workload. Mutually
 * exclusive with CK_RHS_MODE_MPMC.
 */
#define CK_RHS_MODE_SPMC 1

/*
 * Indicates that values to be stored are not pointers but
 * values. Allows for full precision. Mutually exclusive
 * with CK_RHS_MODE_OBJECT.
 */
#define CK_RHS_MODE_DIRECT 2

/*
 * Indicates that the values to be stored are pointers.
 * Allows for space optimizations in the presence of pointer
 * packing. Mutually exclusive with CK_RHS_MODE_DIRECT.
 */
#define CK_RHS_MODE_OBJECT 8

/*
 * Indicates that the load is read-mostly, so get should be optimized
 * over put and delete.
 */
#define CK_RHS_MODE_READ_MOSTLY 16

/* Currently unsupported. */
#define CK_RHS_MODE_MPMC (void)

/*
 * Hash callback function.
 */
typedef unsigned long ck_rhs_hash_cb_t(const void *, unsigned long);

/*
 * Returns true if objects are equivalent.
 */
typedef bool ck_rhs_compare_cb_t(const void *, const void *);

#if defined(CK_MD_POINTER_PACK_ENABLE) && defined(CK_MD_VMA_BITS)
#define CK_RHS_PP
#define CK_RHS_KEY_MASK ((1U << ((sizeof(void *) * 8) - CK_MD_VMA_BITS)) - 1)
#endif

struct ck_rhs_map;
struct ck_rhs {
	struct ck_malloc *m;
	struct ck_rhs_map *map;
	unsigned int mode;
	unsigned int load_factor;
	unsigned long seed;
	ck_rhs_hash_cb_t *hf;
	ck_rhs_compare_cb_t *compare;
};
typedef struct ck_rhs ck_rhs_t;

struct ck_rhs_stat {
	unsigned long n_entries;
	unsigned int probe_maximum;
};

struct ck_rhs_iterator {
	void **cursor;
	unsigned long offset;
};
typedef struct ck_rhs_iterator ck_rhs_iterator_t;

#define CK_RHS_ITERATOR_INITIALIZER { NULL, 0 }

/* Convenience wrapper to table hash function. */
#define CK_RHS_HASH(T, F, K) F((K), (T)->seed)

typedef void *ck_rhs_apply_fn_t(void *, void *);
bool ck_rhs_apply(ck_rhs_t *, unsigned long, const void *, ck_rhs_apply_fn_t *, void *);
void ck_rhs_iterator_init(ck_rhs_iterator_t *);
bool ck_rhs_next(ck_rhs_t *, ck_rhs_iterator_t *, void **);
bool ck_rhs_move(ck_rhs_t *, ck_rhs_t *, ck_rhs_hash_cb_t *,
    ck_rhs_compare_cb_t *, struct ck_malloc *);
bool ck_rhs_init(ck_rhs_t *, unsigned int, ck_rhs_hash_cb_t *,
    ck_rhs_compare_cb_t *, struct ck_malloc *, unsigned long, unsigned long);
void ck_rhs_destroy(ck_rhs_t *);
void *ck_rhs_get(ck_rhs_t *, unsigned long, const void *);
bool ck_rhs_put(ck_rhs_t *, unsigned long, const void *);
bool ck_rhs_put_unique(ck_rhs_t *, unsigned long, const void *);
bool ck_rhs_set(ck_rhs_t *, unsigned long, const void *, void **);
bool ck_rhs_fas(ck_rhs_t *, unsigned long, const void *, void **);
void *ck_rhs_remove(ck_rhs_t *, unsigned long, const void *);
bool ck_rhs_grow(ck_rhs_t *, unsigned long);
bool ck_rhs_rebuild(ck_rhs_t *);
bool ck_rhs_gc(ck_rhs_t *);
unsigned long ck_rhs_count(ck_rhs_t *);
bool ck_rhs_reset(ck_rhs_t *);
bool ck_rhs_reset_size(ck_rhs_t *, unsigned long);
void ck_rhs_stat(ck_rhs_t *, struct ck_rhs_stat *);
bool ck_rhs_set_load_factor(ck_rhs_t *, unsigned int);

#endif /* CK_RHS_H */
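A sketch of bringing up a ck_rhs set keyed by C strings, combining the single-writer and pointer-valued modes described above. The hash and comparison callbacks, allocator wrappers and all names are illustrative; the toy hash stands in for whatever real hash function a caller would supply.

#include <ck_rhs.h>
#include <stdlib.h>
#include <string.h>

static void *rhs_malloc(size_t r) { return malloc(r); }
static void rhs_free(void *p, size_t b, bool r) { (void)b; (void)r; free(p); }
static struct ck_malloc rhs_allocator = {
	.malloc = rhs_malloc,
	.free = rhs_free
};

static unsigned long
rhs_hash(const void *object, unsigned long seed)
{
	const char *s = object;
	unsigned long h = seed;

	/* Toy multiplicative string hash, for illustration only. */
	while (*s != '\0')
		h = h * 131 + (unsigned char)*s++;
	return h;
}

static bool
rhs_compare(const void *a, const void *b)
{

	return strcmp(a, b) == 0;
}

static bool
example(void)
{
	ck_rhs_t set;

	/* 64 initial slots, arbitrary seed; pointer objects, one writer. */
	if (ck_rhs_init(&set, CK_RHS_MODE_OBJECT | CK_RHS_MODE_SPMC,
	    rhs_hash, rhs_compare, &rhs_allocator, 64, 42) == false)
		return false;

	ck_rhs_put(&set, CK_RHS_HASH(&set, rhs_hash, "key"), "key");
	return ck_rhs_get(&set, CK_RHS_HASH(&set, rhs_hash, "key"),
	    "key") != NULL;
}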
656
sys/contrib/ck/include/ck_ring.h
Normal file
@@ -0,0 +1,656 @@
/*
 * Copyright 2009-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_RING_H
#define CK_RING_H

#include <ck_cc.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_string.h>

/*
 * Concurrent ring buffer.
 */

struct ck_ring {
	unsigned int c_head;
	char pad[CK_MD_CACHELINE - sizeof(unsigned int)];
	unsigned int p_tail;
	unsigned int p_head;
	char _pad[CK_MD_CACHELINE - sizeof(unsigned int) * 2];
	unsigned int size;
	unsigned int mask;
};
typedef struct ck_ring ck_ring_t;

struct ck_ring_buffer {
	void *value;
};
typedef struct ck_ring_buffer ck_ring_buffer_t;

CK_CC_INLINE static unsigned int
ck_ring_size(const struct ck_ring *ring)
{
	unsigned int c, p;

	c = ck_pr_load_uint(&ring->c_head);
	p = ck_pr_load_uint(&ring->p_tail);
	return (p - c) & ring->mask;
}

CK_CC_INLINE static unsigned int
ck_ring_capacity(const struct ck_ring *ring)
{
	return ring->size;
}

CK_CC_INLINE static void
ck_ring_init(struct ck_ring *ring, unsigned int size)
{

	ring->size = size;
	ring->mask = size - 1;
	ring->p_tail = 0;
	ring->p_head = 0;
	ring->c_head = 0;
	return;
}

/*
 * The _ck_ring_* namespace is internal only and must not be used externally.
 */
CK_CC_FORCE_INLINE static bool
_ck_ring_enqueue_sp(struct ck_ring *ring,
    void *CK_CC_RESTRICT buffer,
    const void *CK_CC_RESTRICT entry,
    unsigned int ts,
    unsigned int *size)
{
	const unsigned int mask = ring->mask;
	unsigned int consumer, producer, delta;

	consumer = ck_pr_load_uint(&ring->c_head);
	producer = ring->p_tail;
	delta = producer + 1;
	if (size != NULL)
		*size = (producer - consumer) & mask;

	if (CK_CC_UNLIKELY((delta & mask) == (consumer & mask)))
		return false;

	buffer = (char *)buffer + ts * (producer & mask);
	memcpy(buffer, entry, ts);

	/*
	 * Make sure to update slot value before indicating
	 * that the slot is available for consumption.
	 */
	ck_pr_fence_store();
	ck_pr_store_uint(&ring->p_tail, delta);
	return true;
}

CK_CC_FORCE_INLINE static bool
_ck_ring_enqueue_sp_size(struct ck_ring *ring,
    void *CK_CC_RESTRICT buffer,
    const void *CK_CC_RESTRICT entry,
    unsigned int ts,
    unsigned int *size)
{
	unsigned int sz;
	bool r;

	r = _ck_ring_enqueue_sp(ring, buffer, entry, ts, &sz);
	*size = sz;
	return r;
}

CK_CC_FORCE_INLINE static bool
_ck_ring_dequeue_sc(struct ck_ring *ring,
    const void *CK_CC_RESTRICT buffer,
    void *CK_CC_RESTRICT target,
    unsigned int size)
{
	const unsigned int mask = ring->mask;
	unsigned int consumer, producer;

	consumer = ring->c_head;
	producer = ck_pr_load_uint(&ring->p_tail);

	if (CK_CC_UNLIKELY(consumer == producer))
		return false;

	/*
	 * Make sure to serialize with respect to our snapshot
	 * of the producer counter.
	 */
	ck_pr_fence_load();

	buffer = (const char *)buffer + size * (consumer & mask);
	memcpy(target, buffer, size);

	/*
	 * Make sure copy is completed with respect to consumer
	 * update.
	 */
	ck_pr_fence_store();
	ck_pr_store_uint(&ring->c_head, consumer + 1);
	return true;
}

CK_CC_FORCE_INLINE static bool
_ck_ring_enqueue_mp(struct ck_ring *ring,
    void *buffer,
    const void *entry,
    unsigned int ts,
    unsigned int *size)
{
	const unsigned int mask = ring->mask;
	unsigned int producer, consumer, delta;
	bool r = true;

	producer = ck_pr_load_uint(&ring->p_head);

	do {
		/*
		 * The snapshot of producer must be up to date with
		 * respect to consumer.
		 */
		ck_pr_fence_load();
		consumer = ck_pr_load_uint(&ring->c_head);

		delta = producer + 1;
		if (CK_CC_UNLIKELY((delta & mask) == (consumer & mask))) {
			r = false;
			goto leave;
		}
	} while (ck_pr_cas_uint_value(&ring->p_head,
	    producer,
	    delta,
	    &producer) == false);

	buffer = (char *)buffer + ts * (producer & mask);
	memcpy(buffer, entry, ts);

	/*
	 * Wait until all concurrent producers have completed writing
	 * their data into the ring buffer.
	 */
	while (ck_pr_load_uint(&ring->p_tail) != producer)
		ck_pr_stall();

	/*
	 * Ensure that copy is completed before updating shared producer
	 * counter.
	 */
	ck_pr_fence_store();
	ck_pr_store_uint(&ring->p_tail, delta);

leave:
	if (size != NULL)
		*size = (producer - consumer) & mask;

	return r;
}

CK_CC_FORCE_INLINE static bool
_ck_ring_enqueue_mp_size(struct ck_ring *ring,
    void *buffer,
    const void *entry,
    unsigned int ts,
    unsigned int *size)
{
	unsigned int sz;
	bool r;

	r = _ck_ring_enqueue_mp(ring, buffer, entry, ts, &sz);
	*size = sz;
	return r;
}

CK_CC_FORCE_INLINE static bool
_ck_ring_trydequeue_mc(struct ck_ring *ring,
    const void *buffer,
    void *data,
    unsigned int size)
{
	const unsigned int mask = ring->mask;
	unsigned int consumer, producer;

	consumer = ck_pr_load_uint(&ring->c_head);
	ck_pr_fence_load();
	producer = ck_pr_load_uint(&ring->p_tail);

	if (CK_CC_UNLIKELY(consumer == producer))
		return false;

	ck_pr_fence_load();

	buffer = (const char *)buffer + size * (consumer & mask);
	memcpy(data, buffer, size);

	ck_pr_fence_store_atomic();
	return ck_pr_cas_uint(&ring->c_head, consumer, consumer + 1);
}

CK_CC_FORCE_INLINE static bool
_ck_ring_dequeue_mc(struct ck_ring *ring,
    const void *buffer,
    void *data,
    unsigned int ts)
{
	const unsigned int mask = ring->mask;
	unsigned int consumer, producer;

	consumer = ck_pr_load_uint(&ring->c_head);

	do {
		const char *target;

		/*
		 * Producer counter must represent state relative to
		 * our latest consumer snapshot.
		 */
		ck_pr_fence_load();
		producer = ck_pr_load_uint(&ring->p_tail);

		if (CK_CC_UNLIKELY(consumer == producer))
			return false;

		ck_pr_fence_load();

		target = (const char *)buffer + ts * (consumer & mask);
		memcpy(data, target, ts);

		/* Serialize load with respect to head update. */
		ck_pr_fence_store_atomic();
	} while (ck_pr_cas_uint_value(&ring->c_head,
	    consumer,
	    consumer + 1,
	    &consumer) == false);

	return true;
}

/*
 * The ck_ring_*_spsc namespace is the public interface for interacting with a
 * ring buffer containing pointers. Correctness is only provided if there is up
 * to one concurrent consumer and up to one concurrent producer.
 */
CK_CC_INLINE static bool
ck_ring_enqueue_spsc_size(struct ck_ring *ring,
    struct ck_ring_buffer *buffer,
    const void *entry,
    unsigned int *size)
{

	return _ck_ring_enqueue_sp_size(ring, buffer, &entry,
	    sizeof(entry), size);
}

CK_CC_INLINE static bool
ck_ring_enqueue_spsc(struct ck_ring *ring,
    struct ck_ring_buffer *buffer,
    const void *entry)
{

	return _ck_ring_enqueue_sp(ring, buffer,
	    &entry, sizeof(entry), NULL);
}

CK_CC_INLINE static bool
ck_ring_dequeue_spsc(struct ck_ring *ring,
    const struct ck_ring_buffer *buffer,
    void *data)
{

	return _ck_ring_dequeue_sc(ring, buffer,
	    (void **)data, sizeof(void *));
}

/*
 * The ck_ring_*_mpmc namespace is the public interface for interacting with a
 * ring buffer containing pointers. Correctness is provided for any number of
 * producers and consumers.
 */
CK_CC_INLINE static bool
ck_ring_enqueue_mpmc(struct ck_ring *ring,
    struct ck_ring_buffer *buffer,
    const void *entry)
{

	return _ck_ring_enqueue_mp(ring, buffer, &entry,
	    sizeof(entry), NULL);
}

CK_CC_INLINE static bool
ck_ring_enqueue_mpmc_size(struct ck_ring *ring,
    struct ck_ring_buffer *buffer,
    const void *entry,
    unsigned int *size)
{

	return _ck_ring_enqueue_mp_size(ring, buffer, &entry,
	    sizeof(entry), size);
}

CK_CC_INLINE static bool
ck_ring_trydequeue_mpmc(struct ck_ring *ring,
    const struct ck_ring_buffer *buffer,
    void *data)
{

	return _ck_ring_trydequeue_mc(ring,
	    buffer, (void **)data, sizeof(void *));
}

CK_CC_INLINE static bool
ck_ring_dequeue_mpmc(struct ck_ring *ring,
    const struct ck_ring_buffer *buffer,
    void *data)
{

	return _ck_ring_dequeue_mc(ring, buffer, (void **)data,
	    sizeof(void *));
}

/*
 * The ck_ring_*_spmc namespace is the public interface for interacting with a
 * ring buffer containing pointers. Correctness is provided for any number of
 * consumers with up to one concurrent producer.
 */
CK_CC_INLINE static bool
ck_ring_enqueue_spmc_size(struct ck_ring *ring,
    struct ck_ring_buffer *buffer,
    const void *entry,
    unsigned int *size)
{

	return _ck_ring_enqueue_sp_size(ring, buffer, &entry,
	    sizeof(entry), size);
}

CK_CC_INLINE static bool
ck_ring_enqueue_spmc(struct ck_ring *ring,
    struct ck_ring_buffer *buffer,
    const void *entry)
{

	return _ck_ring_enqueue_sp(ring, buffer, &entry,
	    sizeof(entry), NULL);
}

CK_CC_INLINE static bool
ck_ring_trydequeue_spmc(struct ck_ring *ring,
    const struct ck_ring_buffer *buffer,
    void *data)
{

	return _ck_ring_trydequeue_mc(ring, buffer, (void **)data, sizeof(void *));
}

CK_CC_INLINE static bool
ck_ring_dequeue_spmc(struct ck_ring *ring,
    const struct ck_ring_buffer *buffer,
    void *data)
{

	return _ck_ring_dequeue_mc(ring, buffer, (void **)data, sizeof(void *));
}

/*
 * The ck_ring_*_mpsc namespace is the public interface for interacting with a
 * ring buffer containing pointers. Correctness is provided for any number of
 * producers with up to one concurrent consumer.
 */
CK_CC_INLINE static bool
ck_ring_enqueue_mpsc(struct ck_ring *ring,
    struct ck_ring_buffer *buffer,
    const void *entry)
{

	return _ck_ring_enqueue_mp(ring, buffer, &entry,
	    sizeof(entry), NULL);
}

CK_CC_INLINE static bool
ck_ring_enqueue_mpsc_size(struct ck_ring *ring,
    struct ck_ring_buffer *buffer,
    const void *entry,
    unsigned int *size)
{

	return _ck_ring_enqueue_mp_size(ring, buffer, &entry,
	    sizeof(entry), size);
}

CK_CC_INLINE static bool
ck_ring_dequeue_mpsc(struct ck_ring *ring,
    const struct ck_ring_buffer *buffer,
    void *data)
{

	return _ck_ring_dequeue_sc(ring, buffer, (void **)data,
	    sizeof(void *));
}

/*
 * CK_RING_PROTOTYPE is used to define a type-safe interface for inlining
 * values of a particular type in the ring buffer.
 */
#define CK_RING_PROTOTYPE(name, type) \
CK_CC_INLINE static bool \
ck_ring_enqueue_spsc_size_##name(struct ck_ring *a, \
    struct type *b, \
    struct type *c, \
    unsigned int *d) \
{ \
 \
	return _ck_ring_enqueue_sp_size(a, b, c, \
	    sizeof(struct type), d); \
} \
 \
CK_CC_INLINE static bool \
ck_ring_enqueue_spsc_##name(struct ck_ring *a, \
    struct type *b, \
    struct type *c) \
{ \
 \
	return _ck_ring_enqueue_sp(a, b, c, \
	    sizeof(struct type), NULL); \
} \
 \
CK_CC_INLINE static bool \
ck_ring_dequeue_spsc_##name(struct ck_ring *a, \
    struct type *b, \
    struct type *c) \
{ \
 \
	return _ck_ring_dequeue_sc(a, b, c, \
	    sizeof(struct type)); \
} \
 \
CK_CC_INLINE static bool \
ck_ring_enqueue_spmc_size_##name(struct ck_ring *a, \
    struct type *b, \
    struct type *c, \
    unsigned int *d) \
{ \
 \
	return _ck_ring_enqueue_sp_size(a, b, c, \
	    sizeof(struct type), d); \
} \
 \
CK_CC_INLINE static bool \
ck_ring_enqueue_spmc_##name(struct ck_ring *a, \
    struct type *b, \
    struct type *c) \
{ \
 \
	return _ck_ring_enqueue_sp(a, b, c, \
	    sizeof(struct type), NULL); \
} \
 \
CK_CC_INLINE static bool \
ck_ring_trydequeue_spmc_##name(struct ck_ring *a, \
    struct type *b, \
    struct type *c) \
{ \
 \
	return _ck_ring_trydequeue_mc(a, \
	    b, c, sizeof(struct type)); \
} \
 \
CK_CC_INLINE static bool \
ck_ring_dequeue_spmc_##name(struct ck_ring *a, \
    struct type *b, \
    struct type *c) \
{ \
 \
	return _ck_ring_dequeue_mc(a, b, c, \
	    sizeof(struct type)); \
} \
 \
CK_CC_INLINE static bool \
ck_ring_enqueue_mpsc_##name(struct ck_ring *a, \
    struct type *b, \
    struct type *c) \
{ \
 \
	return _ck_ring_enqueue_mp(a, b, c, \
	    sizeof(struct type), NULL); \
} \
 \
CK_CC_INLINE static bool \
ck_ring_enqueue_mpsc_size_##name(struct ck_ring *a, \
    struct type *b, \
    struct type *c, \
    unsigned int *d) \
{ \
 \
	return _ck_ring_enqueue_mp_size(a, b, c, \
	    sizeof(struct type), d); \
} \
 \
CK_CC_INLINE static bool \
ck_ring_dequeue_mpsc_##name(struct ck_ring *a, \
    struct type *b, \
    struct type *c) \
{ \
 \
	return _ck_ring_dequeue_sc(a, b, c, \
	    sizeof(struct type)); \
} \
 \
CK_CC_INLINE static bool \
ck_ring_enqueue_mpmc_size_##name(struct ck_ring *a, \
    struct type *b, \
    struct type *c, \
    unsigned int *d) \
{ \
 \
	return _ck_ring_enqueue_mp_size(a, b, c, \
	    sizeof(struct type), d); \
} \
 \
CK_CC_INLINE static bool \
ck_ring_enqueue_mpmc_##name(struct ck_ring *a, \
    struct type *b, \
    struct type *c) \
{ \
 \
	return _ck_ring_enqueue_mp(a, b, c, \
	    sizeof(struct type), NULL); \
} \
 \
CK_CC_INLINE static bool \
ck_ring_trydequeue_mpmc_##name(struct ck_ring *a, \
    struct type *b, \
    struct type *c) \
{ \
 \
	return _ck_ring_trydequeue_mc(a, \
	    b, c, sizeof(struct type)); \
} \
 \
CK_CC_INLINE static bool \
ck_ring_dequeue_mpmc_##name(struct ck_ring *a, \
    struct type *b, \
    struct type *c) \
{ \
 \
	return _ck_ring_dequeue_mc(a, b, c, \
	    sizeof(struct type)); \
}

/*
 * A single producer with one concurrent consumer.
 */
#define CK_RING_ENQUEUE_SPSC(name, a, b, c) \
	ck_ring_enqueue_spsc_##name(a, b, c)
#define CK_RING_ENQUEUE_SPSC_SIZE(name, a, b, c, d) \
	ck_ring_enqueue_spsc_size_##name(a, b, c, d)
#define CK_RING_DEQUEUE_SPSC(name, a, b, c) \
	ck_ring_dequeue_spsc_##name(a, b, c)

/*
 * A single producer with any number of concurrent consumers.
 */
#define CK_RING_ENQUEUE_SPMC(name, a, b, c) \
	ck_ring_enqueue_spmc_##name(a, b, c)
#define CK_RING_ENQUEUE_SPMC_SIZE(name, a, b, c, d) \
	ck_ring_enqueue_spmc_size_##name(a, b, c, d)
#define CK_RING_TRYDEQUEUE_SPMC(name, a, b, c) \
	ck_ring_trydequeue_spmc_##name(a, b, c)
#define CK_RING_DEQUEUE_SPMC(name, a, b, c) \
	ck_ring_dequeue_spmc_##name(a, b, c)

/*
 * Any number of concurrent producers with up to one
 * concurrent consumer.
 */
#define CK_RING_ENQUEUE_MPSC(name, a, b, c) \
	ck_ring_enqueue_mpsc_##name(a, b, c)
#define CK_RING_ENQUEUE_MPSC_SIZE(name, a, b, c, d) \
	ck_ring_enqueue_mpsc_size_##name(a, b, c, d)
#define CK_RING_DEQUEUE_MPSC(name, a, b, c) \
	ck_ring_dequeue_mpsc_##name(a, b, c)

/*
 * Any number of concurrent producers and consumers.
 */
#define CK_RING_ENQUEUE_MPMC(name, a, b, c) \
	ck_ring_enqueue_mpmc_##name(a, b, c)
#define CK_RING_ENQUEUE_MPMC_SIZE(name, a, b, c, d) \
	ck_ring_enqueue_mpmc_size_##name(a, b, c, d)
#define CK_RING_TRYDEQUEUE_MPMC(name, a, b, c) \
	ck_ring_trydequeue_mpmc_##name(a, b, c)
#define CK_RING_DEQUEUE_MPMC(name, a, b, c) \
	ck_ring_dequeue_mpmc_##name(a, b, c)

#endif /* CK_RING_H */
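A sketch of the pointer-oriented spsc interface above; the buffer sizing and all names are illustrative. The capacity must be a power of two, since index arithmetic relies on the mask computed in ck_ring_init; typed rings would instead go through CK_RING_PROTOTYPE and the CK_RING_* wrappers.

#include <ck_ring.h>

#define RING_SIZE 1024	/* must be a power of two */

static struct ck_ring ring;
static struct ck_ring_buffer ring_buffer[RING_SIZE];

static void
setup(void)
{

	ck_ring_init(&ring, RING_SIZE);
}

/* Producer thread: returns false when the ring is full. */
static bool
produce(void *message)
{

	return ck_ring_enqueue_spsc(&ring, ring_buffer, message);
}

/* Consumer thread: returns false when the ring is empty. */
static bool
consume(void **message)
{

	return ck_ring_dequeue_spsc(&ring, ring_buffer, message);
}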
317
sys/contrib/ck/include/ck_rwcohort.h
Normal file
@@ -0,0 +1,317 @@
/*
 * Copyright 2013-2015 Samy Al Bahra.
 * Copyright 2013 Brendon Scheinman.
 * All rights reserved.
 *
 * (Standard Concurrency Kit 2-clause BSD license text, identical to the
 * other headers in this import.)
 */

#ifndef CK_RWCOHORT_H
#define CK_RWCOHORT_H

/*
 * This is an implementation of NUMA-aware reader-writer locks as described in:
 *     Calciu, I.; Dice, D.; Lev, Y.; Luchangco, V.; Marathe, V.; and Shavit, N. 2014.
 *     NUMA-Aware Reader-Writer Locks
 */

#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stddef.h>
#include <ck_cohort.h>

#define CK_RWCOHORT_WP_NAME(N) ck_rwcohort_wp_##N
#define CK_RWCOHORT_WP_INSTANCE(N) struct CK_RWCOHORT_WP_NAME(N)
#define CK_RWCOHORT_WP_INIT(N, RW, WL) ck_rwcohort_wp_##N##_init(RW, WL)
#define CK_RWCOHORT_WP_READ_LOCK(N, RW, C, GC, LC) \
    ck_rwcohort_wp_##N##_read_lock(RW, C, GC, LC)
#define CK_RWCOHORT_WP_READ_UNLOCK(N, RW, C, GC, LC) \
    ck_rwcohort_wp_##N##_read_unlock(RW)
#define CK_RWCOHORT_WP_WRITE_LOCK(N, RW, C, GC, LC) \
    ck_rwcohort_wp_##N##_write_lock(RW, C, GC, LC)
#define CK_RWCOHORT_WP_WRITE_UNLOCK(N, RW, C, GC, LC) \
    ck_rwcohort_wp_##N##_write_unlock(RW, C, GC, LC)
#define CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT 1000

#define CK_RWCOHORT_WP_PROTOTYPE(N) \
CK_RWCOHORT_WP_INSTANCE(N) { \
    unsigned int read_counter; \
    unsigned int write_barrier; \
    unsigned int wait_limit; \
}; \
CK_CC_INLINE static void \
ck_rwcohort_wp_##N##_init(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
    unsigned int wait_limit) \
{ \
\
    rw_cohort->read_counter = 0; \
    rw_cohort->write_barrier = 0; \
    rw_cohort->wait_limit = wait_limit; \
    ck_pr_barrier(); \
    return; \
} \
CK_CC_INLINE static void \
ck_rwcohort_wp_##N##_write_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
    void *local_context) \
{ \
\
    while (ck_pr_load_uint(&rw_cohort->write_barrier) > 0) \
        ck_pr_stall(); \
\
    CK_COHORT_LOCK(N, cohort, global_context, local_context); \
\
    while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) \
        ck_pr_stall(); \
\
    return; \
} \
CK_CC_INLINE static void \
ck_rwcohort_wp_##N##_write_unlock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
    void *local_context) \
{ \
\
    (void)rw_cohort; \
    CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
    return; \
} \
CK_CC_INLINE static void \
ck_rwcohort_wp_##N##_read_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
    void *local_context) \
{ \
    unsigned int wait_count = 0; \
    bool raised = false; \
\
    for (;;) { \
        ck_pr_inc_uint(&rw_cohort->read_counter); \
        ck_pr_fence_atomic_load(); \
        if (CK_COHORT_LOCKED(N, cohort, global_context, \
            local_context) == false) \
            break; \
\
        ck_pr_dec_uint(&rw_cohort->read_counter); \
        while (CK_COHORT_LOCKED(N, cohort, global_context, \
            local_context) == true) { \
            ck_pr_stall(); \
            if (++wait_count > rw_cohort->wait_limit && \
                raised == false) { \
                ck_pr_inc_uint(&rw_cohort->write_barrier); \
                raised = true; \
            } \
        } \
    } \
\
    if (raised == true) \
        ck_pr_dec_uint(&rw_cohort->write_barrier); \
\
    ck_pr_fence_load(); \
    return; \
} \
CK_CC_INLINE static void \
ck_rwcohort_wp_##N##_read_unlock(CK_RWCOHORT_WP_INSTANCE(N) *cohort) \
{ \
\
    ck_pr_fence_load_atomic(); \
    ck_pr_dec_uint(&cohort->read_counter); \
    return; \
}

#define CK_RWCOHORT_WP_INITIALIZER { \
    .read_counter = 0, \
    .write_barrier = 0, \
    .wait_limit = 0 \
}

#define CK_RWCOHORT_RP_NAME(N) ck_rwcohort_rp_##N
#define CK_RWCOHORT_RP_INSTANCE(N) struct CK_RWCOHORT_RP_NAME(N)
#define CK_RWCOHORT_RP_INIT(N, RW, WL) ck_rwcohort_rp_##N##_init(RW, WL)
#define CK_RWCOHORT_RP_READ_LOCK(N, RW, C, GC, LC) \
    ck_rwcohort_rp_##N##_read_lock(RW, C, GC, LC)
#define CK_RWCOHORT_RP_READ_UNLOCK(N, RW, C, GC, LC) \
    ck_rwcohort_rp_##N##_read_unlock(RW)
#define CK_RWCOHORT_RP_WRITE_LOCK(N, RW, C, GC, LC) \
    ck_rwcohort_rp_##N##_write_lock(RW, C, GC, LC)
#define CK_RWCOHORT_RP_WRITE_UNLOCK(N, RW, C, GC, LC) \
    ck_rwcohort_rp_##N##_write_unlock(RW, C, GC, LC)
#define CK_RWCOHORT_RP_DEFAULT_WAIT_LIMIT 1000

#define CK_RWCOHORT_RP_PROTOTYPE(N) \
CK_RWCOHORT_RP_INSTANCE(N) { \
    unsigned int read_counter; \
    unsigned int read_barrier; \
    unsigned int wait_limit; \
}; \
CK_CC_INLINE static void \
ck_rwcohort_rp_##N##_init(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
    unsigned int wait_limit) \
{ \
\
    rw_cohort->read_counter = 0; \
    rw_cohort->read_barrier = 0; \
    rw_cohort->wait_limit = wait_limit; \
    ck_pr_barrier(); \
    return; \
} \
CK_CC_INLINE static void \
ck_rwcohort_rp_##N##_write_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
    void *local_context) \
{ \
    unsigned int wait_count = 0; \
    bool raised = false; \
\
    for (;;) { \
        CK_COHORT_LOCK(N, cohort, global_context, local_context); \
        if (ck_pr_load_uint(&rw_cohort->read_counter) == 0) \
            break; \
\
        CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
        while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \
            ck_pr_stall(); \
            if (++wait_count > rw_cohort->wait_limit && \
                raised == false) { \
                ck_pr_inc_uint(&rw_cohort->read_barrier); \
                raised = true; \
            } \
        } \
    } \
\
    if (raised == true) \
        ck_pr_dec_uint(&rw_cohort->read_barrier); \
\
    return; \
} \
CK_CC_INLINE static void \
ck_rwcohort_rp_##N##_write_unlock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
    CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \
{ \
\
    (void)rw_cohort; \
    CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
    return; \
} \
CK_CC_INLINE static void \
ck_rwcohort_rp_##N##_read_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
    void *local_context) \
{ \
\
    while (ck_pr_load_uint(&rw_cohort->read_barrier) > 0) \
        ck_pr_stall(); \
\
    ck_pr_inc_uint(&rw_cohort->read_counter); \
    ck_pr_fence_atomic_load(); \
\
    while (CK_COHORT_LOCKED(N, cohort, global_context, \
        local_context) == true) \
        ck_pr_stall(); \
\
    return; \
} \
CK_CC_INLINE static void \
ck_rwcohort_rp_##N##_read_unlock(CK_RWCOHORT_RP_INSTANCE(N) *cohort) \
{ \
\
    ck_pr_fence_load_atomic(); \
    ck_pr_dec_uint(&cohort->read_counter); \
    return; \
}

#define CK_RWCOHORT_RP_INITIALIZER { \
    .read_counter = 0, \
    .read_barrier = 0, \
    .wait_limit = 0 \
}

#define CK_RWCOHORT_NEUTRAL_NAME(N) ck_rwcohort_neutral_##N
#define CK_RWCOHORT_NEUTRAL_INSTANCE(N) struct CK_RWCOHORT_NEUTRAL_NAME(N)
#define CK_RWCOHORT_NEUTRAL_INIT(N, RW) ck_rwcohort_neutral_##N##_init(RW)
#define CK_RWCOHORT_NEUTRAL_READ_LOCK(N, RW, C, GC, LC) \
    ck_rwcohort_neutral_##N##_read_lock(RW, C, GC, LC)
#define CK_RWCOHORT_NEUTRAL_READ_UNLOCK(N, RW, C, GC, LC) \
    ck_rwcohort_neutral_##N##_read_unlock(RW)
#define CK_RWCOHORT_NEUTRAL_WRITE_LOCK(N, RW, C, GC, LC) \
    ck_rwcohort_neutral_##N##_write_lock(RW, C, GC, LC)
#define CK_RWCOHORT_NEUTRAL_WRITE_UNLOCK(N, RW, C, GC, LC) \
    ck_rwcohort_neutral_##N##_write_unlock(RW, C, GC, LC)
#define CK_RWCOHORT_NEUTRAL_DEFAULT_WAIT_LIMIT 1000

#define CK_RWCOHORT_NEUTRAL_PROTOTYPE(N) \
CK_RWCOHORT_NEUTRAL_INSTANCE(N) { \
    unsigned int read_counter; \
}; \
CK_CC_INLINE static void \
ck_rwcohort_neutral_##N##_init(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort) \
{ \
\
    rw_cohort->read_counter = 0; \
    ck_pr_barrier(); \
    return; \
} \
CK_CC_INLINE static void \
ck_rwcohort_neutral_##N##_write_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \
    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
    void *local_context) \
{ \
\
    CK_COHORT_LOCK(N, cohort, global_context, local_context); \
    while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \
        ck_pr_stall(); \
    } \
    return; \
} \
CK_CC_INLINE static void \
ck_rwcohort_neutral_##N##_write_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \
    CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \
{ \
\
    (void)rw_cohort; \
    CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
    return; \
} \
CK_CC_INLINE static void \
ck_rwcohort_neutral_##N##_read_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \
    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
    void *local_context) \
{ \
\
    CK_COHORT_LOCK(N, cohort, global_context, local_context); \
    ck_pr_inc_uint(&rw_cohort->read_counter); \
    CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
    return; \
} \
CK_CC_INLINE static void \
ck_rwcohort_neutral_##N##_read_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *cohort) \
{ \
\
    ck_pr_fence_load_atomic(); \
    ck_pr_dec_uint(&cohort->read_counter); \
    return; \
}

#define CK_RWCOHORT_NEUTRAL_INITIALIZER { \
    .read_counter = 0, \
}

#endif /* CK_RWCOHORT_H */
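A rough sketch of the intended layering follows: a cohort type is instantiated over a pair of raw locks (plain spinlocks here, standing in for a global lock and a per-NUMA-node lock), and a write-preference rwcohort is instantiated over that cohort type. The CK_COHORT_PROTOTYPE and CK_COHORT_INIT argument lists below are recalled from the ck_cohort.h API and should be treated as assumptions to verify against that header; the mylock name is illustrative.

#include <ck_cohort.h>
#include <ck_rwcohort.h>
#include <ck_spinlock.h>
#include <ck_stdbool.h>

/* ck_cohort drives its underlying locks through opaque callbacks. */
static void
lock_fn(void *lock, void *context)
{
    (void)context;
    ck_spinlock_lock(lock);
}

static void
unlock_fn(void *lock, void *context)
{
    (void)context;
    ck_spinlock_unlock(lock);
}

static bool
locked_fn(void *lock, void *context)
{
    (void)context;
    return ck_spinlock_locked(lock);
}

/* One cohort type, then a write-preference rwcohort layered on top of it. */
CK_COHORT_PROTOTYPE(mylock, lock_fn, unlock_fn, locked_fn,
    lock_fn, unlock_fn, locked_fn)
CK_RWCOHORT_WP_PROTOTYPE(mylock)

static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER;
static ck_spinlock_t node_lock = CK_SPINLOCK_INITIALIZER;
static CK_COHORT_INSTANCE(mylock) cohort;
static CK_RWCOHORT_WP_INSTANCE(mylock) rw;

static void
rwcohort_example(void)
{
    CK_COHORT_INIT(mylock, &cohort, &global_lock, &node_lock,
        CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
    CK_RWCOHORT_WP_INIT(mylock, &rw, CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT);

    CK_RWCOHORT_WP_READ_LOCK(mylock, &rw, &cohort, NULL, NULL);
    /* Read-side critical section. */
    CK_RWCOHORT_WP_READ_UNLOCK(mylock, &rw, &cohort, NULL, NULL);

    CK_RWCOHORT_WP_WRITE_LOCK(mylock, &rw, &cohort, NULL, NULL);
    /* Write-side critical section. */
    CK_RWCOHORT_WP_WRITE_UNLOCK(mylock, &rw, &cohort, NULL, NULL);
}

In a real NUMA deployment each node would own its own local lock and cohort instance; the sketch uses a single cohort only for brevity.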
302
sys/contrib/ck/include/ck_rwlock.h
Normal file
@@ -0,0 +1,302 @@
/*
 * Copyright 2011-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * (Standard Concurrency Kit 2-clause BSD license text, identical to the
 * other headers in this import.)
 */

#ifndef CK_RWLOCK_H
#define CK_RWLOCK_H

#include <ck_elide.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

struct ck_rwlock {
    unsigned int writer;
    unsigned int n_readers;
};
typedef struct ck_rwlock ck_rwlock_t;

#define CK_RWLOCK_INITIALIZER {0, 0}

CK_CC_INLINE static void
ck_rwlock_init(struct ck_rwlock *rw)
{

    rw->writer = 0;
    rw->n_readers = 0;
    ck_pr_barrier();
    return;
}

CK_CC_INLINE static void
ck_rwlock_write_unlock(ck_rwlock_t *rw)
{

    ck_pr_fence_unlock();
    ck_pr_store_uint(&rw->writer, 0);
    return;
}

CK_CC_INLINE static bool
ck_rwlock_locked_writer(ck_rwlock_t *rw)
{
    bool r;

    r = ck_pr_load_uint(&rw->writer);
    ck_pr_fence_acquire();
    return r;
}

CK_CC_INLINE static void
ck_rwlock_write_downgrade(ck_rwlock_t *rw)
{

    ck_pr_inc_uint(&rw->n_readers);
    ck_rwlock_write_unlock(rw);
    return;
}

CK_CC_INLINE static bool
ck_rwlock_locked(ck_rwlock_t *rw)
{
    bool l;

    l = ck_pr_load_uint(&rw->n_readers) |
        ck_pr_load_uint(&rw->writer);
    ck_pr_fence_acquire();
    return l;
}

CK_CC_INLINE static bool
ck_rwlock_write_trylock(ck_rwlock_t *rw)
{

    if (ck_pr_fas_uint(&rw->writer, 1) != 0)
        return false;

    ck_pr_fence_atomic_load();

    if (ck_pr_load_uint(&rw->n_readers) != 0) {
        ck_rwlock_write_unlock(rw);
        return false;
    }

    ck_pr_fence_lock();
    return true;
}

CK_ELIDE_TRYLOCK_PROTOTYPE(ck_rwlock_write, ck_rwlock_t,
    ck_rwlock_locked, ck_rwlock_write_trylock)

CK_CC_INLINE static void
ck_rwlock_write_lock(ck_rwlock_t *rw)
{

    while (ck_pr_fas_uint(&rw->writer, 1) != 0)
        ck_pr_stall();

    ck_pr_fence_atomic_load();

    while (ck_pr_load_uint(&rw->n_readers) != 0)
        ck_pr_stall();

    ck_pr_fence_lock();
    return;
}

CK_ELIDE_PROTOTYPE(ck_rwlock_write, ck_rwlock_t,
    ck_rwlock_locked, ck_rwlock_write_lock,
    ck_rwlock_locked_writer, ck_rwlock_write_unlock)

CK_CC_INLINE static bool
ck_rwlock_read_trylock(ck_rwlock_t *rw)
{

    if (ck_pr_load_uint(&rw->writer) != 0)
        return false;

    ck_pr_inc_uint(&rw->n_readers);

    /*
     * Serialize with respect to concurrent write
     * lock operation.
     */
    ck_pr_fence_atomic_load();

    if (ck_pr_load_uint(&rw->writer) == 0) {
        ck_pr_fence_lock();
        return true;
    }

    ck_pr_dec_uint(&rw->n_readers);
    return false;
}

CK_ELIDE_TRYLOCK_PROTOTYPE(ck_rwlock_read, ck_rwlock_t,
    ck_rwlock_locked_writer, ck_rwlock_read_trylock)

CK_CC_INLINE static void
ck_rwlock_read_lock(ck_rwlock_t *rw)
{

    for (;;) {
        while (ck_pr_load_uint(&rw->writer) != 0)
            ck_pr_stall();

        ck_pr_inc_uint(&rw->n_readers);

        /*
         * Serialize with respect to concurrent write
         * lock operation.
         */
        ck_pr_fence_atomic_load();

        if (ck_pr_load_uint(&rw->writer) == 0)
            break;

        ck_pr_dec_uint(&rw->n_readers);
    }

    /* Acquire semantics are necessary. */
    ck_pr_fence_load();
    return;
}

CK_CC_INLINE static bool
ck_rwlock_locked_reader(ck_rwlock_t *rw)
{

    ck_pr_fence_load();
    return ck_pr_load_uint(&rw->n_readers);
}

CK_CC_INLINE static void
ck_rwlock_read_unlock(ck_rwlock_t *rw)
{

    ck_pr_fence_load_atomic();
    ck_pr_dec_uint(&rw->n_readers);
    return;
}

CK_ELIDE_PROTOTYPE(ck_rwlock_read, ck_rwlock_t,
    ck_rwlock_locked_writer, ck_rwlock_read_lock,
    ck_rwlock_locked_reader, ck_rwlock_read_unlock)

/*
 * Recursive writer reader-writer lock implementation.
 */
struct ck_rwlock_recursive {
    struct ck_rwlock rw;
    unsigned int wc;
};
typedef struct ck_rwlock_recursive ck_rwlock_recursive_t;

#define CK_RWLOCK_RECURSIVE_INITIALIZER {CK_RWLOCK_INITIALIZER, 0}

CK_CC_INLINE static void
ck_rwlock_recursive_write_lock(ck_rwlock_recursive_t *rw, unsigned int tid)
{
    unsigned int o;

    o = ck_pr_load_uint(&rw->rw.writer);
    if (o == tid)
        goto leave;

    while (ck_pr_cas_uint(&rw->rw.writer, 0, tid) == false)
        ck_pr_stall();

    ck_pr_fence_atomic_load();

    while (ck_pr_load_uint(&rw->rw.n_readers) != 0)
        ck_pr_stall();

    ck_pr_fence_lock();
leave:
    rw->wc++;
    return;
}

CK_CC_INLINE static bool
ck_rwlock_recursive_write_trylock(ck_rwlock_recursive_t *rw, unsigned int tid)
{
    unsigned int o;

    o = ck_pr_load_uint(&rw->rw.writer);
    if (o == tid)
        goto leave;

    if (ck_pr_cas_uint(&rw->rw.writer, 0, tid) == false)
        return false;

    ck_pr_fence_atomic_load();

    if (ck_pr_load_uint(&rw->rw.n_readers) != 0) {
        ck_pr_store_uint(&rw->rw.writer, 0);
        return false;
    }

    ck_pr_fence_lock();
leave:
    rw->wc++;
    return true;
}

CK_CC_INLINE static void
ck_rwlock_recursive_write_unlock(ck_rwlock_recursive_t *rw)
{

    if (--rw->wc == 0) {
        ck_pr_fence_unlock();
        ck_pr_store_uint(&rw->rw.writer, 0);
    }

    return;
}

CK_CC_INLINE static void
ck_rwlock_recursive_read_lock(ck_rwlock_recursive_t *rw)
{

    ck_rwlock_read_lock(&rw->rw);
    return;
}

CK_CC_INLINE static bool
ck_rwlock_recursive_read_trylock(ck_rwlock_recursive_t *rw)
{

    return ck_rwlock_read_trylock(&rw->rw);
}

CK_CC_INLINE static void
ck_rwlock_recursive_read_unlock(ck_rwlock_recursive_t *rw)
{

    ck_rwlock_read_unlock(&rw->rw);
    return;
}

#endif /* CK_RWLOCK_H */
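Usage of this lock is conventional; a minimal sketch using only functions defined above (the critical sections are placeholders):

#include <ck_rwlock.h>

static ck_rwlock_t lock = CK_RWLOCK_INITIALIZER;

static void
reader(void)
{
    ck_rwlock_read_lock(&lock);
    /* Read-side critical section: any number of concurrent readers. */
    ck_rwlock_read_unlock(&lock);
}

static void
writer(void)
{
    ck_rwlock_write_lock(&lock);
    /* Write-side critical section: excludes readers and other writers. */
    ck_rwlock_write_unlock(&lock);

    /* A writer may also atomically become a reader. */
    ck_rwlock_write_lock(&lock);
    ck_rwlock_write_downgrade(&lock);
    ck_rwlock_read_unlock(&lock);
}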
125
sys/contrib/ck/include/ck_sequence.h
Normal file
@@ -0,0 +1,125 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * (Standard Concurrency Kit 2-clause BSD license text, identical to the
 * other headers in this import.)
 */

#ifndef CK_SEQUENCE_H
#define CK_SEQUENCE_H

#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stdbool.h>

struct ck_sequence {
    unsigned int sequence;
};
typedef struct ck_sequence ck_sequence_t;

#define CK_SEQUENCE_INITIALIZER { .sequence = 0 }

CK_CC_INLINE static void
ck_sequence_init(struct ck_sequence *sq)
{

    ck_pr_store_uint(&sq->sequence, 0);
    return;
}

CK_CC_INLINE static unsigned int
ck_sequence_read_begin(const struct ck_sequence *sq)
{
    unsigned int version;

    for (;;) {
        version = ck_pr_load_uint(&sq->sequence);

        /*
         * If a sequence is even then associated data may be in a
         * consistent state.
         */
        if (CK_CC_LIKELY((version & 1) == 0))
            break;

        /*
         * If a sequence is odd then a thread is in the middle of an
         * update. Retry the read to avoid operating on inconsistent
         * data.
         */
        ck_pr_stall();
    }

    ck_pr_fence_load();
    return version;
}

CK_CC_INLINE static bool
ck_sequence_read_retry(const struct ck_sequence *sq, unsigned int version)
{

    /*
     * If the sequence number was updated then a read should be
     * re-attempted.
     */
    ck_pr_fence_load();
    return ck_pr_load_uint(&sq->sequence) != version;
}

#define CK_SEQUENCE_READ(seqlock, version) \
    for (*(version) = 1; \
        (*(version) != 0) && (*(version) = ck_sequence_read_begin(seqlock), 1); \
        *(version) = ck_sequence_read_retry(seqlock, *(version)))

/*
 * This must be called after a successful mutex acquisition.
 */
CK_CC_INLINE static void
ck_sequence_write_begin(struct ck_sequence *sq)
{

    /*
     * Increment the sequence to an odd number to indicate
     * the beginning of a write update.
     */
    ck_pr_store_uint(&sq->sequence, sq->sequence + 1);
    ck_pr_fence_store();
    return;
}

/*
 * This must be called before mutex ownership is relinquished.
 */
CK_CC_INLINE static void
ck_sequence_write_end(struct ck_sequence *sq)
{

    /*
     * Increment the sequence to an even number to indicate
     * completion of a write update.
     */
    ck_pr_fence_store();
    ck_pr_store_uint(&sq->sequence, sq->sequence + 1);
    return;
}

#endif /* CK_SEQUENCE_H */
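A minimal sketch of the read/write protocol this header defines. The two protected counters are illustrative, as is the assumption that all writers are already serialized by some external mutex, which the write-side comments above require:

#include <ck_sequence.h>

static struct ck_sequence seq = CK_SEQUENCE_INITIALIZER;
static unsigned int a, b;   /* Snapshot data protected by the seqlock. */

/* Reader: retries until it observes a consistent (a, b) pair.
 * CK_SEQUENCE_READ(&seq, &version) is the macro form of this loop. */
static void
read_pair(unsigned int *x, unsigned int *y)
{
    unsigned int version;

    do {
        version = ck_sequence_read_begin(&seq);
        *x = a;
        *y = b;
    } while (ck_sequence_read_retry(&seq, version));
}

/* Writer: caller must already hold the mutex serializing all writers. */
static void
write_pair(unsigned int x, unsigned int y)
{
    ck_sequence_write_begin(&seq);
    a = x;
    b = y;
    ck_sequence_write_end(&seq);
}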
61
sys/contrib/ck/include/ck_spinlock.h
Normal file
@@ -0,0 +1,61 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * (Standard Concurrency Kit 2-clause BSD license text, identical to the
 * other headers in this import.)
 */

#ifndef CK_SPINLOCK_H
#define CK_SPINLOCK_H

#include "spinlock/anderson.h"
#include "spinlock/cas.h"
#include "spinlock/clh.h"
#include "spinlock/dec.h"
#include "spinlock/fas.h"
#include "spinlock/hclh.h"
#include "spinlock/mcs.h"
#include "spinlock/ticket.h"

/*
 * On tested x86, x86_64, PPC64 and SPARC64 targets,
 * ck_spinlock_fas proved to have lowest latency
 * in fast path testing or negligible degradation
 * from faster but less robust implementations.
 */
#define CK_SPINLOCK_INITIALIZER CK_SPINLOCK_FAS_INITIALIZER
#define ck_spinlock_t ck_spinlock_fas_t
#define ck_spinlock_init(x) ck_spinlock_fas_init(x)
#define ck_spinlock_lock(x) ck_spinlock_fas_lock(x)
#define ck_spinlock_lock_eb(x) ck_spinlock_fas_lock_eb(x)
#define ck_spinlock_unlock(x) ck_spinlock_fas_unlock(x)
#define ck_spinlock_locked(x) ck_spinlock_fas_locked(x)
#define ck_spinlock_trylock(x) ck_spinlock_fas_trylock(x)

CK_ELIDE_PROTOTYPE(ck_spinlock, ck_spinlock_t,
    ck_spinlock_locked, ck_spinlock_lock,
    ck_spinlock_locked, ck_spinlock_unlock)

CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock, ck_spinlock_t,
    ck_spinlock_locked, ck_spinlock_trylock)

#endif /* CK_SPINLOCK_H */
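Since the generic names above simply alias the fetch-and-store (FAS) lock, usage looks like any spinlock; a minimal sketch:

#include <ck_spinlock.h>

static ck_spinlock_t lock = CK_SPINLOCK_INITIALIZER;

static void
spinlock_example(void)
{
    ck_spinlock_lock(&lock);    /* Expands to ck_spinlock_fas_lock(). */
    /* Critical section. */
    ck_spinlock_unlock(&lock);

    if (ck_spinlock_trylock(&lock) == true) {
        /* Lock acquired without spinning. */
        ck_spinlock_unlock(&lock);
    }
}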
357
sys/contrib/ck/include/ck_stack.h
Normal file
@@ -0,0 +1,357 @@
/*
 * Copyright 2009-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * (Standard Concurrency Kit 2-clause BSD license text, identical to the
 * other headers in this import.)
 */

#ifndef CK_STACK_H
#define CK_STACK_H

#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

struct ck_stack_entry {
    struct ck_stack_entry *next;
};
typedef struct ck_stack_entry ck_stack_entry_t;

struct ck_stack {
    struct ck_stack_entry *head;
    char *generation CK_CC_PACKED;
} CK_CC_ALIASED;
typedef struct ck_stack ck_stack_t;

#define CK_STACK_INITIALIZER { NULL, NULL }

#ifndef CK_F_STACK_PUSH_UPMC
#define CK_F_STACK_PUSH_UPMC
/*
 * Stack producer operation safe for multiple unique producers and multiple consumers.
 */
CK_CC_INLINE static void
ck_stack_push_upmc(struct ck_stack *target, struct ck_stack_entry *entry)
{
    struct ck_stack_entry *stack;

    stack = ck_pr_load_ptr(&target->head);
    entry->next = stack;
    ck_pr_fence_store();

    while (ck_pr_cas_ptr_value(&target->head, stack, entry, &stack) == false) {
        entry->next = stack;
        ck_pr_fence_store();
    }

    return;
}
#endif /* CK_F_STACK_PUSH_UPMC */

#ifndef CK_F_STACK_TRYPUSH_UPMC
#define CK_F_STACK_TRYPUSH_UPMC
/*
 * Stack producer operation for multiple unique producers and multiple consumers.
 * Returns true on success and false on failure.
 */
CK_CC_INLINE static bool
ck_stack_trypush_upmc(struct ck_stack *target, struct ck_stack_entry *entry)
{
    struct ck_stack_entry *stack;

    stack = ck_pr_load_ptr(&target->head);
    entry->next = stack;
    ck_pr_fence_store();

    return ck_pr_cas_ptr(&target->head, stack, entry);
}
#endif /* CK_F_STACK_TRYPUSH_UPMC */

#ifndef CK_F_STACK_POP_UPMC
#define CK_F_STACK_POP_UPMC
/*
 * Stack consumer operation safe for multiple unique producers and multiple consumers.
 */
CK_CC_INLINE static struct ck_stack_entry *
ck_stack_pop_upmc(struct ck_stack *target)
{
    struct ck_stack_entry *entry, *next;

    entry = ck_pr_load_ptr(&target->head);
    if (entry == NULL)
        return NULL;

    ck_pr_fence_load();
    next = entry->next;
    while (ck_pr_cas_ptr_value(&target->head, entry, next, &entry) == false) {
        if (entry == NULL)
            break;

        ck_pr_fence_load();
        next = entry->next;
    }

    return entry;
}
#endif

#ifndef CK_F_STACK_TRYPOP_UPMC
#define CK_F_STACK_TRYPOP_UPMC
/*
 * Stack consumer operation for multiple unique producers and multiple consumers.
 * Returns true on success and false on failure. The value pointed to by the second
 * argument is set to a valid ck_stack_entry_t reference if true is returned. If
 * false is returned, then the value pointed to by the second argument is undefined.
 */
CK_CC_INLINE static bool
ck_stack_trypop_upmc(struct ck_stack *target, struct ck_stack_entry **r)
{
    struct ck_stack_entry *entry;

    entry = ck_pr_load_ptr(&target->head);
    if (entry == NULL)
        return false;

    ck_pr_fence_load();
    if (ck_pr_cas_ptr(&target->head, entry, entry->next) == true) {
        *r = entry;
        return true;
    }

    return false;
}
#endif /* CK_F_STACK_TRYPOP_UPMC */

#ifndef CK_F_STACK_BATCH_POP_UPMC
#define CK_F_STACK_BATCH_POP_UPMC
/*
 * Pop all items off the stack.
 */
CK_CC_INLINE static struct ck_stack_entry *
ck_stack_batch_pop_upmc(struct ck_stack *target)
{
    struct ck_stack_entry *entry;

    entry = ck_pr_fas_ptr(&target->head, NULL);
    ck_pr_fence_load();
    return entry;
}
#endif /* CK_F_STACK_BATCH_POP_UPMC */

#ifndef CK_F_STACK_PUSH_MPMC
#define CK_F_STACK_PUSH_MPMC
/*
 * Stack producer operation safe for multiple producers and multiple consumers.
 */
CK_CC_INLINE static void
ck_stack_push_mpmc(struct ck_stack *target, struct ck_stack_entry *entry)
{

    ck_stack_push_upmc(target, entry);
    return;
}
#endif /* CK_F_STACK_PUSH_MPMC */

#ifndef CK_F_STACK_TRYPUSH_MPMC
#define CK_F_STACK_TRYPUSH_MPMC
/*
 * Stack producer operation safe for multiple producers and multiple consumers.
 */
CK_CC_INLINE static bool
ck_stack_trypush_mpmc(struct ck_stack *target, struct ck_stack_entry *entry)
{

    return ck_stack_trypush_upmc(target, entry);
}
#endif /* CK_F_STACK_TRYPUSH_MPMC */

#ifdef CK_F_PR_CAS_PTR_2_VALUE
#ifndef CK_F_STACK_POP_MPMC
#define CK_F_STACK_POP_MPMC
/*
 * Stack consumer operation safe for multiple producers and multiple consumers.
 */
CK_CC_INLINE static struct ck_stack_entry *
ck_stack_pop_mpmc(struct ck_stack *target)
{
    struct ck_stack original, update;

    original.generation = ck_pr_load_ptr(&target->generation);
    ck_pr_fence_load();
    original.head = ck_pr_load_ptr(&target->head);
    if (original.head == NULL)
        return NULL;

    /* Order with respect to next pointer. */
    ck_pr_fence_load();

    update.generation = original.generation + 1;
    update.head = original.head->next;

    while (ck_pr_cas_ptr_2_value(target, &original, &update, &original) == false) {
        if (original.head == NULL)
            return NULL;

        update.generation = original.generation + 1;

        /* Order with respect to next pointer. */
        ck_pr_fence_load();
        update.head = original.head->next;
    }

    return original.head;
}
#endif /* CK_F_STACK_POP_MPMC */

#ifndef CK_F_STACK_TRYPOP_MPMC
#define CK_F_STACK_TRYPOP_MPMC
CK_CC_INLINE static bool
ck_stack_trypop_mpmc(struct ck_stack *target, struct ck_stack_entry **r)
{
    struct ck_stack original, update;

    original.generation = ck_pr_load_ptr(&target->generation);
    ck_pr_fence_load();
    original.head = ck_pr_load_ptr(&target->head);
    if (original.head == NULL)
        return false;

    update.generation = original.generation + 1;
    ck_pr_fence_load();
    update.head = original.head->next;

    if (ck_pr_cas_ptr_2_value(target, &original, &update, &original) == true) {
        *r = original.head;
        return true;
    }

    return false;
}
#endif /* CK_F_STACK_TRYPOP_MPMC */
#endif /* CK_F_PR_CAS_PTR_2_VALUE */

#ifndef CK_F_STACK_BATCH_POP_MPMC
#define CK_F_STACK_BATCH_POP_MPMC
/*
 * This is equivalent to the UP/MC version as NULL does not need a
 * generation count.
 */
CK_CC_INLINE static struct ck_stack_entry *
ck_stack_batch_pop_mpmc(struct ck_stack *target)
{

    return ck_stack_batch_pop_upmc(target);
}
#endif /* CK_F_STACK_BATCH_POP_MPMC */

#ifndef CK_F_STACK_PUSH_MPNC
#define CK_F_STACK_PUSH_MPNC
/*
 * Stack producer operation safe with no concurrent consumers.
 */
CK_CC_INLINE static void
ck_stack_push_mpnc(struct ck_stack *target, struct ck_stack_entry *entry)
{
    struct ck_stack_entry *stack;

    entry->next = NULL;
    ck_pr_fence_store_atomic();
    stack = ck_pr_fas_ptr(&target->head, entry);
    ck_pr_store_ptr(&entry->next, stack);
    ck_pr_fence_store();

    return;
}
#endif /* CK_F_STACK_PUSH_MPNC */

/*
 * Stack producer operation for single producer and no concurrent consumers.
 */
CK_CC_INLINE static void
ck_stack_push_spnc(struct ck_stack *target, struct ck_stack_entry *entry)
{

    entry->next = target->head;
    target->head = entry;
    return;
}

/*
 * Stack consumer operation for no concurrent producers and single consumer.
 */
CK_CC_INLINE static struct ck_stack_entry *
ck_stack_pop_npsc(struct ck_stack *target)
{
    struct ck_stack_entry *n;

    if (target->head == NULL)
        return NULL;

    n = target->head;
    target->head = n->next;

    return n;
}

/*
 * Pop all items off a stack.
 */
CK_CC_INLINE static struct ck_stack_entry *
ck_stack_batch_pop_npsc(struct ck_stack *target)
{
    struct ck_stack_entry *n;

    n = target->head;
    target->head = NULL;

    return n;
}

/*
 * Stack initialization function. Guarantees initialization across processors.
 */
CK_CC_INLINE static void
ck_stack_init(struct ck_stack *stack)
{

    stack->head = NULL;
    stack->generation = NULL;
    return;
}

/* Defines a container_of function for the stack entry type. */
#define CK_STACK_CONTAINER(T, M, N) CK_CC_CONTAINER(ck_stack_entry_t, T, M, N)

#define CK_STACK_ISEMPTY(m) ((m)->head == NULL)
#define CK_STACK_FIRST(s) ((s)->head)
#define CK_STACK_NEXT(m) ((m)->next)
#define CK_STACK_FOREACH(stack, entry) \
    for ((entry) = CK_STACK_FIRST(stack); \
        (entry) != NULL; \
        (entry) = CK_STACK_NEXT(entry))
#define CK_STACK_FOREACH_SAFE(stack, entry, T) \
    for ((entry) = CK_STACK_FIRST(stack); \
        (entry) != NULL && ((T) = (entry)->next, 1); \
        (entry) = (T))

#endif /* CK_STACK_H */
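A minimal sketch of the intrusive usage pattern: the entry is embedded in a user structure and CK_STACK_CONTAINER generates the conversion from an entry pointer back to its enclosing structure. The struct node type and the node_container name are illustrative assumptions; CK_CC_CONTAINER's exact expansion lives in ck_cc.h.

#include <ck_stack.h>
#include <ck_stddef.h>

struct node {
    int value;
    ck_stack_entry_t entry;
};

/* Generates node_container(), mapping an embedded entry to its node. */
CK_STACK_CONTAINER(struct node, entry, node_container)

static ck_stack_t stack;

static void
stack_example(struct node *n)
{
    ck_stack_entry_t *e;

    ck_stack_init(&stack);
    ck_stack_push_upmc(&stack, &n->entry);

    e = ck_stack_pop_upmc(&stack);
    if (e != NULL) {
        struct node *popped = node_container(e);
        /* popped == n here. */
        (void)popped;
    }
}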
31
sys/contrib/ck/include/ck_stdbool.h
Normal file
@@ -0,0 +1,31 @@
/*
 * Copyright 2015 Olivier Houchard.
 * All rights reserved.
 *
 * (Standard Concurrency Kit 2-clause BSD license text, identical to the
 * other headers in this import.)
 */

#if defined(__FreeBSD__) && defined(_KERNEL)
#include <sys/types.h>
#else
#include <stdbool.h>
#endif
31
sys/contrib/ck/include/ck_stddef.h
Normal file
@@ -0,0 +1,31 @@
/*
 * Copyright 2015 Olivier Houchard.
 * All rights reserved.
 *
 * (Standard Concurrency Kit 2-clause BSD license text, identical to the
 * other headers in this import.)
 */

#if defined(__FreeBSD__) && defined(_KERNEL)
#include <sys/stddef.h>
#else
#include <stddef.h>
#endif
34
sys/contrib/ck/include/ck_stdint.h
Normal file
@@ -0,0 +1,34 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * (Standard Concurrency Kit 2-clause BSD license text, identical to the
 * other headers in this import.)
 */

#if defined(__linux__) && defined(__KERNEL__)
#include <linux/kernel.h>
#include <linux/types.h>
#elif defined(__FreeBSD__) && defined(_KERNEL)
#include <sys/stdint.h>
#else
#include <stdint.h>
#endif /* __linux__ && __KERNEL__ */
31
sys/contrib/ck/include/ck_stdlib.h
Normal file
@@ -0,0 +1,31 @@
/*
 * Copyright 2015 Olivier Houchard.
 * All rights reserved.
 *
 * (Standard Concurrency Kit 2-clause BSD license text, identical to the
 * other headers in this import.)
 */

#if defined(__FreeBSD__) && defined(_KERNEL)
#include <sys/systm.h>
#else
#include <stdlib.h>
#endif
31
sys/contrib/ck/include/ck_string.h
Normal file
@@ -0,0 +1,31 @@
/*
 * Copyright 2015 Olivier Houchard.
 * All rights reserved.
 *
 * (Standard Concurrency Kit 2-clause BSD license text, identical to the
 * other headers in this import.)
 */

#if defined(__FreeBSD__) && defined(_KERNEL)
#include <sys/systm.h>
#else
#include <string.h>
#endif
218
sys/contrib/ck/include/ck_swlock.h
Normal file
@@ -0,0 +1,218 @@
/*
 * Copyright 2014 Jaidev Sridhar.
 * Copyright 2014 Samy Al Bahra.
 * All rights reserved.
 *
 * (Standard Concurrency Kit 2-clause BSD license text, identical to the
 * other headers in this import.)
 */

#ifndef CK_SWLOCK_H
#define CK_SWLOCK_H

#include <ck_elide.h>
#include <ck_limits.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

struct ck_swlock {
    uint32_t value;
};
typedef struct ck_swlock ck_swlock_t;

#define CK_SWLOCK_INITIALIZER {0}
#define CK_SWLOCK_WRITER_BIT (1UL << 31)
#define CK_SWLOCK_LATCH_BIT (1UL << 30)
#define CK_SWLOCK_WRITER_MASK (CK_SWLOCK_LATCH_BIT | CK_SWLOCK_WRITER_BIT)
#define CK_SWLOCK_READER_MASK (UINT32_MAX ^ CK_SWLOCK_WRITER_MASK)

CK_CC_INLINE static void
ck_swlock_init(struct ck_swlock *rw)
{

    rw->value = 0;
    ck_pr_barrier();
    return;
}

CK_CC_INLINE static void
ck_swlock_write_unlock(ck_swlock_t *rw)
{

    ck_pr_fence_unlock();
    ck_pr_and_32(&rw->value, CK_SWLOCK_READER_MASK);
    return;
}

CK_CC_INLINE static bool
ck_swlock_locked_writer(ck_swlock_t *rw)
{
    bool r;

    r = ck_pr_load_32(&rw->value) & CK_SWLOCK_WRITER_BIT;
    ck_pr_fence_acquire();
    return r;
}

CK_CC_INLINE static void
ck_swlock_write_downgrade(ck_swlock_t *rw)
{

    ck_pr_inc_32(&rw->value);
    ck_swlock_write_unlock(rw);
    return;
}

CK_CC_INLINE static bool
ck_swlock_locked(ck_swlock_t *rw)
{
    bool r;

    r = ck_pr_load_32(&rw->value);
    ck_pr_fence_acquire();
    return r;
}

CK_CC_INLINE static bool
ck_swlock_write_trylock(ck_swlock_t *rw)
{
    bool r;

    r = ck_pr_cas_32(&rw->value, 0, CK_SWLOCK_WRITER_BIT);
    ck_pr_fence_lock();
    return r;
}

CK_ELIDE_TRYLOCK_PROTOTYPE(ck_swlock_write, ck_swlock_t,
    ck_swlock_locked, ck_swlock_write_trylock)

CK_CC_INLINE static void
ck_swlock_write_lock(ck_swlock_t *rw)
{

    ck_pr_or_32(&rw->value, CK_SWLOCK_WRITER_BIT);
    while (ck_pr_load_32(&rw->value) & CK_SWLOCK_READER_MASK)
        ck_pr_stall();

    ck_pr_fence_lock();
    return;
}

CK_CC_INLINE static void
ck_swlock_write_latch(ck_swlock_t *rw)
{

    /* Publish intent to acquire lock. */
    ck_pr_or_32(&rw->value, CK_SWLOCK_WRITER_BIT);

    /* Stall until readers have seen the writer and cleared. */
    while (ck_pr_cas_32(&rw->value, CK_SWLOCK_WRITER_BIT,
        CK_SWLOCK_WRITER_MASK) == false) {
        do {
            ck_pr_stall();
        } while (ck_pr_load_32(&rw->value) != CK_SWLOCK_WRITER_BIT);
    }

    ck_pr_fence_lock();
    return;
}

CK_CC_INLINE static void
ck_swlock_write_unlatch(ck_swlock_t *rw)
{

    ck_pr_fence_unlock();
    ck_pr_store_32(&rw->value, 0);
    return;
}

CK_ELIDE_PROTOTYPE(ck_swlock_write, ck_swlock_t,
    ck_swlock_locked, ck_swlock_write_lock,
    ck_swlock_locked_writer, ck_swlock_write_unlock)

CK_ELIDE_TRYLOCK_PROTOTYPE(ck_swlock_read, ck_swlock_t,
    ck_swlock_locked_writer, ck_swlock_read_trylock)

CK_CC_INLINE static bool
ck_swlock_read_trylock(ck_swlock_t *rw)
{
    uint32_t l = ck_pr_load_32(&rw->value);

    if (l & CK_SWLOCK_WRITER_BIT)
        return false;

    l = ck_pr_faa_32(&rw->value, 1) & CK_SWLOCK_WRITER_MASK;
    if (l == CK_SWLOCK_WRITER_BIT)
        ck_pr_dec_32(&rw->value);

    ck_pr_fence_lock();
    return l == 0;
}

CK_CC_INLINE static void
ck_swlock_read_lock(ck_swlock_t *rw)
{
    uint32_t l;

    for (;;) {
        while (ck_pr_load_32(&rw->value) & CK_SWLOCK_WRITER_BIT)
            ck_pr_stall();

        l = ck_pr_faa_32(&rw->value, 1) & CK_SWLOCK_WRITER_MASK;
        if (l == 0)
            break;

        /*
         * If the latch bit has not been set, then the writer will
         * have observed the reader and will wait for completion of
         * the read-side critical section.
         */
        if (l == CK_SWLOCK_WRITER_BIT)
            ck_pr_dec_32(&rw->value);
    }

    ck_pr_fence_lock();
    return;
}

CK_CC_INLINE static bool
ck_swlock_locked_reader(ck_swlock_t *rw)
{

    ck_pr_fence_load();
    return ck_pr_load_32(&rw->value) & CK_SWLOCK_READER_MASK;
}

CK_CC_INLINE static void
ck_swlock_read_unlock(ck_swlock_t *rw)
{

    ck_pr_fence_unlock();
    ck_pr_dec_32(&rw->value);
    return;
}

CK_ELIDE_PROTOTYPE(ck_swlock_read, ck_swlock_t,
    ck_swlock_locked_writer, ck_swlock_read_lock,
    ck_swlock_locked_reader, ck_swlock_read_unlock)

#endif /* CK_SWLOCK_H */
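A minimal sketch; note that the single-writer contract is the caller's responsibility, so only one thread may ever act as the writer of a given lock:

#include <ck_swlock.h>

static ck_swlock_t lock = CK_SWLOCK_INITIALIZER;

static void
reader(void)
{
    ck_swlock_read_lock(&lock);
    /* Read-side critical section: readers may run concurrently. */
    ck_swlock_read_unlock(&lock);
}

static void
writer(void)
{
    ck_swlock_write_lock(&lock);
    /* Write-side critical section. */
    ck_swlock_write_unlock(&lock);

    /* The latch variant additionally blocks new readers until unlatched. */
    ck_swlock_write_latch(&lock);
    ck_swlock_write_unlatch(&lock);
}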
136
sys/contrib/ck/include/ck_tflock.h
Normal file
@@ -0,0 +1,136 @@
/*
 * Copyright 2014 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_TFLOCK_TICKET_H
#define CK_TFLOCK_TICKET_H

/*
 * This is an implementation of task-fair locks derived from the work
 * described in:
 *	John M. Mellor-Crummey and Michael L. Scott. 1991.
 *	Scalable reader-writer synchronization for shared-memory
 *	multiprocessors. SIGPLAN Not. 26, 7 (April 1991), 106-113.
 */

#include <ck_cc.h>
#include <ck_pr.h>

struct ck_tflock_ticket {
	uint32_t request;
	uint32_t completion;
};
typedef struct ck_tflock_ticket ck_tflock_ticket_t;

#define CK_TFLOCK_TICKET_INITIALIZER	{ 0, 0 }

#define CK_TFLOCK_TICKET_RC_INCR	0x10000U	/* Read-side increment. */
#define CK_TFLOCK_TICKET_WC_INCR	0x1U		/* Write-side increment. */
#define CK_TFLOCK_TICKET_W_MASK		0xffffU		/* Write-side mask. */
#define CK_TFLOCK_TICKET_WC_TOPMSK	0x8000U		/* Write clear mask for overflow. */
#define CK_TFLOCK_TICKET_RC_TOPMSK	0x80000000U	/* Read clear mask for overflow. */

CK_CC_INLINE static uint32_t
ck_tflock_ticket_fca_32(uint32_t *target, uint32_t mask, uint32_t delta)
{
	uint32_t snapshot = ck_pr_load_32(target);
	uint32_t goal;

	for (;;) {
		goal = (snapshot & ~mask) + delta;
		if (ck_pr_cas_32_value(target, snapshot, goal, &snapshot) == true)
			break;

		ck_pr_stall();
	}

	return snapshot;
}

CK_CC_INLINE static void
ck_tflock_ticket_init(struct ck_tflock_ticket *pf)
{

	pf->request = pf->completion = 0;
	ck_pr_barrier();
	return;
}

CK_CC_INLINE static void
ck_tflock_ticket_write_lock(struct ck_tflock_ticket *lock)
{
	uint32_t previous;

	previous = ck_tflock_ticket_fca_32(&lock->request, CK_TFLOCK_TICKET_WC_TOPMSK,
	    CK_TFLOCK_TICKET_WC_INCR);
	ck_pr_fence_atomic_load();
	while (ck_pr_load_32(&lock->completion) != previous)
		ck_pr_stall();

	ck_pr_fence_lock();
	return;
}

CK_CC_INLINE static void
ck_tflock_ticket_write_unlock(struct ck_tflock_ticket *lock)
{

	ck_pr_fence_unlock();
	ck_tflock_ticket_fca_32(&lock->completion, CK_TFLOCK_TICKET_WC_TOPMSK,
	    CK_TFLOCK_TICKET_WC_INCR);
	return;
}

CK_CC_INLINE static void
ck_tflock_ticket_read_lock(struct ck_tflock_ticket *lock)
{
	uint32_t previous;

	previous = ck_tflock_ticket_fca_32(&lock->request,
	    CK_TFLOCK_TICKET_RC_TOPMSK, CK_TFLOCK_TICKET_RC_INCR) &
	    CK_TFLOCK_TICKET_W_MASK;

	ck_pr_fence_atomic_load();

	while ((ck_pr_load_32(&lock->completion) &
	    CK_TFLOCK_TICKET_W_MASK) != previous) {
		ck_pr_stall();
	}

	ck_pr_fence_lock();
	return;
}

CK_CC_INLINE static void
ck_tflock_ticket_read_unlock(struct ck_tflock_ticket *lock)
{

	ck_pr_fence_unlock();
	ck_tflock_ticket_fca_32(&lock->completion, CK_TFLOCK_TICKET_RC_TOPMSK,
	    CK_TFLOCK_TICKET_RC_INCR);
	return;
}

#endif /* CK_TFLOCK_TICKET_H */
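
The lock above is a ticket turnstile: ck_tflock_ticket_fca_32 atomically takes a ticket from `request` (clearing the overflow bit named by `mask`), and each acquirer then spins until `completion` catches up, so readers and writers are granted the lock in strict FIFO order. A minimal usage sketch follows; it is not part of the imported sources, and the names `lock`, `counter`, `writer` and `reader` are illustrative assumptions:

/*
 * Hedged usage sketch, not part of the import: a shared counter
 * guarded by a task-fair ticket lock.
 */
#include <ck_tflock.h>
#include <stdint.h>

static ck_tflock_ticket_t lock = CK_TFLOCK_TICKET_INITIALIZER;
static uint64_t counter;

static void
writer(void)
{

	ck_tflock_ticket_write_lock(&lock);
	counter++;			/* Exclusive section. */
	ck_tflock_ticket_write_unlock(&lock);
}

static uint64_t
reader(void)
{
	uint64_t snapshot;

	ck_tflock_ticket_read_lock(&lock);
	snapshot = counter;		/* Shared section; readers may overlap. */
	ck_tflock_ticket_read_unlock(&lock);
	return snapshot;
}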

167
sys/contrib/ck/include/gcc/aarch64/ck_f_pr.h
Normal file
@ -0,0 +1,167 @@
/* DO NOT EDIT. This is auto-generated from feature.sh */
#define CK_F_PR_ADD_16
#define CK_F_PR_ADD_32
#define CK_F_PR_ADD_64
#define CK_F_PR_ADD_8
#define CK_F_PR_ADD_CHAR
#define CK_F_PR_ADD_INT
#define CK_F_PR_ADD_PTR
#define CK_F_PR_ADD_SHORT
#define CK_F_PR_ADD_UINT
#define CK_F_PR_AND_16
#define CK_F_PR_AND_32
#define CK_F_PR_AND_64
#define CK_F_PR_AND_8
#define CK_F_PR_AND_CHAR
#define CK_F_PR_AND_INT
#define CK_F_PR_AND_PTR
#define CK_F_PR_AND_SHORT
#define CK_F_PR_AND_UINT
#define CK_F_PR_BARRIER
#define CK_F_PR_CAS_16
#define CK_F_PR_CAS_16_VALUE
#define CK_F_PR_CAS_32
#define CK_F_PR_CAS_32_VALUE
#define CK_F_PR_CAS_64
#define CK_F_PR_CAS_64_VALUE
#define CK_F_PR_CAS_64_2
#define CK_F_PR_CAS_64_2_VALUE
#define CK_F_PR_CAS_DOUBLE
#define CK_F_PR_CAS_DOUBLE_VALUE
#define CK_F_PR_CAS_8
#define CK_F_PR_CAS_8_VALUE
#define CK_F_PR_CAS_CHAR
#define CK_F_PR_CAS_CHAR_VALUE
#define CK_F_PR_CAS_INT
#define CK_F_PR_CAS_INT_VALUE
#define CK_F_PR_CAS_PTR
#define CK_F_PR_CAS_PTR_2
#define CK_F_PR_CAS_PTR_2_VALUE
#define CK_F_PR_CAS_PTR_VALUE
#define CK_F_PR_CAS_SHORT
#define CK_F_PR_CAS_SHORT_VALUE
#define CK_F_PR_CAS_UINT
#define CK_F_PR_CAS_UINT_VALUE
#define CK_F_PR_DEC_16
#define CK_F_PR_DEC_32
#define CK_F_PR_DEC_64
#define CK_F_PR_DEC_8
#define CK_F_PR_DEC_CHAR
#define CK_F_PR_DEC_INT
#define CK_F_PR_DEC_PTR
#define CK_F_PR_DEC_SHORT
#define CK_F_PR_DEC_UINT
#define CK_F_PR_FAA_16
#define CK_F_PR_FAA_32
#define CK_F_PR_FAA_64
#define CK_F_PR_FAA_8
#define CK_F_PR_FAA_CHAR
#define CK_F_PR_FAA_INT
#define CK_F_PR_FAA_PTR
#define CK_F_PR_FAA_SHORT
#define CK_F_PR_FAA_UINT
#define CK_F_PR_FAS_16
#define CK_F_PR_FAS_32
#define CK_F_PR_FAS_64
#define CK_F_PR_FAS_8
#define CK_F_PR_FAS_CHAR
#define CK_F_PR_FAS_INT
#define CK_F_PR_FAS_PTR
#define CK_F_PR_FAS_SHORT
#define CK_F_PR_FAS_UINT
#define CK_F_PR_FENCE_ATOMIC
#define CK_F_PR_FENCE_ATOMIC_LOAD
#define CK_F_PR_FENCE_ATOMIC_STORE
#define CK_F_PR_FENCE_LOAD
#define CK_F_PR_FENCE_LOAD_ATOMIC
#define CK_F_PR_FENCE_LOAD_DEPENDS
#define CK_F_PR_FENCE_LOAD_STORE
#define CK_F_PR_FENCE_MEMORY
#define CK_F_PR_FENCE_STORE
#define CK_F_PR_FENCE_STORE_ATOMIC
#define CK_F_PR_FENCE_STORE_LOAD
#define CK_F_PR_FENCE_STRICT_ATOMIC
#define CK_F_PR_FENCE_STRICT_ATOMIC_LOAD
#define CK_F_PR_FENCE_STRICT_ATOMIC_STORE
#define CK_F_PR_FENCE_STRICT_LOAD
#define CK_F_PR_FENCE_STRICT_LOAD_ATOMIC
#define CK_F_PR_FENCE_STRICT_LOAD_STORE
#define CK_F_PR_FENCE_STRICT_MEMORY
#define CK_F_PR_FENCE_STRICT_STORE
#define CK_F_PR_FENCE_STRICT_STORE_ATOMIC
#define CK_F_PR_FENCE_STRICT_STORE_LOAD
#define CK_F_PR_INC_16
#define CK_F_PR_INC_32
#define CK_F_PR_INC_64
#define CK_F_PR_INC_8
#define CK_F_PR_INC_CHAR
#define CK_F_PR_INC_INT
#define CK_F_PR_INC_PTR
#define CK_F_PR_INC_SHORT
#define CK_F_PR_INC_UINT
#define CK_F_PR_LOAD_16
#define CK_F_PR_LOAD_32
#define CK_F_PR_LOAD_64
#define CK_F_PR_LOAD_DOUBLE
#define CK_F_PR_LOAD_8
#define CK_F_PR_LOAD_CHAR
#define CK_F_PR_LOAD_INT
#define CK_F_PR_LOAD_PTR
#define CK_F_PR_LOAD_SHORT
#define CK_F_PR_LOAD_UINT
#define CK_F_PR_NEG_16
#define CK_F_PR_NEG_32
#define CK_F_PR_NEG_64
#define CK_F_PR_NEG_8
#define CK_F_PR_NEG_CHAR
#define CK_F_PR_NEG_INT
#define CK_F_PR_NEG_PTR
#define CK_F_PR_NEG_SHORT
#define CK_F_PR_NEG_UINT
#define CK_F_PR_NOT_16
#define CK_F_PR_NOT_32
#define CK_F_PR_NOT_64
#define CK_F_PR_NOT_8
#define CK_F_PR_NOT_CHAR
#define CK_F_PR_NOT_INT
#define CK_F_PR_NOT_PTR
#define CK_F_PR_NOT_SHORT
#define CK_F_PR_NOT_UINT
#define CK_F_PR_OR_16
#define CK_F_PR_OR_32
#define CK_F_PR_OR_64
#define CK_F_PR_OR_8
#define CK_F_PR_OR_CHAR
#define CK_F_PR_OR_INT
#define CK_F_PR_OR_PTR
#define CK_F_PR_OR_SHORT
#define CK_F_PR_OR_UINT
#define CK_F_PR_STALL
#define CK_F_PR_STORE_16
#define CK_F_PR_STORE_32
#define CK_F_PR_STORE_64
#define CK_F_PR_STORE_DOUBLE
#define CK_F_PR_STORE_8
#define CK_F_PR_STORE_CHAR
#define CK_F_PR_STORE_INT
#define CK_F_PR_STORE_PTR
#define CK_F_PR_STORE_SHORT
#define CK_F_PR_STORE_UINT
#define CK_F_PR_SUB_16
#define CK_F_PR_SUB_32
#define CK_F_PR_SUB_64
#define CK_F_PR_SUB_8
#define CK_F_PR_SUB_CHAR
#define CK_F_PR_SUB_INT
#define CK_F_PR_SUB_PTR
#define CK_F_PR_SUB_SHORT
#define CK_F_PR_SUB_UINT
#define CK_F_PR_XOR_16
#define CK_F_PR_XOR_32
#define CK_F_PR_XOR_64
#define CK_F_PR_XOR_8
#define CK_F_PR_XOR_CHAR
#define CK_F_PR_XOR_INT
#define CK_F_PR_XOR_PTR
#define CK_F_PR_XOR_SHORT
#define CK_F_PR_XOR_UINT

492
sys/contrib/ck/include/gcc/aarch64/ck_pr.h
Normal file
@ -0,0 +1,492 @@
/*
 * Copyright 2009-2016 Samy Al Bahra.
 * Copyright 2013-2016 Olivier Houchard.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_PR_AARCH64_H
#define CK_PR_AARCH64_H

#ifndef CK_PR_H
#error Do not include this file directly, use ck_pr.h
#endif

#include <ck_cc.h>
#include <ck_md.h>

/*
 * The following represent supported atomic operations.
 * These operations may be emulated.
 */
#include "ck_f_pr.h"

/*
 * Minimum interface requirement met.
 */
#define CK_F_PR

CK_CC_INLINE static void
ck_pr_stall(void)
{

	__asm__ __volatile__("" ::: "memory");
	return;
}

#define CK_DMB_SY __asm __volatile("dmb ish" : : "r" (0) : "memory")
#define CK_DMB_LD __asm __volatile("dmb ishld" : : "r" (0) : "memory")
#define CK_DMB_ST __asm __volatile("dmb ishst" : : "r" (0) : "memory")

#define CK_PR_FENCE(T, I)			\
	CK_CC_INLINE static void		\
	ck_pr_fence_strict_##T(void)		\
	{					\
		I;				\
	}

CK_PR_FENCE(atomic, CK_DMB_ST)
CK_PR_FENCE(atomic_store, CK_DMB_ST)
CK_PR_FENCE(atomic_load, CK_DMB_SY)
CK_PR_FENCE(store_atomic, CK_DMB_ST)
CK_PR_FENCE(load_atomic, CK_DMB_SY)
CK_PR_FENCE(store, CK_DMB_ST)
CK_PR_FENCE(store_load, CK_DMB_SY)
CK_PR_FENCE(load, CK_DMB_LD)
CK_PR_FENCE(load_store, CK_DMB_SY)
CK_PR_FENCE(memory, CK_DMB_SY)
CK_PR_FENCE(acquire, CK_DMB_SY)
CK_PR_FENCE(release, CK_DMB_SY)
CK_PR_FENCE(acqrel, CK_DMB_SY)
CK_PR_FENCE(lock, CK_DMB_SY)
CK_PR_FENCE(unlock, CK_DMB_SY)

#undef CK_PR_FENCE

#undef CK_DMB_SY
#undef CK_DMB_LD
#undef CK_DMB_ST

#define CK_PR_LOAD(S, M, T, I)				\
	CK_CC_INLINE static T				\
	ck_pr_md_load_##S(const M *target)		\
	{						\
		long r = 0;				\
		__asm__ __volatile__(I " %w0, [%1];"	\
		    : "=r" (r)				\
		    : "r" (target)			\
		    : "memory");			\
		return ((T)r);				\
	}
#define CK_PR_LOAD_64(S, M, T, I)			\
	CK_CC_INLINE static T				\
	ck_pr_md_load_##S(const M *target)		\
	{						\
		long r = 0;				\
		__asm__ __volatile__(I " %0, [%1];"	\
		    : "=r" (r)				\
		    : "r" (target)			\
		    : "memory");			\
		return ((T)r);				\
	}

CK_PR_LOAD_64(ptr, void, void *, "ldr")

#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, I)
#define CK_PR_LOAD_S_64(S, T, I) CK_PR_LOAD_64(S, T, T, I)

CK_PR_LOAD_S_64(64, uint64_t, "ldr")
CK_PR_LOAD_S(32, uint32_t, "ldr")
CK_PR_LOAD_S(16, uint16_t, "ldrh")
CK_PR_LOAD_S(8, uint8_t, "ldrb")
CK_PR_LOAD_S(uint, unsigned int, "ldr")
CK_PR_LOAD_S(int, int, "ldr")
CK_PR_LOAD_S(short, short, "ldrh")
CK_PR_LOAD_S(char, char, "ldrb")
CK_PR_LOAD_S_64(double, double, "ldr")

#undef CK_PR_LOAD_S
#undef CK_PR_LOAD_S_64
#undef CK_PR_LOAD
#undef CK_PR_LOAD_64

#define CK_PR_STORE(S, M, T, I)				\
	CK_CC_INLINE static void			\
	ck_pr_md_store_##S(M *target, T v)		\
	{						\
		__asm__ __volatile__(I " %w1, [%0]"	\
		    :					\
		    : "r" (target),			\
		      "r" (v)				\
		    : "memory");			\
		return;					\
	}
#define CK_PR_STORE_64(S, M, T, I)			\
	CK_CC_INLINE static void			\
	ck_pr_md_store_##S(M *target, T v)		\
	{						\
		__asm__ __volatile__(I " %1, [%0]"	\
		    :					\
		    : "r" (target),			\
		      "r" (v)				\
		    : "memory");			\
		return;					\
	}

CK_PR_STORE_64(ptr, void, const void *, "str")

#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, I)
#define CK_PR_STORE_S_64(S, T, I) CK_PR_STORE_64(S, T, T, I)

CK_PR_STORE_S_64(64, uint64_t, "str")
CK_PR_STORE_S(32, uint32_t, "str")
CK_PR_STORE_S(16, uint16_t, "strh")
CK_PR_STORE_S(8, uint8_t, "strb")
CK_PR_STORE_S(uint, unsigned int, "str")
CK_PR_STORE_S(int, int, "str")
CK_PR_STORE_S(short, short, "strh")
CK_PR_STORE_S(char, char, "strb")
CK_PR_STORE_S_64(double, double, "str")

#undef CK_PR_STORE_S
#undef CK_PR_STORE_S_64
#undef CK_PR_STORE
#undef CK_PR_STORE_64

CK_CC_INLINE static bool
ck_pr_cas_64_2_value(uint64_t target[2], uint64_t compare[2], uint64_t set[2], uint64_t value[2])
{
	uint64_t tmp1, tmp2;

	__asm__ __volatile__("1:"
	    "ldxp %0, %1, [%4];"
	    "mov %2, %0;"
	    "mov %3, %1;"
	    "eor %0, %0, %5;"
	    "eor %1, %1, %6;"
	    "orr %1, %0, %1;"
	    "mov %w0, #0;"
	    "cbnz %1, 2f;"
	    "stxp %w0, %7, %8, [%4];"
	    "cbnz %w0, 1b;"
	    "mov %w0, #1;"
	    "2:"
	    : "=&r" (tmp1), "=&r" (tmp2), "=&r" (value[0]), "=&r" (value[1])
	    : "r" (target), "r" (compare[0]), "r" (compare[1]), "r" (set[0]), "r" (set[1]));

	return (tmp1);
}

CK_CC_INLINE static bool
ck_pr_cas_ptr_2_value(void *target, void *compare, void *set, void *value)
{
	return (ck_pr_cas_64_2_value(CK_CPP_CAST(uint64_t *, target),
				     CK_CPP_CAST(uint64_t *, compare),
				     CK_CPP_CAST(uint64_t *, set),
				     CK_CPP_CAST(uint64_t *, value)));
}

CK_CC_INLINE static bool
ck_pr_cas_64_2(uint64_t target[2], uint64_t compare[2], uint64_t set[2])
{
	uint64_t tmp1, tmp2;

	__asm__ __volatile__("1:"
	    "ldxp %0, %1, [%2];"
	    "eor %0, %0, %3;"
	    "eor %1, %1, %4;"
	    "orr %1, %0, %1;"
	    "mov %w0, #0;"
	    "cbnz %1, 2f;"
	    "stxp %w0, %5, %6, [%2];"
	    "cbnz %w0, 1b;"
	    "mov %w0, #1;"
	    "2:"
	    : "=&r" (tmp1), "=&r" (tmp2)
	    : "r" (target), "r" (compare[0]), "r" (compare[1]), "r" (set[0]), "r" (set[1]));

	return (tmp1);
}
CK_CC_INLINE static bool
ck_pr_cas_ptr_2(void *target, void *compare, void *set)
{
	return (ck_pr_cas_64_2(CK_CPP_CAST(uint64_t *, target),
			       CK_CPP_CAST(uint64_t *, compare),
			       CK_CPP_CAST(uint64_t *, set)));
}

#define CK_PR_CAS(N, M, T, W, R)					\
	CK_CC_INLINE static bool					\
	ck_pr_cas_##N##_value(M *target, T compare, T set, M *value)	\
	{								\
		T previous;						\
		T tmp;							\
		__asm__ __volatile__("1:"				\
		    "ldxr" W " %" R "0, [%2];"				\
		    "cmp  %" R "0, %" R "4;"				\
		    "b.ne 2f;"						\
		    "stxr" W " %w1, %" R "3, [%2];"			\
		    "cbnz %w1, 1b;"					\
		    "2:"						\
		    : "=&r" (previous),					\
		      "=&r" (tmp)					\
		    : "r"   (target),					\
		      "r"   (set),					\
		      "r"   (compare)					\
		    : "memory", "cc");					\
		*(T *)value = previous;					\
		return (previous == compare);				\
	}								\
	CK_CC_INLINE static bool					\
	ck_pr_cas_##N(M *target, T compare, T set)			\
	{								\
		T previous;						\
		T tmp;							\
		__asm__ __volatile__(					\
		    "1:"						\
		    "ldxr" W " %" R "0, [%2];"				\
		    "cmp  %" R "0, %" R "4;"				\
		    "b.ne 2f;"						\
		    "stxr" W " %w1, %" R "3, [%2];"			\
		    "cbnz %w1, 1b;"					\
		    "2:"						\
		    : "=&r" (previous),					\
		      "=&r" (tmp)					\
		    : "r"   (target),					\
		      "r"   (set),					\
		      "r"   (compare)					\
		    : "memory", "cc");					\
		return (previous == compare);				\
	}

CK_PR_CAS(ptr, void, void *, "", "")

#define CK_PR_CAS_S(N, M, W, R)	CK_PR_CAS(N, M, M, W, R)
CK_PR_CAS_S(64, uint64_t, "", "")
CK_PR_CAS_S(double, double, "", "")
CK_PR_CAS_S(32, uint32_t, "", "w")
CK_PR_CAS_S(uint, unsigned int, "", "w")
CK_PR_CAS_S(int, int, "", "w")
CK_PR_CAS_S(16, uint16_t, "h", "w")
CK_PR_CAS_S(8, uint8_t, "b", "w")
CK_PR_CAS_S(short, short, "h", "w")
CK_PR_CAS_S(char, char, "b", "w")

#undef CK_PR_CAS_S
#undef CK_PR_CAS

#define CK_PR_FAS(N, M, T, W, R)				\
	CK_CC_INLINE static T					\
	ck_pr_fas_##N(M *target, T v)				\
	{							\
		T previous;					\
		T tmp;						\
		__asm__ __volatile__("1:"			\
		    "ldxr" W " %" R "0, [%2];"			\
		    "stxr" W " %w1, %" R "3, [%2];"		\
		    "cbnz %w1, 1b;"				\
		    : "=&r" (previous),				\
		      "=&r" (tmp)				\
		    : "r"   (target),				\
		      "r"   (v)					\
		    : "memory", "cc");				\
		return (previous);				\
	}

CK_PR_FAS(64, uint64_t, uint64_t, "", "")
CK_PR_FAS(32, uint32_t, uint32_t, "", "w")
CK_PR_FAS(ptr, void, void *, "", "")
CK_PR_FAS(int, int, int, "", "w")
CK_PR_FAS(uint, unsigned int, unsigned int, "", "w")
CK_PR_FAS(16, uint16_t, uint16_t, "h", "w")
CK_PR_FAS(8, uint8_t, uint8_t, "b", "w")
CK_PR_FAS(short, short, short, "h", "w")
CK_PR_FAS(char, char, char, "b", "w")

#undef CK_PR_FAS

#define CK_PR_UNARY(O, N, M, T, I, W, R)			\
	CK_CC_INLINE static void				\
	ck_pr_##O##_##N(M *target)				\
	{							\
		T previous = 0;					\
		T tmp = 0;					\
		__asm__ __volatile__("1:"			\
		    "ldxr" W " %" R "0, [%2];"			\
		    I ";"					\
		    "stxr" W " %w1, %" R "0, [%2];"		\
		    "cbnz %w1, 1b;"				\
		    : "=&r" (previous),				\
		      "=&r" (tmp)				\
		    : "r"   (target)				\
		    : "memory", "cc");				\
		return;						\
	}

CK_PR_UNARY(inc, ptr, void, void *, "add %0, %0, #1", "", "")
CK_PR_UNARY(dec, ptr, void, void *, "sub %0, %0, #1", "", "")
CK_PR_UNARY(not, ptr, void, void *, "mvn %0, %0", "", "")
CK_PR_UNARY(neg, ptr, void, void *, "neg %0, %0", "", "")
CK_PR_UNARY(inc, 64, uint64_t, uint64_t, "add %0, %0, #1", "", "")
CK_PR_UNARY(dec, 64, uint64_t, uint64_t, "sub %0, %0, #1", "", "")
CK_PR_UNARY(not, 64, uint64_t, uint64_t, "mvn %0, %0", "", "")
CK_PR_UNARY(neg, 64, uint64_t, uint64_t, "neg %0, %0", "", "")

#define CK_PR_UNARY_S(S, T, W)					\
	CK_PR_UNARY(inc, S, T, T, "add %w0, %w0, #1", W, "w")	\
	CK_PR_UNARY(dec, S, T, T, "sub %w0, %w0, #1", W, "w")	\
	CK_PR_UNARY(not, S, T, T, "mvn %w0, %w0", W, "w")	\
	CK_PR_UNARY(neg, S, T, T, "neg %w0, %w0", W, "w")	\

CK_PR_UNARY_S(32, uint32_t, "")
CK_PR_UNARY_S(uint, unsigned int, "")
CK_PR_UNARY_S(int, int, "")
CK_PR_UNARY_S(16, uint16_t, "h")
CK_PR_UNARY_S(8, uint8_t, "b")
CK_PR_UNARY_S(short, short, "h")
CK_PR_UNARY_S(char, char, "b")

#undef CK_PR_UNARY_S
#undef CK_PR_UNARY

#define CK_PR_BINARY(O, N, M, T, I, W, R)			\
	CK_CC_INLINE static void				\
	ck_pr_##O##_##N(M *target, T delta)			\
	{							\
		T previous;					\
		T tmp;						\
		__asm__ __volatile__("1:"			\
		    "ldxr" W " %" R "0, [%2];"			\
		    I " %" R "0, %" R "0, %" R "3;"		\
		    "stxr" W " %w1, %" R "0, [%2];"		\
		    "cbnz %w1, 1b;"				\
		    : "=&r" (previous),				\
		      "=&r" (tmp)				\
		    : "r"   (target),				\
		      "r"   (delta)				\
		    : "memory", "cc");				\
		return;						\
	}

CK_PR_BINARY(and, ptr, void, uintptr_t, "and", "", "")
CK_PR_BINARY(add, ptr, void, uintptr_t, "add", "", "")
CK_PR_BINARY(or, ptr, void, uintptr_t, "orr", "", "")
CK_PR_BINARY(sub, ptr, void, uintptr_t, "sub", "", "")
CK_PR_BINARY(xor, ptr, void, uintptr_t, "eor", "", "")
CK_PR_BINARY(and, 64, uint64_t, uint64_t, "and", "", "")
CK_PR_BINARY(add, 64, uint64_t, uint64_t, "add", "", "")
CK_PR_BINARY(or, 64, uint64_t, uint64_t, "orr", "", "")
CK_PR_BINARY(sub, 64, uint64_t, uint64_t, "sub", "", "")
CK_PR_BINARY(xor, 64, uint64_t, uint64_t, "eor", "", "")

#define CK_PR_BINARY_S(S, T, W)				\
	CK_PR_BINARY(and, S, T, T, "and", W, "w")	\
	CK_PR_BINARY(add, S, T, T, "add", W, "w")	\
	CK_PR_BINARY(or, S, T, T, "orr", W, "w")	\
	CK_PR_BINARY(sub, S, T, T, "sub", W, "w")	\
	CK_PR_BINARY(xor, S, T, T, "eor", W, "w")

CK_PR_BINARY_S(32, uint32_t, "")
CK_PR_BINARY_S(uint, unsigned int, "")
CK_PR_BINARY_S(int, int, "")
CK_PR_BINARY_S(16, uint16_t, "h")
CK_PR_BINARY_S(8, uint8_t, "b")
CK_PR_BINARY_S(short, short, "h")
CK_PR_BINARY_S(char, char, "b")

#undef CK_PR_BINARY_S
#undef CK_PR_BINARY

CK_CC_INLINE static void *
ck_pr_faa_ptr(void *target, uintptr_t delta)
{
	uintptr_t previous, r, tmp;

	__asm__ __volatile__("1:"
	    "ldxr %0, [%3];"
	    "add %1, %4, %0;"
	    "stxr %w2, %1, [%3];"
	    "cbnz %w2, 1b;"
	    : "=&r" (previous),
	      "=&r" (r),
	      "=&r" (tmp)
	    : "r" (target),
	      "r" (delta)
	    : "memory", "cc");

	return (void *)(previous);
}

CK_CC_INLINE static uint64_t
ck_pr_faa_64(uint64_t *target, uint64_t delta)
{
	uint64_t previous, r, tmp;

	__asm__ __volatile__("1:"
	    "ldxr %0, [%3];"
	    "add %1, %4, %0;"
	    "stxr %w2, %1, [%3];"
	    "cbnz %w2, 1b;"
	    : "=&r" (previous),
	      "=&r" (r),
	      "=&r" (tmp)
	    : "r" (target),
	      "r" (delta)
	    : "memory", "cc");

	return (previous);
}

#define CK_PR_FAA(S, T, W)				\
	CK_CC_INLINE static T				\
	ck_pr_faa_##S(T *target, T delta)		\
	{						\
		T previous, r, tmp;			\
		__asm__ __volatile__("1:"		\
		    "ldxr" W " %w0, [%3];"		\
		    "add %w1, %w4, %w0;"		\
		    "stxr" W " %w2, %w1, [%3];"		\
		    "cbnz %w2, 1b;"			\
		    : "=&r" (previous),			\
		      "=&r" (r),			\
		      "=&r" (tmp)			\
		    : "r" (target),			\
		      "r" (delta)			\
		    : "memory", "cc");			\
		return (previous);			\
	}

CK_PR_FAA(32, uint32_t, "")
CK_PR_FAA(uint, unsigned int, "")
CK_PR_FAA(int, int, "")
CK_PR_FAA(16, uint16_t, "h")
CK_PR_FAA(8, uint8_t, "b")
CK_PR_FAA(short, short, "h")
CK_PR_FAA(char, char, "b")

#undef CK_PR_FAA

#endif /* CK_PR_AARCH64_H */
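
All of the read-modify-write primitives above are built on AArch64 load-exclusive/store-exclusive pairs (ldxr/stxr): the store fails and the loop retries if another CPU touched the location in between. The `_value` CAS variants also hand back the value actually observed, which lets a retry loop avoid an extra load. A hedged sketch of such a loop, using only functions defined above (`bounded_inc_32` is an illustrative name, not CK API):

/*
 * Hedged sketch, not part of the import: a bounded increment built on
 * the CAS primitives above.
 */
#include <ck_pr.h>
#include <stdbool.h>
#include <stdint.h>

static bool
bounded_inc_32(uint32_t *target, uint32_t limit)
{
	uint32_t snapshot = ck_pr_load_32(target);

	while (snapshot < limit) {
		/* On failure, snapshot is refreshed with the observed value. */
		if (ck_pr_cas_32_value(target, snapshot,
		    snapshot + 1, &snapshot) == true)
			return true;
		ck_pr_stall();
	}

	return false;
}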

162
sys/contrib/ck/include/gcc/arm/ck_f_pr.h
Normal file
@ -0,0 +1,162 @@
/* DO NOT EDIT. This is auto-generated from feature.sh */
#define CK_F_PR_ADD_16
#define CK_F_PR_ADD_32
#define CK_F_PR_ADD_8
#define CK_F_PR_ADD_CHAR
#define CK_F_PR_ADD_INT
#define CK_F_PR_ADD_PTR
#define CK_F_PR_ADD_SHORT
#define CK_F_PR_ADD_UINT
#define CK_F_PR_AND_16
#define CK_F_PR_AND_32
#define CK_F_PR_AND_8
#define CK_F_PR_AND_CHAR
#define CK_F_PR_AND_INT
#define CK_F_PR_AND_PTR
#define CK_F_PR_AND_SHORT
#define CK_F_PR_AND_UINT
#define CK_F_PR_BARRIER
#define CK_F_PR_CAS_16
#define CK_F_PR_CAS_16_VALUE
#define CK_F_PR_CAS_32
#define CK_F_PR_CAS_32_VALUE
#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
#define CK_F_PR_CAS_64
#define CK_F_PR_CAS_64_VALUE
#define CK_F_PR_CAS_DOUBLE
#define CK_F_PR_CAS_DOUBLE_VALUE
#endif
#define CK_F_PR_CAS_8
#define CK_F_PR_CAS_8_VALUE
#define CK_F_PR_CAS_CHAR
#define CK_F_PR_CAS_CHAR_VALUE
#define CK_F_PR_CAS_INT
#define CK_F_PR_CAS_INT_VALUE
#define CK_F_PR_CAS_PTR
#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
#define CK_F_PR_CAS_PTR_2
#define CK_F_PR_CAS_PTR_2_VALUE
#endif
#define CK_F_PR_CAS_PTR_VALUE
#define CK_F_PR_CAS_SHORT
#define CK_F_PR_CAS_SHORT_VALUE
#define CK_F_PR_CAS_UINT
#define CK_F_PR_CAS_UINT_VALUE
#define CK_F_PR_DEC_16
#define CK_F_PR_DEC_32
#define CK_F_PR_DEC_8
#define CK_F_PR_DEC_CHAR
#define CK_F_PR_DEC_INT
#define CK_F_PR_DEC_PTR
#define CK_F_PR_DEC_SHORT
#define CK_F_PR_DEC_UINT
#define CK_F_PR_FAA_16
#define CK_F_PR_FAA_32
#define CK_F_PR_FAA_8
#define CK_F_PR_FAA_CHAR
#define CK_F_PR_FAA_INT
#define CK_F_PR_FAA_PTR
#define CK_F_PR_FAA_SHORT
#define CK_F_PR_FAA_UINT
#define CK_F_PR_FAS_16
#define CK_F_PR_FAS_32
#define CK_F_PR_FAS_8
#define CK_F_PR_FAS_CHAR
#define CK_F_PR_FAS_INT
#define CK_F_PR_FAS_PTR
#define CK_F_PR_FAS_SHORT
#define CK_F_PR_FAS_UINT
#define CK_F_PR_FENCE_ATOMIC
#define CK_F_PR_FENCE_ATOMIC_LOAD
#define CK_F_PR_FENCE_ATOMIC_STORE
#define CK_F_PR_FENCE_LOAD
#define CK_F_PR_FENCE_LOAD_ATOMIC
#define CK_F_PR_FENCE_LOAD_DEPENDS
#define CK_F_PR_FENCE_LOAD_STORE
#define CK_F_PR_FENCE_MEMORY
#define CK_F_PR_FENCE_STORE
#define CK_F_PR_FENCE_STORE_ATOMIC
#define CK_F_PR_FENCE_STORE_LOAD
#define CK_F_PR_FENCE_STRICT_ATOMIC
#define CK_F_PR_FENCE_STRICT_ATOMIC_LOAD
#define CK_F_PR_FENCE_STRICT_ATOMIC_STORE
#define CK_F_PR_FENCE_STRICT_LOAD
#define CK_F_PR_FENCE_STRICT_LOAD_ATOMIC
#define CK_F_PR_FENCE_STRICT_LOAD_STORE
#define CK_F_PR_FENCE_STRICT_MEMORY
#define CK_F_PR_FENCE_STRICT_STORE
#define CK_F_PR_FENCE_STRICT_STORE_ATOMIC
#define CK_F_PR_FENCE_STRICT_STORE_LOAD
#define CK_F_PR_INC_16
#define CK_F_PR_INC_32
#define CK_F_PR_INC_8
#define CK_F_PR_INC_CHAR
#define CK_F_PR_INC_INT
#define CK_F_PR_INC_PTR
#define CK_F_PR_INC_SHORT
#define CK_F_PR_INC_UINT
#define CK_F_PR_LOAD_16
#define CK_F_PR_LOAD_32
#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
#define CK_F_PR_LOAD_64
#define CK_F_PR_LOAD_DOUBLE
#endif
#define CK_F_PR_LOAD_8
#define CK_F_PR_LOAD_CHAR
#define CK_F_PR_LOAD_INT
#define CK_F_PR_LOAD_PTR
#define CK_F_PR_LOAD_SHORT
#define CK_F_PR_LOAD_UINT
#define CK_F_PR_NEG_16
#define CK_F_PR_NEG_32
#define CK_F_PR_NEG_8
#define CK_F_PR_NEG_CHAR
#define CK_F_PR_NEG_INT
#define CK_F_PR_NEG_PTR
#define CK_F_PR_NEG_SHORT
#define CK_F_PR_NEG_UINT
#define CK_F_PR_NOT_16
#define CK_F_PR_NOT_32
#define CK_F_PR_NOT_8
#define CK_F_PR_NOT_CHAR
#define CK_F_PR_NOT_INT
#define CK_F_PR_NOT_PTR
#define CK_F_PR_NOT_SHORT
#define CK_F_PR_NOT_UINT
#define CK_F_PR_OR_16
#define CK_F_PR_OR_32
#define CK_F_PR_OR_8
#define CK_F_PR_OR_CHAR
#define CK_F_PR_OR_INT
#define CK_F_PR_OR_PTR
#define CK_F_PR_OR_SHORT
#define CK_F_PR_OR_UINT
#define CK_F_PR_STALL
#define CK_F_PR_STORE_16
#define CK_F_PR_STORE_32
#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
#define CK_F_PR_STORE_64
#define CK_F_PR_STORE_DOUBLE
#endif
#define CK_F_PR_STORE_8
#define CK_F_PR_STORE_CHAR
#define CK_F_PR_STORE_INT
#define CK_F_PR_STORE_PTR
#define CK_F_PR_STORE_SHORT
#define CK_F_PR_STORE_UINT
#define CK_F_PR_SUB_16
#define CK_F_PR_SUB_32
#define CK_F_PR_SUB_8
#define CK_F_PR_SUB_CHAR
#define CK_F_PR_SUB_INT
#define CK_F_PR_SUB_PTR
#define CK_F_PR_SUB_SHORT
#define CK_F_PR_SUB_UINT
#define CK_F_PR_XOR_16
#define CK_F_PR_XOR_32
#define CK_F_PR_XOR_8
#define CK_F_PR_XOR_CHAR
#define CK_F_PR_XOR_INT
#define CK_F_PR_XOR_PTR
#define CK_F_PR_XOR_SHORT
#define CK_F_PR_XOR_UINT

554
sys/contrib/ck/include/gcc/arm/ck_pr.h
Normal file
@ -0,0 +1,554 @@
/*
 * Copyright 2009-2015 Samy Al Bahra.
 * Copyright 2013-2015 Olivier Houchard.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_PR_ARM_H
#define CK_PR_ARM_H

#ifndef CK_PR_H
#error Do not include this file directly, use ck_pr.h
#endif

#include <ck_cc.h>
#include <ck_md.h>

/*
 * The following represent supported atomic operations.
 * These operations may be emulated.
 */
#include "ck_f_pr.h"

/*
 * Minimum interface requirement met.
 */
#define CK_F_PR

CK_CC_INLINE static void
ck_pr_stall(void)
{

	__asm__ __volatile__("" ::: "memory");
	return;
}

#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
#define CK_ISB __asm __volatile("isb" : : "r" (0) : "memory")
#define CK_DMB __asm __volatile("dmb" : : "r" (0) : "memory")
#define CK_DSB __asm __volatile("dsb" : : "r" (0) : "memory")
/* FreeBSD's toolchain doesn't accept dmb st, so use the opcode instead */
#ifdef __FreeBSD__
#define CK_DMB_ST __asm __volatile(".word 0xf57ff05e" : : "r" (0) : "memory")
#else
#define CK_DMB_ST __asm __volatile("dmb st" : : "r" (0) : "memory")
#endif /* __FreeBSD__ */
#else
/* armv6 doesn't have dsb/dmb/isb, and no way to wait only for stores */
#define CK_ISB \
    __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
#define CK_DSB \
    __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
#define CK_DMB \
    __asm __volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory")
#define CK_DMB_ST CK_DMB
#endif

#define CK_PR_FENCE(T, I)			\
	CK_CC_INLINE static void		\
	ck_pr_fence_strict_##T(void)		\
	{					\
		I;				\
	}

CK_PR_FENCE(atomic, CK_DMB_ST)
CK_PR_FENCE(atomic_store, CK_DMB_ST)
CK_PR_FENCE(atomic_load, CK_DMB_ST)
CK_PR_FENCE(store_atomic, CK_DMB_ST)
CK_PR_FENCE(load_atomic, CK_DMB)
CK_PR_FENCE(store, CK_DMB_ST)
CK_PR_FENCE(store_load, CK_DMB)
CK_PR_FENCE(load, CK_DMB)
CK_PR_FENCE(load_store, CK_DMB)
CK_PR_FENCE(memory, CK_DMB)
CK_PR_FENCE(acquire, CK_DMB)
CK_PR_FENCE(release, CK_DMB)
CK_PR_FENCE(acqrel, CK_DMB)
CK_PR_FENCE(lock, CK_DMB)
CK_PR_FENCE(unlock, CK_DMB)

#undef CK_PR_FENCE

#undef CK_ISB
#undef CK_DSB
#undef CK_DMB
#undef CK_DMB_ST

#define CK_PR_LOAD(S, M, T, C, I)			\
	CK_CC_INLINE static T				\
	ck_pr_md_load_##S(const M *target)		\
	{						\
		long r = 0;				\
		__asm__ __volatile__(I " %0, [%1];"	\
		    : "=r" (r)				\
		    : "r" (target)			\
		    : "memory");			\
		return ((T)r);				\
	}

CK_PR_LOAD(ptr, void, void *, uint32_t, "ldr")

#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)

CK_PR_LOAD_S(32, uint32_t, "ldr")
CK_PR_LOAD_S(16, uint16_t, "ldrh")
CK_PR_LOAD_S(8, uint8_t, "ldrb")
CK_PR_LOAD_S(uint, unsigned int, "ldr")
CK_PR_LOAD_S(int, int, "ldr")
CK_PR_LOAD_S(short, short, "ldrh")
CK_PR_LOAD_S(char, char, "ldrb")

#undef CK_PR_LOAD_S
#undef CK_PR_LOAD

#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)

#define CK_PR_DOUBLE_LOAD(T, N)				\
CK_CC_INLINE static T					\
ck_pr_md_load_##N(const T *target)			\
{							\
	register T ret;					\
							\
	__asm __volatile("ldrexd %0, [%1]"		\
	    : "=&r" (ret)				\
	    : "r" (target)				\
	    : "memory", "cc");				\
	return (ret);					\
}

CK_PR_DOUBLE_LOAD(uint64_t, 64)
CK_PR_DOUBLE_LOAD(double, double)
#undef CK_PR_DOUBLE_LOAD
#endif

#define CK_PR_STORE(S, M, T, C, I)			\
	CK_CC_INLINE static void			\
	ck_pr_md_store_##S(M *target, T v)		\
	{						\
		__asm__ __volatile__(I " %1, [%0]"	\
		    :					\
		    : "r" (target),			\
		      "r" (v)				\
		    : "memory");			\
		return;					\
	}

CK_PR_STORE(ptr, void, const void *, uint32_t, "str")

#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)

CK_PR_STORE_S(32, uint32_t, "str")
CK_PR_STORE_S(16, uint16_t, "strh")
CK_PR_STORE_S(8, uint8_t, "strb")
CK_PR_STORE_S(uint, unsigned int, "str")
CK_PR_STORE_S(int, int, "str")
CK_PR_STORE_S(short, short, "strh")
CK_PR_STORE_S(char, char, "strb")

#undef CK_PR_STORE_S
#undef CK_PR_STORE

#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)

#define CK_PR_DOUBLE_STORE(T, N)			\
CK_CC_INLINE static void				\
ck_pr_md_store_##N(const T *target, T value)		\
{							\
	T tmp;						\
	uint32_t flag;					\
	__asm __volatile("1: \n"			\
	    "ldrexd %0, [%2]\n"				\
	    "strexd %1, %3, [%2]\n"			\
	    "teq %1, #0\n"				\
	    "it ne \n"					\
	    "bne 1b\n"					\
	    : "=&r" (tmp), "=&r" (flag)			\
	    : "r" (target), "r" (value)			\
	    : "memory", "cc");				\
}

CK_PR_DOUBLE_STORE(uint64_t, 64)
CK_PR_DOUBLE_STORE(double, double)

#undef CK_PR_DOUBLE_STORE

#define CK_PR_DOUBLE_CAS_VALUE(T, N)				\
CK_CC_INLINE static bool					\
ck_pr_cas_##N##_value(T *target, T compare, T set, T *value)	\
{								\
	T previous;						\
	int tmp;						\
								\
	__asm__ __volatile__("1:"				\
	    "ldrexd %0, [%4];"					\
	    "cmp    %Q0, %Q2;"					\
	    "ittt eq;"						\
	    "cmpeq  %R0, %R2;"					\
	    "strexdeq %1, %3, [%4];"				\
	    "cmpeq  %1, #1;"					\
	    "beq 1b;"						\
	    :"=&r" (previous), "=&r" (tmp)			\
	    : "r" (compare), "r" (set) ,			\
	      "r"(target)					\
	    : "memory", "cc");					\
	*value = previous;					\
	return (*value == compare);				\
}

CK_PR_DOUBLE_CAS_VALUE(uint64_t, 64)
CK_PR_DOUBLE_CAS_VALUE(double, double)

#undef CK_PR_DOUBLE_CAS_VALUE

CK_CC_INLINE static bool
ck_pr_cas_ptr_2_value(void *target, void *compare, void *set, void *value)
{
	uint32_t *_compare = CK_CPP_CAST(uint32_t *, compare);
	uint32_t *_set = CK_CPP_CAST(uint32_t *, set);
	uint64_t __compare = ((uint64_t)_compare[0]) | ((uint64_t)_compare[1] << 32);
	uint64_t __set = ((uint64_t)_set[0]) | ((uint64_t)_set[1] << 32);

	return (ck_pr_cas_64_value(CK_CPP_CAST(uint64_t *, target),
				   __compare,
				   __set,
				   CK_CPP_CAST(uint64_t *, value)));
}

#define CK_PR_DOUBLE_CAS(T, N)				\
CK_CC_INLINE static bool				\
ck_pr_cas_##N(T *target, T compare, T set)		\
{							\
	int ret;					\
	T tmp;						\
							\
	__asm__ __volatile__("1:"			\
	    "mov %0, #0;"				\
	    "ldrexd %1, [%4];"				\
	    "cmp    %Q1, %Q2;"				\
	    "itttt eq;"					\
	    "cmpeq  %R1, %R2;"				\
	    "strexdeq %1, %3, [%4];"			\
	    "moveq %0, #1;"				\
	    "cmpeq  %1, #1;"				\
	    "beq 1b;"					\
	    : "=&r" (ret), "=&r" (tmp)			\
	    : "r" (compare), "r" (set) ,		\
	      "r"(target)				\
	    : "memory", "cc");				\
							\
	return (ret);					\
}

CK_PR_DOUBLE_CAS(uint64_t, 64)
CK_PR_DOUBLE_CAS(double, double)
CK_CC_INLINE static bool
ck_pr_cas_ptr_2(void *target, void *compare, void *set)
{
	uint32_t *_compare = CK_CPP_CAST(uint32_t *, compare);
	uint32_t *_set = CK_CPP_CAST(uint32_t *, set);
	uint64_t __compare = ((uint64_t)_compare[0]) | ((uint64_t)_compare[1] << 32);
	uint64_t __set = ((uint64_t)_set[0]) | ((uint64_t)_set[1] << 32);
	return (ck_pr_cas_64(CK_CPP_CAST(uint64_t *, target),
			     __compare,
			     __set));
}

#endif

CK_CC_INLINE static bool
ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *value)
{
	void *previous, *tmp;
	__asm__ __volatile__("1:"
	    "ldrex %0, [%2];"
	    "cmp   %0, %4;"
	    "itt eq;"
	    "strexeq %1, %3, [%2];"
	    "cmpeq   %1, #1;"
	    "beq 1b;"
	    : "=&r" (previous),
	      "=&r" (tmp)
	    : "r"   (target),
	      "r"   (set),
	      "r"   (compare)
	    : "memory", "cc");
	*(void **)value = previous;
	return (previous == compare);
}

CK_CC_INLINE static bool
ck_pr_cas_ptr(void *target, void *compare, void *set)
{
	void *previous, *tmp;
	__asm__ __volatile__("1:"
	    "ldrex %0, [%2];"
	    "cmp   %0, %4;"
	    "itt eq;"
	    "strexeq %1, %3, [%2];"
	    "cmpeq   %1, #1;"
	    "beq 1b;"
	    : "=&r" (previous),
	      "=&r" (tmp)
	    : "r"   (target),
	      "r"   (set),
	      "r"   (compare)
	    : "memory", "cc");
	return (previous == compare);
}

#define CK_PR_CAS(N, T, W)					\
	CK_CC_INLINE static bool				\
	ck_pr_cas_##N##_value(T *target, T compare, T set, T *value) \
	{							\
		T previous = 0, tmp = 0;			\
		__asm__ __volatile__("1:"			\
		    "ldrex" W " %0, [%2];"			\
		    "cmp   %0, %4;"				\
		    "itt eq;"					\
		    "strex" W "eq %1, %3, [%2];"		\
		    "cmpeq   %1, #1;"				\
		    "beq 1b;"					\
		    /*						\
		     * Using "+&" instead of "=&" to avoid bogus \
		     * clang warnings.				\
		     */						\
		    : "+&r" (previous),				\
		      "+&r" (tmp)				\
		    : "r"   (target),				\
		      "r"   (set),				\
		      "r"   (compare)				\
		    : "memory", "cc");				\
		*value = previous;				\
		return (previous == compare);			\
	}							\
	CK_CC_INLINE static bool				\
	ck_pr_cas_##N(T *target, T compare, T set)		\
	{							\
		T previous = 0, tmp = 0;			\
		__asm__ __volatile__("1:"			\
		    "ldrex" W " %0, [%2];"			\
		    "cmp   %0, %4;"				\
		    "itt eq;"					\
		    "strex" W "eq %1, %3, [%2];"		\
		    "cmpeq   %1, #1;"				\
		    "beq 1b;"					\
		    : "+&r" (previous),				\
		      "+&r" (tmp)				\
		    : "r"   (target),				\
		      "r"   (set),				\
		      "r"   (compare)				\
		    : "memory", "cc");				\
		return (previous == compare);			\
	}

CK_PR_CAS(32, uint32_t, "")
CK_PR_CAS(uint, unsigned int, "")
CK_PR_CAS(int, int, "")
CK_PR_CAS(16, uint16_t, "h")
CK_PR_CAS(8, uint8_t, "b")
CK_PR_CAS(short, short, "h")
CK_PR_CAS(char, char, "b")

#undef CK_PR_CAS

#define CK_PR_FAS(N, M, T, W)					\
	CK_CC_INLINE static T					\
	ck_pr_fas_##N(M *target, T v)				\
	{							\
		T previous = 0;					\
		T tmp = 0;					\
		__asm__ __volatile__("1:"			\
		    "ldrex" W " %0, [%2];"			\
		    "strex" W " %1, %3, [%2];"			\
		    "cmp %1, #0;"				\
		    "bne 1b;"					\
		    : "+&r" (previous),				\
		      "+&r" (tmp)				\
		    : "r"   (target),				\
		      "r"   (v)					\
		    : "memory", "cc");				\
		return (previous);				\
	}

CK_PR_FAS(32, uint32_t, uint32_t, "")
CK_PR_FAS(ptr, void, void *, "")
CK_PR_FAS(int, int, int, "")
CK_PR_FAS(uint, unsigned int, unsigned int, "")
CK_PR_FAS(16, uint16_t, uint16_t, "h")
CK_PR_FAS(8, uint8_t, uint8_t, "b")
CK_PR_FAS(short, short, short, "h")
CK_PR_FAS(char, char, char, "b")

#undef CK_PR_FAS

#define CK_PR_UNARY(O, N, M, T, I, W)				\
	CK_CC_INLINE static void				\
	ck_pr_##O##_##N(M *target)				\
	{							\
		T previous = 0;					\
		T tmp = 0;					\
		__asm__ __volatile__("1:"			\
		    "ldrex" W " %0, [%2];"			\
		    I ";"					\
		    "strex" W " %1, %0, [%2];"			\
		    "cmp   %1, #0;"				\
		    "bne   1b;"					\
		    : "+&r" (previous),				\
		      "+&r" (tmp)				\
		    : "r"   (target)				\
		    : "memory", "cc");				\
		return;						\
	}

CK_PR_UNARY(inc, ptr, void, void *, "add %0, %0, #1", "")
CK_PR_UNARY(dec, ptr, void, void *, "sub %0, %0, #1", "")
CK_PR_UNARY(not, ptr, void, void *, "mvn %0, %0", "")
CK_PR_UNARY(neg, ptr, void, void *, "neg %0, %0", "")

#define CK_PR_UNARY_S(S, T, W)					\
	CK_PR_UNARY(inc, S, T, T, "add %0, %0, #1", W)		\
	CK_PR_UNARY(dec, S, T, T, "sub %0, %0, #1", W)		\
	CK_PR_UNARY(not, S, T, T, "mvn %0, %0", W)		\
	CK_PR_UNARY(neg, S, T, T, "neg %0, %0", W)		\

CK_PR_UNARY_S(32, uint32_t, "")
CK_PR_UNARY_S(uint, unsigned int, "")
CK_PR_UNARY_S(int, int, "")
CK_PR_UNARY_S(16, uint16_t, "h")
CK_PR_UNARY_S(8, uint8_t, "b")
CK_PR_UNARY_S(short, short, "h")
CK_PR_UNARY_S(char, char, "b")

#undef CK_PR_UNARY_S
#undef CK_PR_UNARY

#define CK_PR_BINARY(O, N, M, T, I, W)				\
	CK_CC_INLINE static void				\
	ck_pr_##O##_##N(M *target, T delta)			\
	{							\
		T previous = 0;					\
		T tmp = 0;					\
		__asm__ __volatile__("1:"			\
		    "ldrex" W " %0, [%2];"			\
		    I " %0, %0, %3;"				\
		    "strex" W " %1, %0, [%2];"			\
		    "cmp %1, #0;"				\
		    "bne 1b;"					\
		    : "+&r" (previous),				\
		      "+&r" (tmp)				\
		    : "r"   (target),				\
		      "r"   (delta)				\
		    : "memory", "cc");				\
		return;						\
	}

CK_PR_BINARY(and, ptr, void, uintptr_t, "and", "")
CK_PR_BINARY(add, ptr, void, uintptr_t, "add", "")
CK_PR_BINARY(or, ptr, void, uintptr_t, "orr", "")
CK_PR_BINARY(sub, ptr, void, uintptr_t, "sub", "")
CK_PR_BINARY(xor, ptr, void, uintptr_t, "eor", "")

#define CK_PR_BINARY_S(S, T, W)				\
	CK_PR_BINARY(and, S, T, T, "and", W)		\
	CK_PR_BINARY(add, S, T, T, "add", W)		\
	CK_PR_BINARY(or, S, T, T, "orr", W)		\
	CK_PR_BINARY(sub, S, T, T, "sub", W)		\
	CK_PR_BINARY(xor, S, T, T, "eor", W)

CK_PR_BINARY_S(32, uint32_t, "")
CK_PR_BINARY_S(uint, unsigned int, "")
CK_PR_BINARY_S(int, int, "")
CK_PR_BINARY_S(16, uint16_t, "h")
CK_PR_BINARY_S(8, uint8_t, "b")
CK_PR_BINARY_S(short, short, "h")
CK_PR_BINARY_S(char, char, "b")

#undef CK_PR_BINARY_S
#undef CK_PR_BINARY

CK_CC_INLINE static void *
ck_pr_faa_ptr(void *target, uintptr_t delta)
{
	uintptr_t previous, r, tmp;

	__asm__ __volatile__("1:"
	    "ldrex %0, [%3];"
	    "add %1, %4, %0;"
	    "strex %2, %1, [%3];"
	    "cmp %2, #0;"
	    "bne  1b;"
	    : "=&r" (previous),
	      "=&r" (r),
	      "=&r" (tmp)
	    : "r" (target),
	      "r" (delta)
	    : "memory", "cc");

	return (void *)(previous);
}

#define CK_PR_FAA(S, T, W)				\
	CK_CC_INLINE static T				\
	ck_pr_faa_##S(T *target, T delta)		\
	{						\
		T previous = 0, r = 0, tmp = 0;		\
		__asm__ __volatile__("1:"		\
		    "ldrex" W " %0, [%3];"		\
		    "add %1, %4, %0;"			\
		    "strex" W " %2, %1, [%3];"		\
		    "cmp %2, #0;"			\
		    "bne 1b;"				\
		    : "+&r" (previous),			\
		      "+&r" (r),			\
		      "+&r" (tmp)			\
		    : "r" (target),			\
		      "r" (delta)			\
		    : "memory", "cc");			\
		return (previous);			\
	}

CK_PR_FAA(32, uint32_t, "")
CK_PR_FAA(uint, unsigned int, "")
CK_PR_FAA(int, int, "")
CK_PR_FAA(16, uint16_t, "h")
CK_PR_FAA(8, uint8_t, "b")
CK_PR_FAA(short, short, "h")
CK_PR_FAA(char, char, "b")

#undef CK_PR_FAA

#endif /* CK_PR_ARM_H */
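
On ARMv7 the ldrexd/strexd sequences above provide a 64-bit CAS (advertised as CK_F_PR_CAS_64), which is commonly used to pack a 32-bit value with a generation tag so that stale updates fail. A hedged sketch under that assumption; `tagged_store` is an illustrative name and the code only applies on targets where CK_F_PR_CAS_64 is defined:

/*
 * Hedged sketch, not part of the import: updating a packed
 * {generation, value} word with the ARMv7 64-bit CAS.
 */
#include <ck_pr.h>
#include <stdbool.h>
#include <stdint.h>

#ifdef CK_F_PR_CAS_64
static bool
tagged_store(uint64_t *slot, uint32_t value)
{
	uint64_t old = ck_pr_load_64(slot);
	/* Bump the generation in the high half on every update. */
	uint64_t update = ((old & 0xffffffff00000000ULL) + (1ULL << 32)) | value;

	return ck_pr_cas_64(slot, old, update);
}
#endif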

142
sys/contrib/ck/include/gcc/ck_cc.h
Normal file
@ -0,0 +1,142 @@
/*
 * Copyright 2009-2015 Samy Al Bahra.
 * Copyright 2014 Paul Khuong.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_GCC_CC_H
#define CK_GCC_CC_H

#include <ck_md.h>

#ifdef __SUNPRO_C
#define CK_CC_UNUSED
#define CK_CC_USED
#define CK_CC_IMM
#define CK_CC_IMM_U32
#else
#define CK_CC_UNUSED __attribute__((unused))
#define CK_CC_USED   __attribute__((used))
#define CK_CC_IMM "i"
#if defined(__x86_64__) || defined(__x86__)
#define CK_CC_IMM_U32 "Z"
#define CK_CC_IMM_S32 "e"
#else
#define CK_CC_IMM_U32 CK_CC_IMM
#define CK_CC_IMM_S32 CK_CC_IMM
#endif /* __x86_64__ || __x86__ */
#endif

#ifdef __OPTIMIZE__
#define CK_CC_INLINE CK_CC_UNUSED inline
#else
#define CK_CC_INLINE CK_CC_UNUSED
#endif

#define CK_CC_FORCE_INLINE CK_CC_UNUSED __attribute__((always_inline)) inline
#define CK_CC_RESTRICT __restrict__

/*
 * Packed attribute.
 */
#define CK_CC_PACKED __attribute__((packed))

/*
 * Weak reference.
 */
#define CK_CC_WEAKREF __attribute__((weakref))

/*
 * Alignment attribute.
 */
#define CK_CC_ALIGN(B) __attribute__((aligned(B)))

/*
 * Cache align.
 */
#define CK_CC_CACHELINE CK_CC_ALIGN(CK_MD_CACHELINE)

/*
 * These are functions which should be avoided.
 */
#ifdef __freestanding__
#pragma GCC poison malloc free
#endif

/*
 * Branch execution hints.
 */
#define CK_CC_LIKELY(x) (__builtin_expect(!!(x), 1))
#define CK_CC_UNLIKELY(x) (__builtin_expect(!!(x), 0))

/*
 * Some compilers are overly strict regarding aliasing semantics.
 * Unfortunately, in many cases it makes more sense to pay aliasing
 * cost rather than overly expensive register spillage.
 */
#define CK_CC_ALIASED __attribute__((__may_alias__))

/*
 * Compile-time typeof
 */
#define CK_CC_TYPEOF(X, DEFAULT) __typeof__(X)

/*
 * Portability wrappers for bitwise ops.
 */

#define CK_F_CC_FFS
#define CK_F_CC_CLZ
#define CK_F_CC_CTZ
#define CK_F_CC_POPCOUNT

CK_CC_INLINE static int
ck_cc_ffs(unsigned int x)
{

	return __builtin_ffs(x);
}

CK_CC_INLINE static int
ck_cc_clz(unsigned int x)
{

	return __builtin_clz(x);
}

CK_CC_INLINE static int
ck_cc_ctz(unsigned int x)
{

	return __builtin_ctz(x);
}

CK_CC_INLINE static int
ck_cc_popcount(unsigned int x)
{

	return __builtin_popcount(x);
}

#endif /* CK_GCC_CC_H */
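
The wrappers above map one-to-one onto the GCC builtins, so ck_cc_ffs() keeps __builtin_ffs() semantics: a 1-based index of the least significant set bit, with 0 returned for a zero argument. A hedged sketch of a typical use, claiming the first free slot in a 32-bit occupancy mask (`alloc_slot` is an illustrative name, not CK API):

/*
 * Hedged sketch, not part of the import: first-fit slot allocation
 * over a bitmask using ck_cc_ffs().
 */
#include <ck_cc.h>

static int
alloc_slot(unsigned int *mask)
{
	unsigned int free_bits = ~*mask;
	int bit;

	if (free_bits == 0)
		return -1;	/* All 32 slots taken. */

	/* 1-based index of the lowest clear bit of *mask. */
	bit = ck_cc_ffs(free_bits) - 1;
	*mask |= 1U << bit;
	return bit;
}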

105
sys/contrib/ck/include/gcc/ck_f_pr.h
Normal file
@ -0,0 +1,105 @@
/* DO NOT EDIT. This is auto-generated from feature.sh */
#define CK_F_PR_ADD_16
#define CK_F_PR_ADD_32
#define CK_F_PR_ADD_64
#define CK_F_PR_ADD_8
#define CK_F_PR_ADD_CHAR
#define CK_F_PR_ADD_INT
#define CK_F_PR_ADD_PTR
#define CK_F_PR_ADD_UINT
#define CK_F_PR_AND_16
#define CK_F_PR_AND_32
#define CK_F_PR_AND_64
#define CK_F_PR_AND_8
#define CK_F_PR_AND_CHAR
#define CK_F_PR_AND_INT
#define CK_F_PR_AND_PTR
#define CK_F_PR_AND_UINT
#define CK_F_PR_CAS_16
#define CK_F_PR_CAS_16_VALUE
#define CK_F_PR_CAS_32
#define CK_F_PR_CAS_32_VALUE
#define CK_F_PR_CAS_64
#define CK_F_PR_CAS_64_VALUE
#define CK_F_PR_CAS_8
#define CK_F_PR_CAS_8_VALUE
#define CK_F_PR_CAS_CHAR
#define CK_F_PR_CAS_CHAR_VALUE
#define CK_F_PR_CAS_INT
#define CK_F_PR_CAS_INT_VALUE
#define CK_F_PR_CAS_PTR
#define CK_F_PR_CAS_PTR_VALUE
#define CK_F_PR_CAS_UINT
#define CK_F_PR_CAS_UINT_VALUE
#define CK_F_PR_DEC_16
#define CK_F_PR_DEC_32
#define CK_F_PR_DEC_64
#define CK_F_PR_DEC_8
#define CK_F_PR_DEC_CHAR
#define CK_F_PR_DEC_INT
#define CK_F_PR_DEC_PTR
#define CK_F_PR_DEC_UINT
#define CK_F_PR_FAA_16
#define CK_F_PR_FAA_32
#define CK_F_PR_FAA_64
#define CK_F_PR_FAA_8
#define CK_F_PR_FAA_CHAR
#define CK_F_PR_FAA_INT
#define CK_F_PR_FAA_PTR
#define CK_F_PR_FAA_UINT
#define CK_F_PR_FENCE_LOAD
#define CK_F_PR_FENCE_LOAD_DEPENDS
#define CK_F_PR_FENCE_MEMORY
#define CK_F_PR_FENCE_STORE
#define CK_F_PR_FENCE_STRICT_LOAD
#define CK_F_PR_FENCE_STRICT_MEMORY
#define CK_F_PR_FENCE_STRICT_STORE
#define CK_F_PR_INC_16
#define CK_F_PR_INC_32
#define CK_F_PR_INC_64
#define CK_F_PR_INC_8
#define CK_F_PR_INC_CHAR
#define CK_F_PR_INC_INT
#define CK_F_PR_INC_PTR
#define CK_F_PR_INC_UINT
#define CK_F_PR_LOAD_16
#define CK_F_PR_LOAD_32
#define CK_F_PR_LOAD_64
#define CK_F_PR_LOAD_8
#define CK_F_PR_LOAD_CHAR
#define CK_F_PR_LOAD_INT
#define CK_F_PR_LOAD_PTR
#define CK_F_PR_LOAD_UINT
#define CK_F_PR_OR_16
#define CK_F_PR_OR_32
#define CK_F_PR_OR_64
#define CK_F_PR_OR_8
#define CK_F_PR_OR_CHAR
#define CK_F_PR_OR_INT
#define CK_F_PR_OR_PTR
#define CK_F_PR_OR_UINT
#define CK_F_PR_STALL
#define CK_F_PR_STORE_16
#define CK_F_PR_STORE_32
#define CK_F_PR_STORE_64
#define CK_F_PR_STORE_8
#define CK_F_PR_STORE_CHAR
#define CK_F_PR_STORE_INT
#define CK_F_PR_STORE_PTR
#define CK_F_PR_STORE_UINT
#define CK_F_PR_SUB_16
#define CK_F_PR_SUB_32
#define CK_F_PR_SUB_64
#define CK_F_PR_SUB_8
#define CK_F_PR_SUB_CHAR
#define CK_F_PR_SUB_INT
#define CK_F_PR_SUB_PTR
#define CK_F_PR_SUB_UINT
#define CK_F_PR_XOR_16
#define CK_F_PR_XOR_32
#define CK_F_PR_XOR_64
#define CK_F_PR_XOR_8
#define CK_F_PR_XOR_CHAR
#define CK_F_PR_XOR_INT
#define CK_F_PR_XOR_PTR
#define CK_F_PR_XOR_UINT

295
sys/contrib/ck/include/gcc/ck_pr.h
Normal file
@ -0,0 +1,295 @@
/*
 * Copyright 2010 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_PR_GCC_H
#define CK_PR_GCC_H

#ifndef CK_PR_H
#error Do not include this file directly, use ck_pr.h
#endif

#include <ck_cc.h>

CK_CC_INLINE static void
ck_pr_barrier(void)
{

	__asm__ __volatile__("" ::: "memory");
	return;
}

#ifndef CK_F_PR
#define CK_F_PR

#include <ck_stdbool.h>
#include <ck_stdint.h>

/*
 * The following represent supported atomic operations.
 * These operations may be emulated.
 */
#include "ck_f_pr.h"

#define CK_PR_ACCESS(x) (*(volatile __typeof__(x) *)&(x))

#define CK_PR_LOAD(S, M, T)				\
	CK_CC_INLINE static T				\
	ck_pr_md_load_##S(const M *target)		\
	{						\
		T r;					\
		ck_pr_barrier();			\
		r = CK_PR_ACCESS(*(const T *)target);	\
		ck_pr_barrier();			\
		return (r);				\
	}						\
	CK_CC_INLINE static void			\
	ck_pr_md_store_##S(M *target, T v)		\
	{						\
		ck_pr_barrier();			\
		CK_PR_ACCESS(*(T *)target) = v;		\
		ck_pr_barrier();			\
		return;					\
	}

CK_CC_INLINE static void *
ck_pr_md_load_ptr(const void *target)
{
	void *r;

	ck_pr_barrier();
	r = CK_CC_DECONST_PTR(CK_PR_ACCESS(target));
	ck_pr_barrier();

	return r;
}

CK_CC_INLINE static void
ck_pr_md_store_ptr(void *target, const void *v)
{

	ck_pr_barrier();
	CK_PR_ACCESS(target) = CK_CC_DECONST_PTR(v);
	ck_pr_barrier();
	return;
}

#define CK_PR_LOAD_S(S, T) CK_PR_LOAD(S, T, T)

CK_PR_LOAD_S(char, char)
CK_PR_LOAD_S(uint, unsigned int)
CK_PR_LOAD_S(int, int)
CK_PR_LOAD_S(double, double)
CK_PR_LOAD_S(64, uint64_t)
CK_PR_LOAD_S(32, uint32_t)
CK_PR_LOAD_S(16, uint16_t)
CK_PR_LOAD_S(8, uint8_t)

#undef CK_PR_LOAD_S
#undef CK_PR_LOAD

CK_CC_INLINE static void
ck_pr_stall(void)
{

	ck_pr_barrier();
}

/*
 * Load and store fences are equivalent to full fences in the GCC port.
 */
#define CK_PR_FENCE(T)				\
	CK_CC_INLINE static void		\
	ck_pr_fence_strict_##T(void)		\
	{					\
		__sync_synchronize();		\
	}

CK_PR_FENCE(atomic)
CK_PR_FENCE(atomic_atomic)
CK_PR_FENCE(atomic_load)
CK_PR_FENCE(atomic_store)
CK_PR_FENCE(store_atomic)
CK_PR_FENCE(load_atomic)
CK_PR_FENCE(load)
CK_PR_FENCE(load_load)
CK_PR_FENCE(load_store)
CK_PR_FENCE(store)
CK_PR_FENCE(store_store)
CK_PR_FENCE(store_load)
CK_PR_FENCE(memory)
CK_PR_FENCE(acquire)
CK_PR_FENCE(release)
CK_PR_FENCE(acqrel)
CK_PR_FENCE(lock)
CK_PR_FENCE(unlock)

#undef CK_PR_FENCE

/*
 * Atomic compare and swap.
 */
#define CK_PR_CAS(S, M, T)						\
	CK_CC_INLINE static bool					\
	ck_pr_cas_##S(M *target, T compare, T set)			\
	{								\
		bool z;							\
		z = __sync_bool_compare_and_swap((T *)target, compare, set); \
		return z;						\
	}

CK_PR_CAS(ptr, void, void *)

#define CK_PR_CAS_S(S, T) CK_PR_CAS(S, T, T)

CK_PR_CAS_S(char, char)
CK_PR_CAS_S(int, int)
CK_PR_CAS_S(uint, unsigned int)
CK_PR_CAS_S(64, uint64_t)
CK_PR_CAS_S(32, uint32_t)
CK_PR_CAS_S(16, uint16_t)
CK_PR_CAS_S(8, uint8_t)

#undef CK_PR_CAS_S
#undef CK_PR_CAS

/*
 * Compare and swap, set *v to old value of target.
 */
CK_CC_INLINE static bool
ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *v)
{
	set = __sync_val_compare_and_swap((void **)target, compare, set);
	*(void **)v = set;
	return (set == compare);
}

#define CK_PR_CAS_O(S, T)						\
	CK_CC_INLINE static bool					\
	ck_pr_cas_##S##_value(T *target, T compare, T set, T *v)	\
	{								\
		set = __sync_val_compare_and_swap(target, compare, set);\
		*v = set;						\
		return (set == compare);				\
	}

CK_PR_CAS_O(char, char)
CK_PR_CAS_O(int, int)
CK_PR_CAS_O(uint, unsigned int)
CK_PR_CAS_O(64, uint64_t)
CK_PR_CAS_O(32, uint32_t)
CK_PR_CAS_O(16, uint16_t)
CK_PR_CAS_O(8, uint8_t)

#undef CK_PR_CAS_O

/*
 * Atomic fetch-and-add operations.
 */
#define CK_PR_FAA(S, M, T)					\
	CK_CC_INLINE static T					\
	ck_pr_faa_##S(M *target, T d)				\
	{							\
		d = __sync_fetch_and_add((T *)target, d);	\
		return (d);					\
	}

CK_PR_FAA(ptr, void, void *)

#define CK_PR_FAA_S(S, T) CK_PR_FAA(S, T, T)

CK_PR_FAA_S(char, char)
CK_PR_FAA_S(uint, unsigned int)
CK_PR_FAA_S(int, int)
CK_PR_FAA_S(64, uint64_t)
CK_PR_FAA_S(32, uint32_t)
CK_PR_FAA_S(16, uint16_t)
CK_PR_FAA_S(8, uint8_t)

#undef CK_PR_FAA_S
#undef CK_PR_FAA

/*
 * Atomic store-only binary operations.
 */
#define CK_PR_BINARY(K, S, M, T)				\
	CK_CC_INLINE static void				\
	ck_pr_##K##_##S(M *target, T d)				\
	{							\
		d = __sync_fetch_and_##K((T *)target, d);	\
		return;						\
	}

#define CK_PR_BINARY_S(K, S, T) CK_PR_BINARY(K, S, T, T)

#define CK_PR_GENERATE(K)			\
	CK_PR_BINARY(K, ptr, void, void *)	\
	CK_PR_BINARY_S(K, char, char)		\
	CK_PR_BINARY_S(K, int, int)		\
	CK_PR_BINARY_S(K, uint, unsigned int)	\
	CK_PR_BINARY_S(K, 64, uint64_t)		\
	CK_PR_BINARY_S(K, 32, uint32_t)		\
	CK_PR_BINARY_S(K, 16, uint16_t)		\
	CK_PR_BINARY_S(K, 8, uint8_t)

CK_PR_GENERATE(add)
CK_PR_GENERATE(sub)
CK_PR_GENERATE(and)
CK_PR_GENERATE(or)
CK_PR_GENERATE(xor)

#undef CK_PR_GENERATE
#undef CK_PR_BINARY_S
#undef CK_PR_BINARY

#define CK_PR_UNARY(S, M, T)			\
	CK_CC_INLINE static void		\
	ck_pr_inc_##S(M *target)		\
	{					\
		ck_pr_add_##S(target, (T)1);	\
		return;				\
	}					\
	CK_CC_INLINE static void		\
	ck_pr_dec_##S(M *target)		\
	{					\
		ck_pr_sub_##S(target, (T)1);	\
		return;				\
	}

#define CK_PR_UNARY_S(S, M) CK_PR_UNARY(S, M, M)

CK_PR_UNARY(ptr, void, void *)
CK_PR_UNARY_S(char, char)
CK_PR_UNARY_S(int, int)
CK_PR_UNARY_S(uint, unsigned int)
CK_PR_UNARY_S(64, uint64_t)
CK_PR_UNARY_S(32, uint32_t)
CK_PR_UNARY_S(16, uint16_t)
CK_PR_UNARY_S(8, uint8_t)

#undef CK_PR_UNARY_S
#undef CK_PR_UNARY
#endif /* !CK_F_PR */
#endif /* CK_PR_GCC_H */
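[Editor's note: the generic GCC port above derives every operation from ck_pr_barrier() and the __sync builtins. A minimal usage sketch follows; it is not part of the imported sources, and the function and variable names are illustrative only.]

#include <ck_pr.h>

static unsigned int event_count;	/* hypothetical shared counter */

static void
record_event(void)
{
	unsigned int snap;

	/* Generated by CK_PR_UNARY above: atomic increment. */
	ck_pr_inc_uint(&event_count);

	/* Fetch-and-add of 0 is an atomic read; faa returns the old value. */
	snap = ck_pr_faa_uint(&event_count, 0);

	/* Reset the counter only if nobody raced past us. */
	if (ck_pr_cas_uint(&event_count, snap, 0) == false) {
		/* Another thread updated event_count concurrently. */
	}
}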
79
sys/contrib/ck/include/gcc/ppc/ck_f_pr.h
Normal file
@ -0,0 +1,79 @@
/* DO NOT EDIT. This is auto-generated from feature.sh */
#define CK_F_PR_ADD_32
#define CK_F_PR_ADD_INT
#define CK_F_PR_ADD_PTR
#define CK_F_PR_ADD_UINT
#define CK_F_PR_AND_32
#define CK_F_PR_AND_INT
#define CK_F_PR_AND_PTR
#define CK_F_PR_AND_UINT
#define CK_F_PR_CAS_32
#define CK_F_PR_CAS_32_VALUE
#define CK_F_PR_CAS_INT
#define CK_F_PR_CAS_INT_VALUE
#define CK_F_PR_CAS_PTR
#define CK_F_PR_CAS_PTR_VALUE
#define CK_F_PR_CAS_UINT
#define CK_F_PR_CAS_UINT_VALUE
#define CK_F_PR_DEC_32
#define CK_F_PR_DEC_INT
#define CK_F_PR_DEC_PTR
#define CK_F_PR_DEC_UINT
#define CK_F_PR_FAA_32
#define CK_F_PR_FAA_INT
#define CK_F_PR_FAA_PTR
#define CK_F_PR_FAA_UINT
#define CK_F_PR_FAS_32
#define CK_F_PR_FAS_INT
#define CK_F_PR_FAS_PTR
#define CK_F_PR_FAS_UINT
#define CK_F_PR_FENCE_LOAD
#define CK_F_PR_FENCE_LOAD_DEPENDS
#define CK_F_PR_FENCE_MEMORY
#define CK_F_PR_FENCE_STORE
#define CK_F_PR_FENCE_STRICT_LOAD
#define CK_F_PR_FENCE_STRICT_LOAD_DEPENDS
#define CK_F_PR_FENCE_STRICT_MEMORY
#define CK_F_PR_FENCE_STRICT_STORE
#define CK_F_PR_INC_32
#define CK_F_PR_INC_INT
#define CK_F_PR_INC_PTR
#define CK_F_PR_INC_UINT
#define CK_F_PR_LOAD_16
#define CK_F_PR_LOAD_32
#define CK_F_PR_LOAD_8
#define CK_F_PR_LOAD_CHAR
#define CK_F_PR_LOAD_INT
#define CK_F_PR_LOAD_PTR
#define CK_F_PR_LOAD_SHORT
#define CK_F_PR_LOAD_UINT
#define CK_F_PR_NEG_32
#define CK_F_PR_NEG_INT
#define CK_F_PR_NEG_PTR
#define CK_F_PR_NEG_UINT
#define CK_F_PR_NOT_32
#define CK_F_PR_NOT_INT
#define CK_F_PR_NOT_PTR
#define CK_F_PR_NOT_UINT
#define CK_F_PR_OR_32
#define CK_F_PR_OR_INT
#define CK_F_PR_OR_PTR
#define CK_F_PR_OR_UINT
#define CK_F_PR_STALL
#define CK_F_PR_STORE_16
#define CK_F_PR_STORE_32
#define CK_F_PR_STORE_8
#define CK_F_PR_STORE_CHAR
#define CK_F_PR_STORE_INT
#define CK_F_PR_STORE_PTR
#define CK_F_PR_STORE_SHORT
#define CK_F_PR_STORE_UINT
#define CK_F_PR_SUB_32
#define CK_F_PR_SUB_INT
#define CK_F_PR_SUB_PTR
#define CK_F_PR_SUB_UINT
#define CK_F_PR_XOR_32
#define CK_F_PR_XOR_INT
#define CK_F_PR_XOR_PTR
#define CK_F_PR_XOR_UINT
327
sys/contrib/ck/include/gcc/ppc/ck_pr.h
Normal file
@ -0,0 +1,327 @@
/*
 * Copyright 2009-2015 Samy Al Bahra.
 * Copyright 2012 João Fernandes.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_PR_PPC_H
#define CK_PR_PPC_H

#ifndef CK_PR_H
#error Do not include this file directly, use ck_pr.h
#endif

#include <ck_cc.h>
#include <ck_md.h>

/*
 * The following represent supported atomic operations.
 * These operations may be emulated.
 */
#include "ck_f_pr.h"

/*
 * Minimum interface requirement met.
 */
#define CK_F_PR

/*
 * This bounces the hardware thread from low to medium
 * priority. I am unsure of the benefits of this approach
 * but it is used by the Linux kernel.
 */
CK_CC_INLINE static void
ck_pr_stall(void)
{

	__asm__ __volatile__("or 1, 1, 1;"
			     "or 2, 2, 2;" ::: "memory");
	return;
}

#define CK_PR_FENCE(T, I)				\
	CK_CC_INLINE static void			\
	ck_pr_fence_strict_##T(void)			\
	{						\
		__asm__ __volatile__(I ::: "memory");	\
	}

CK_PR_FENCE(atomic, "lwsync")
CK_PR_FENCE(atomic_store, "lwsync")
CK_PR_FENCE(atomic_load, "sync")
CK_PR_FENCE(store_atomic, "lwsync")
CK_PR_FENCE(load_atomic, "lwsync")
CK_PR_FENCE(store, "lwsync")
CK_PR_FENCE(store_load, "sync")
CK_PR_FENCE(load, "lwsync")
CK_PR_FENCE(load_store, "lwsync")
CK_PR_FENCE(memory, "sync")
CK_PR_FENCE(acquire, "lwsync")
CK_PR_FENCE(release, "lwsync")
CK_PR_FENCE(acqrel, "lwsync")
CK_PR_FENCE(lock, "lwsync")
CK_PR_FENCE(unlock, "lwsync")

#undef CK_PR_FENCE

#define CK_PR_LOAD(S, M, T, C, I)			\
	CK_CC_INLINE static T				\
	ck_pr_md_load_##S(const M *target)		\
	{						\
		T r;					\
		__asm__ __volatile__(I "%U1%X1 %0, %1"	\
		    : "=r" (r)				\
		    : "m" (*(const C *)target)		\
		    : "memory");			\
		return (r);				\
	}

CK_PR_LOAD(ptr, void, void *, uint32_t, "lwz")

#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)

CK_PR_LOAD_S(32, uint32_t, "lwz")
CK_PR_LOAD_S(16, uint16_t, "lhz")
CK_PR_LOAD_S(8, uint8_t, "lbz")
CK_PR_LOAD_S(uint, unsigned int, "lwz")
CK_PR_LOAD_S(int, int, "lwz")
CK_PR_LOAD_S(short, short, "lhz")
CK_PR_LOAD_S(char, char, "lbz")

#undef CK_PR_LOAD_S
#undef CK_PR_LOAD

#define CK_PR_STORE(S, M, T, C, I)			\
	CK_CC_INLINE static void			\
	ck_pr_md_store_##S(M *target, T v)		\
	{						\
		__asm__ __volatile__(I "%U0%X0 %1, %0"	\
		    : "=m" (*(C *)target)		\
		    : "r" (v)				\
		    : "memory");			\
		return;					\
	}

CK_PR_STORE(ptr, void, const void *, uint32_t, "stw")

#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)

CK_PR_STORE_S(32, uint32_t, "stw")
CK_PR_STORE_S(16, uint16_t, "sth")
CK_PR_STORE_S(8, uint8_t, "stb")
CK_PR_STORE_S(uint, unsigned int, "stw")
CK_PR_STORE_S(int, int, "stw")
CK_PR_STORE_S(short, short, "sth")
CK_PR_STORE_S(char, char, "stb")

#undef CK_PR_STORE_S
#undef CK_PR_STORE

#define CK_PR_CAS(N, T, M)						\
	CK_CC_INLINE static bool					\
	ck_pr_cas_##N##_value(M *target, T compare, T set, M *value)	\
	{								\
		T previous;						\
		__asm__ __volatile__("1:"				\
		    "lwarx %0, 0, %1;"					\
		    "cmpw 0, %0, %3;"					\
		    "bne- 2f;"						\
		    "stwcx. %2, 0, %1;"					\
		    "bne- 1b;"						\
		    "2:"						\
		    : "=&r" (previous)					\
		    : "r" (target),					\
		      "r" (set),					\
		      "r" (compare)					\
		    : "memory", "cc");					\
		*(T *)value = previous;					\
		return (previous == compare);				\
	}								\
	CK_CC_INLINE static bool					\
	ck_pr_cas_##N(M *target, T compare, T set)			\
	{								\
		T previous;						\
		__asm__ __volatile__("1:"				\
		    "lwarx %0, 0, %1;"					\
		    "cmpw 0, %0, %3;"					\
		    "bne- 2f;"						\
		    "stwcx. %2, 0, %1;"					\
		    "bne- 1b;"						\
		    "2:"						\
		    : "=&r" (previous)					\
		    : "r" (target),					\
		      "r" (set),					\
		      "r" (compare)					\
		    : "memory", "cc");					\
		return (previous == compare);				\
	}

CK_PR_CAS(ptr, void *, void)
#define CK_PR_CAS_S(a, b) CK_PR_CAS(a, b, b)
CK_PR_CAS_S(32, uint32_t)
CK_PR_CAS_S(uint, unsigned int)
CK_PR_CAS_S(int, int)

#undef CK_PR_CAS_S
#undef CK_PR_CAS

#define CK_PR_FAS(N, M, T, W)			\
	CK_CC_INLINE static T			\
	ck_pr_fas_##N(M *target, T v)		\
	{					\
		T previous;			\
		__asm__ __volatile__("1:"	\
		    "l" W "arx %0, 0, %1;"	\
		    "st" W "cx. %2, 0, %1;"	\
		    "bne- 1b;"			\
		    : "=&r" (previous)		\
		    : "r" (target),		\
		      "r" (v)			\
		    : "memory", "cc");		\
		return (previous);		\
	}

CK_PR_FAS(32, uint32_t, uint32_t, "w")
CK_PR_FAS(ptr, void, void *, "w")
CK_PR_FAS(int, int, int, "w")
CK_PR_FAS(uint, unsigned int, unsigned int, "w")

#undef CK_PR_FAS

#define CK_PR_UNARY(O, N, M, T, I, W)		\
	CK_CC_INLINE static void		\
	ck_pr_##O##_##N(M *target)		\
	{					\
		T previous;			\
		__asm__ __volatile__("1:"	\
		    "l" W "arx %0, 0, %1;"	\
		    I ";"			\
		    "st" W "cx. %0, 0, %1;"	\
		    "bne- 1b;"			\
		    : "=&r" (previous)		\
		    : "r" (target)		\
		    : "memory", "cc");		\
		return;				\
	}

CK_PR_UNARY(inc, ptr, void, void *, "addic %0, %0, 1", "w")
CK_PR_UNARY(dec, ptr, void, void *, "addic %0, %0, -1", "w")
CK_PR_UNARY(not, ptr, void, void *, "not %0, %0", "w")
CK_PR_UNARY(neg, ptr, void, void *, "neg %0, %0", "w")

#define CK_PR_UNARY_S(S, T, W)					\
	CK_PR_UNARY(inc, S, T, T, "addic %0, %0, 1", W)		\
	CK_PR_UNARY(dec, S, T, T, "addic %0, %0, -1", W)	\
	CK_PR_UNARY(not, S, T, T, "not %0, %0", W)		\
	CK_PR_UNARY(neg, S, T, T, "neg %0, %0", W)

CK_PR_UNARY_S(32, uint32_t, "w")
CK_PR_UNARY_S(uint, unsigned int, "w")
CK_PR_UNARY_S(int, int, "w")

#undef CK_PR_UNARY_S
#undef CK_PR_UNARY

#define CK_PR_BINARY(O, N, M, T, I, W)		\
	CK_CC_INLINE static void		\
	ck_pr_##O##_##N(M *target, T delta)	\
	{					\
		T previous;			\
		__asm__ __volatile__("1:"	\
		    "l" W "arx %0, 0, %1;"	\
		    I " %0, %2, %0;"		\
		    "st" W "cx. %0, 0, %1;"	\
		    "bne- 1b;"			\
		    : "=&r" (previous)		\
		    : "r" (target),		\
		      "r" (delta)		\
		    : "memory", "cc");		\
		return;				\
	}

CK_PR_BINARY(and, ptr, void, uintptr_t, "and", "w")
CK_PR_BINARY(add, ptr, void, uintptr_t, "add", "w")
CK_PR_BINARY(or, ptr, void, uintptr_t, "or", "w")
CK_PR_BINARY(sub, ptr, void, uintptr_t, "sub", "w")
CK_PR_BINARY(xor, ptr, void, uintptr_t, "xor", "w")

#define CK_PR_BINARY_S(S, T, W)			\
	CK_PR_BINARY(and, S, T, T, "and", W)	\
	CK_PR_BINARY(add, S, T, T, "add", W)	\
	CK_PR_BINARY(or, S, T, T, "or", W)	\
	CK_PR_BINARY(sub, S, T, T, "subf", W)	\
	CK_PR_BINARY(xor, S, T, T, "xor", W)

CK_PR_BINARY_S(32, uint32_t, "w")
CK_PR_BINARY_S(uint, unsigned int, "w")
CK_PR_BINARY_S(int, int, "w")

#undef CK_PR_BINARY_S
#undef CK_PR_BINARY

CK_CC_INLINE static void *
ck_pr_faa_ptr(void *target, uintptr_t delta)
{
	uintptr_t previous, r;

	__asm__ __volatile__("1:"
	    "lwarx %0, 0, %2;"
	    "add %1, %3, %0;"
	    "stwcx. %1, 0, %2;"
	    "bne- 1b;"
	    : "=&r" (previous),
	      "=&r" (r)
	    : "r" (target),
	      "r" (delta)
	    : "memory", "cc");

	return (void *)(previous);
}

#define CK_PR_FAA(S, T, W)			\
	CK_CC_INLINE static T			\
	ck_pr_faa_##S(T *target, T delta)	\
	{					\
		T previous, r;			\
		__asm__ __volatile__("1:"	\
		    "l" W "arx %0, 0, %2;"	\
		    "add %1, %3, %0;"		\
		    "st" W "cx. %1, 0, %2;"	\
		    "bne- 1b;"			\
		    : "=&r" (previous),		\
		      "=&r" (r)			\
		    : "r" (target),		\
		      "r" (delta)		\
		    : "memory", "cc");		\
		return (previous);		\
	}

CK_PR_FAA(32, uint32_t, "w")
CK_PR_FAA(uint, unsigned int, "w")
CK_PR_FAA(int, int, "w")

#undef CK_PR_FAA

#endif /* CK_PR_PPC_H */
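[Editor's note: the lwarx/stwcx. pairs above are PowerPC load-linked/store-conditional; the store fails if another thread touched the reservation, so the macro branches back and retries. The same retry structure appears at C level when the "_value" CAS variants are used, as in this sketch. It is not part of the import; names are illustrative.]

#include <ck_pr.h>
#include <stdbool.h>

/* Atomically add "delta" to *counter, but never let it exceed "limit". */
static bool
bounded_add(unsigned int *counter, unsigned int delta, unsigned int limit)
{
	unsigned int snap = ck_pr_load_uint(counter);

	for (;;) {
		if (snap + delta > limit)
			return false;
		/* On failure, snap is refreshed with the current value. */
		if (ck_pr_cas_uint_value(counter, snap, snap + delta,
		    &snap) == true)
			return true;
	}
}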
97
sys/contrib/ck/include/gcc/ppc64/ck_f_pr.h
Normal file
@ -0,0 +1,97 @@
/* DO NOT EDIT. This is auto-generated from feature.sh */
#define CK_F_PR_ADD_32
#define CK_F_PR_ADD_64
#define CK_F_PR_ADD_INT
#define CK_F_PR_ADD_PTR
#define CK_F_PR_ADD_UINT
#define CK_F_PR_AND_32
#define CK_F_PR_AND_64
#define CK_F_PR_AND_INT
#define CK_F_PR_AND_PTR
#define CK_F_PR_AND_UINT
#define CK_F_PR_CAS_32
#define CK_F_PR_CAS_32_VALUE
#define CK_F_PR_CAS_64
#define CK_F_PR_CAS_64_VALUE
#define CK_F_PR_CAS_INT
#define CK_F_PR_CAS_INT_VALUE
#define CK_F_PR_CAS_PTR
#define CK_F_PR_CAS_PTR_VALUE
#define CK_F_PR_CAS_UINT
#define CK_F_PR_CAS_UINT_VALUE
#define CK_F_PR_DEC_32
#define CK_F_PR_DEC_64
#define CK_F_PR_DEC_INT
#define CK_F_PR_DEC_PTR
#define CK_F_PR_DEC_UINT
#define CK_F_PR_FAA_32
#define CK_F_PR_FAA_64
#define CK_F_PR_FAA_INT
#define CK_F_PR_FAA_PTR
#define CK_F_PR_FAA_UINT
#define CK_F_PR_FAS_32
#define CK_F_PR_FAS_64
#define CK_F_PR_FAS_INT
#define CK_F_PR_FAS_PTR
#define CK_F_PR_FAS_UINT
#define CK_F_PR_FAS_DOUBLE
#define CK_F_PR_FENCE_LOAD
#define CK_F_PR_FENCE_LOAD_DEPENDS
#define CK_F_PR_FENCE_MEMORY
#define CK_F_PR_FENCE_STORE
#define CK_F_PR_FENCE_STRICT_LOAD
#define CK_F_PR_FENCE_STRICT_LOAD_DEPENDS
#define CK_F_PR_FENCE_STRICT_MEMORY
#define CK_F_PR_FENCE_STRICT_STORE
#define CK_F_PR_INC_32
#define CK_F_PR_INC_64
#define CK_F_PR_INC_INT
#define CK_F_PR_INC_PTR
#define CK_F_PR_INC_UINT
#define CK_F_PR_LOAD_16
#define CK_F_PR_LOAD_32
#define CK_F_PR_LOAD_64
#define CK_F_PR_LOAD_8
#define CK_F_PR_LOAD_CHAR
#define CK_F_PR_LOAD_DOUBLE
#define CK_F_PR_LOAD_INT
#define CK_F_PR_LOAD_PTR
#define CK_F_PR_LOAD_SHORT
#define CK_F_PR_LOAD_UINT
#define CK_F_PR_NEG_32
#define CK_F_PR_NEG_64
#define CK_F_PR_NEG_INT
#define CK_F_PR_NEG_PTR
#define CK_F_PR_NEG_UINT
#define CK_F_PR_NOT_32
#define CK_F_PR_NOT_64
#define CK_F_PR_NOT_INT
#define CK_F_PR_NOT_PTR
#define CK_F_PR_NOT_UINT
#define CK_F_PR_OR_32
#define CK_F_PR_OR_64
#define CK_F_PR_OR_INT
#define CK_F_PR_OR_PTR
#define CK_F_PR_OR_UINT
#define CK_F_PR_STALL
#define CK_F_PR_STORE_16
#define CK_F_PR_STORE_32
#define CK_F_PR_STORE_64
#define CK_F_PR_STORE_8
#define CK_F_PR_STORE_CHAR
#define CK_F_PR_STORE_DOUBLE
#define CK_F_PR_STORE_INT
#define CK_F_PR_STORE_PTR
#define CK_F_PR_STORE_SHORT
#define CK_F_PR_STORE_UINT
#define CK_F_PR_SUB_32
#define CK_F_PR_SUB_64
#define CK_F_PR_SUB_INT
#define CK_F_PR_SUB_PTR
#define CK_F_PR_SUB_UINT
#define CK_F_PR_XOR_32
#define CK_F_PR_XOR_64
#define CK_F_PR_XOR_INT
#define CK_F_PR_XOR_PTR
#define CK_F_PR_XOR_UINT
421
sys/contrib/ck/include/gcc/ppc64/ck_pr.h
Normal file
@ -0,0 +1,421 @@
/*
 * Copyright 2009-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_PR_PPC64_H
#define CK_PR_PPC64_H

#ifndef CK_PR_H
#error Do not include this file directly, use ck_pr.h
#endif

#include <ck_cc.h>
#include <ck_md.h>

/*
 * The following represent supported atomic operations.
 * These operations may be emulated.
 */
#include "ck_f_pr.h"

/*
 * Minimum interface requirement met.
 */
#define CK_F_PR

/*
 * This bounces the hardware thread from low to medium
 * priority. I am unsure of the benefits of this approach
 * but it is used by the Linux kernel.
 */
CK_CC_INLINE static void
ck_pr_stall(void)
{

	__asm__ __volatile__("or 1, 1, 1;"
			     "or 2, 2, 2;" ::: "memory");
	return;
}

#define CK_PR_FENCE(T, I)				\
	CK_CC_INLINE static void			\
	ck_pr_fence_strict_##T(void)			\
	{						\
		__asm__ __volatile__(I ::: "memory");	\
	}

/*
 * These are derived from:
 *     http://www.ibm.com/developerworks/systems/articles/powerpc.html
 */
CK_PR_FENCE(atomic, "lwsync")
CK_PR_FENCE(atomic_store, "lwsync")
CK_PR_FENCE(atomic_load, "sync")
CK_PR_FENCE(store_atomic, "lwsync")
CK_PR_FENCE(load_atomic, "lwsync")
CK_PR_FENCE(store, "lwsync")
CK_PR_FENCE(store_load, "sync")
CK_PR_FENCE(load, "lwsync")
CK_PR_FENCE(load_store, "lwsync")
CK_PR_FENCE(memory, "sync")
CK_PR_FENCE(acquire, "lwsync")
CK_PR_FENCE(release, "lwsync")
CK_PR_FENCE(acqrel, "lwsync")
CK_PR_FENCE(lock, "lwsync")
CK_PR_FENCE(unlock, "lwsync")

#undef CK_PR_FENCE

#define CK_PR_LOAD(S, M, T, C, I)			\
	CK_CC_INLINE static T				\
	ck_pr_md_load_##S(const M *target)		\
	{						\
		T r;					\
		__asm__ __volatile__(I "%U1%X1 %0, %1"	\
		    : "=r" (r)				\
		    : "m" (*(const C *)target)		\
		    : "memory");			\
		return (r);				\
	}

CK_PR_LOAD(ptr, void, void *, uint64_t, "ld")

#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)

CK_PR_LOAD_S(64, uint64_t, "ld")
CK_PR_LOAD_S(32, uint32_t, "lwz")
CK_PR_LOAD_S(16, uint16_t, "lhz")
CK_PR_LOAD_S(8, uint8_t, "lbz")
CK_PR_LOAD_S(uint, unsigned int, "lwz")
CK_PR_LOAD_S(int, int, "lwz")
CK_PR_LOAD_S(short, short, "lhz")
CK_PR_LOAD_S(char, char, "lbz")
CK_PR_LOAD_S(double, double, "ld")

#undef CK_PR_LOAD_S
#undef CK_PR_LOAD

#define CK_PR_STORE(S, M, T, C, I)			\
	CK_CC_INLINE static void			\
	ck_pr_md_store_##S(M *target, T v)		\
	{						\
		__asm__ __volatile__(I "%U0%X0 %1, %0"	\
		    : "=m" (*(C *)target)		\
		    : "r" (v)				\
		    : "memory");			\
		return;					\
	}

CK_PR_STORE(ptr, void, const void *, uint64_t, "std")

#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)

CK_PR_STORE_S(64, uint64_t, "std")
CK_PR_STORE_S(32, uint32_t, "stw")
CK_PR_STORE_S(16, uint16_t, "sth")
CK_PR_STORE_S(8, uint8_t, "stb")
CK_PR_STORE_S(uint, unsigned int, "stw")
CK_PR_STORE_S(int, int, "stw")
CK_PR_STORE_S(short, short, "sth")
CK_PR_STORE_S(char, char, "stb")
CK_PR_STORE_S(double, double, "std")

#undef CK_PR_STORE_S
#undef CK_PR_STORE

CK_CC_INLINE static bool
ck_pr_cas_64_value(uint64_t *target, uint64_t compare, uint64_t set, uint64_t *value)
{
	uint64_t previous;

	__asm__ __volatile__("1:"
	    "ldarx %0, 0, %1;"
	    "cmpd 0, %0, %3;"
	    "bne- 2f;"
	    "stdcx. %2, 0, %1;"
	    "bne- 1b;"
	    "2:"
	    : "=&r" (previous)
	    : "r" (target),
	      "r" (set),
	      "r" (compare)
	    : "memory", "cc");

	*value = previous;
	return (previous == compare);
}

CK_CC_INLINE static bool
ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *value)
{
	void *previous;

	__asm__ __volatile__("1:"
	    "ldarx %0, 0, %1;"
	    "cmpd 0, %0, %3;"
	    "bne- 2f;"
	    "stdcx. %2, 0, %1;"
	    "bne- 1b;"
	    "2:"
	    : "=&r" (previous)
	    : "r" (target),
	      "r" (set),
	      "r" (compare)
	    : "memory", "cc");

	ck_pr_md_store_ptr(value, previous);
	return (previous == compare);
}

CK_CC_INLINE static bool
ck_pr_cas_64(uint64_t *target, uint64_t compare, uint64_t set)
{
	uint64_t previous;

	__asm__ __volatile__("1:"
	    "ldarx %0, 0, %1;"
	    "cmpd 0, %0, %3;"
	    "bne- 2f;"
	    "stdcx. %2, 0, %1;"
	    "bne- 1b;"
	    "2:"
	    : "=&r" (previous)
	    : "r" (target),
	      "r" (set),
	      "r" (compare)
	    : "memory", "cc");

	return (previous == compare);
}

CK_CC_INLINE static bool
ck_pr_cas_ptr(void *target, void *compare, void *set)
{
	void *previous;

	__asm__ __volatile__("1:"
	    "ldarx %0, 0, %1;"
	    "cmpd 0, %0, %3;"
	    "bne- 2f;"
	    "stdcx. %2, 0, %1;"
	    "bne- 1b;"
	    "2:"
	    : "=&r" (previous)
	    : "r" (target),
	      "r" (set),
	      "r" (compare)
	    : "memory", "cc");

	return (previous == compare);
}

#define CK_PR_CAS(N, T)							\
	CK_CC_INLINE static bool					\
	ck_pr_cas_##N##_value(T *target, T compare, T set, T *value)	\
	{								\
		T previous;						\
		__asm__ __volatile__("1:"				\
		    "lwarx %0, 0, %1;"					\
		    "cmpw 0, %0, %3;"					\
		    "bne- 2f;"						\
		    "stwcx. %2, 0, %1;"					\
		    "bne- 1b;"						\
		    "2:"						\
		    : "=&r" (previous)					\
		    : "r" (target),					\
		      "r" (set),					\
		      "r" (compare)					\
		    : "memory", "cc");					\
		*value = previous;					\
		return (previous == compare);				\
	}								\
	CK_CC_INLINE static bool					\
	ck_pr_cas_##N(T *target, T compare, T set)			\
	{								\
		T previous;						\
		__asm__ __volatile__("1:"				\
		    "lwarx %0, 0, %1;"					\
		    "cmpw 0, %0, %3;"					\
		    "bne- 2f;"						\
		    "stwcx. %2, 0, %1;"					\
		    "bne- 1b;"						\
		    "2:"						\
		    : "=&r" (previous)					\
		    : "r" (target),					\
		      "r" (set),					\
		      "r" (compare)					\
		    : "memory", "cc");					\
		return (previous == compare);				\
	}

CK_PR_CAS(32, uint32_t)
CK_PR_CAS(uint, unsigned int)
CK_PR_CAS(int, int)

#undef CK_PR_CAS

#define CK_PR_FAS(N, M, T, W)			\
	CK_CC_INLINE static T			\
	ck_pr_fas_##N(M *target, T v)		\
	{					\
		T previous;			\
		__asm__ __volatile__("1:"	\
		    "l" W "arx %0, 0, %1;"	\
		    "st" W "cx. %2, 0, %1;"	\
		    "bne- 1b;"			\
		    : "=&r" (previous)		\
		    : "r" (target),		\
		      "r" (v)			\
		    : "memory", "cc");		\
		return (previous);		\
	}

CK_PR_FAS(64, uint64_t, uint64_t, "d")
CK_PR_FAS(32, uint32_t, uint32_t, "w")
CK_PR_FAS(double, double, double, "d")
CK_PR_FAS(ptr, void, void *, "d")
CK_PR_FAS(int, int, int, "w")
CK_PR_FAS(uint, unsigned int, unsigned int, "w")

#undef CK_PR_FAS

#define CK_PR_UNARY(O, N, M, T, I, W)		\
	CK_CC_INLINE static void		\
	ck_pr_##O##_##N(M *target)		\
	{					\
		T previous;			\
		__asm__ __volatile__("1:"	\
		    "l" W "arx %0, 0, %1;"	\
		    I ";"			\
		    "st" W "cx. %0, 0, %1;"	\
		    "bne- 1b;"			\
		    : "=&r" (previous)		\
		    : "r" (target)		\
		    : "memory", "cc");		\
		return;				\
	}

CK_PR_UNARY(inc, ptr, void, void *, "addic %0, %0, 1", "d")
CK_PR_UNARY(dec, ptr, void, void *, "addic %0, %0, -1", "d")
CK_PR_UNARY(not, ptr, void, void *, "not %0, %0", "d")
CK_PR_UNARY(neg, ptr, void, void *, "neg %0, %0", "d")

#define CK_PR_UNARY_S(S, T, W)					\
	CK_PR_UNARY(inc, S, T, T, "addic %0, %0, 1", W)		\
	CK_PR_UNARY(dec, S, T, T, "addic %0, %0, -1", W)	\
	CK_PR_UNARY(not, S, T, T, "not %0, %0", W)		\
	CK_PR_UNARY(neg, S, T, T, "neg %0, %0", W)

CK_PR_UNARY_S(64, uint64_t, "d")
CK_PR_UNARY_S(32, uint32_t, "w")
CK_PR_UNARY_S(uint, unsigned int, "w")
CK_PR_UNARY_S(int, int, "w")

#undef CK_PR_UNARY_S
#undef CK_PR_UNARY

#define CK_PR_BINARY(O, N, M, T, I, W)		\
	CK_CC_INLINE static void		\
	ck_pr_##O##_##N(M *target, T delta)	\
	{					\
		T previous;			\
		__asm__ __volatile__("1:"	\
		    "l" W "arx %0, 0, %1;"	\
		    I " %0, %2, %0;"		\
		    "st" W "cx. %0, 0, %1;"	\
		    "bne- 1b;"			\
		    : "=&r" (previous)		\
		    : "r" (target),		\
		      "r" (delta)		\
		    : "memory", "cc");		\
		return;				\
	}

CK_PR_BINARY(and, ptr, void, uintptr_t, "and", "d")
CK_PR_BINARY(add, ptr, void, uintptr_t, "add", "d")
CK_PR_BINARY(or, ptr, void, uintptr_t, "or", "d")
CK_PR_BINARY(sub, ptr, void, uintptr_t, "sub", "d")
CK_PR_BINARY(xor, ptr, void, uintptr_t, "xor", "d")

#define CK_PR_BINARY_S(S, T, W)			\
	CK_PR_BINARY(and, S, T, T, "and", W)	\
	CK_PR_BINARY(add, S, T, T, "add", W)	\
	CK_PR_BINARY(or, S, T, T, "or", W)	\
	CK_PR_BINARY(sub, S, T, T, "subf", W)	\
	CK_PR_BINARY(xor, S, T, T, "xor", W)

CK_PR_BINARY_S(64, uint64_t, "d")
CK_PR_BINARY_S(32, uint32_t, "w")
CK_PR_BINARY_S(uint, unsigned int, "w")
CK_PR_BINARY_S(int, int, "w")

#undef CK_PR_BINARY_S
#undef CK_PR_BINARY

CK_CC_INLINE static void *
ck_pr_faa_ptr(void *target, uintptr_t delta)
{
	uintptr_t previous, r;

	__asm__ __volatile__("1:"
	    "ldarx %0, 0, %2;"
	    "add %1, %3, %0;"
	    "stdcx. %1, 0, %2;"
	    "bne- 1b;"
	    : "=&r" (previous),
	      "=&r" (r)
	    : "r" (target),
	      "r" (delta)
	    : "memory", "cc");

	return (void *)(previous);
}

#define CK_PR_FAA(S, T, W)			\
	CK_CC_INLINE static T			\
	ck_pr_faa_##S(T *target, T delta)	\
	{					\
		T previous, r;			\
		__asm__ __volatile__("1:"	\
		    "l" W "arx %0, 0, %2;"	\
		    "add %1, %3, %0;"		\
		    "st" W "cx. %1, 0, %2;"	\
		    "bne- 1b;"			\
		    : "=&r" (previous),		\
		      "=&r" (r)			\
		    : "r" (target),		\
		      "r" (delta)		\
		    : "memory", "cc");		\
		return (previous);		\
	}

CK_PR_FAA(64, uint64_t, "d")
CK_PR_FAA(32, uint32_t, "w")
CK_PR_FAA(uint, unsigned int, "w")
CK_PR_FAA(int, int, "w")

#undef CK_PR_FAA

#endif /* CK_PR_PPC64_H */
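[Editor's note: on ppc64 the pointer-wide operations above use the 64-bit ldarx/stdcx. forms, so ck_pr_faa_ptr can atomically advance a shared pointer. A sketch follows; it is not part of the import, and the arena layout and names are assumptions for illustration.]

#include <ck_pr.h>
#include <stddef.h>

static char arena[4096];
static char *cursor = arena;	/* shared bump pointer */

static void *
arena_alloc(size_t size)
{
	/* faa on the pointer returns its previous value. */
	char *base = ck_pr_faa_ptr(&cursor, size);

	if (base + size > arena + sizeof(arena))
		return NULL;	/* out of space; a real allocator would back off */
	return base;
}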
26
sys/contrib/ck/include/gcc/sparcv9/ck_f_pr.h
Normal file
@ -0,0 +1,26 @@
#define CK_F_PR_CAS_64
#define CK_F_PR_CAS_64_VALUE
#define CK_F_PR_CAS_PTR
#define CK_F_PR_CAS_PTR_VALUE
#define CK_F_PR_FAS_32
#define CK_F_PR_FAS_UINT
#define CK_F_PR_FAS_INT
#define CK_F_PR_CAS_32
#define CK_F_PR_CAS_32_VALUE
#define CK_F_PR_CAS_UINT
#define CK_F_PR_CAS_INT
#define CK_F_PR_CAS_UINT_VALUE
#define CK_F_PR_CAS_INT_VALUE
#define CK_F_PR_STORE_64
#define CK_F_PR_STORE_32
#define CK_F_PR_STORE_DOUBLE
#define CK_F_PR_STORE_UINT
#define CK_F_PR_STORE_INT
#define CK_F_PR_STORE_PTR
#define CK_F_PR_LOAD_64
#define CK_F_PR_LOAD_32
#define CK_F_PR_LOAD_DOUBLE
#define CK_F_PR_LOAD_UINT
#define CK_F_PR_LOAD_INT
#define CK_F_PR_LOAD_PTR
228
sys/contrib/ck/include/gcc/sparcv9/ck_pr.h
Normal file
@ -0,0 +1,228 @@
/*
 * Copyright 2009, 2010 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_PR_SPARCV9_H
#define CK_PR_SPARCV9_H

#ifndef CK_PR_H
#error Do not include this file directly, use ck_pr.h
#endif

#include <ck_cc.h>
#include <ck_md.h>

/*
 * The following represent supported atomic operations.
 * These operations may be emulated.
 */
#include "ck_f_pr.h"

/*
 * Minimum interface requirement met.
 */
#define CK_F_PR

/*
 * Order loads at the least.
 */
CK_CC_INLINE static void
ck_pr_stall(void)
{

	__asm__ __volatile__("membar #LoadLoad" ::: "memory");
	return;
}

#define CK_PR_FENCE(T, I)				\
	CK_CC_INLINE static void			\
	ck_pr_fence_strict_##T(void)			\
	{						\
		__asm__ __volatile__(I ::: "memory");	\
	}

/*
 * Atomic operations are treated as both load and store
 * operations on SPARCv9.
 */
CK_PR_FENCE(atomic, "membar #StoreStore")
CK_PR_FENCE(atomic_store, "membar #StoreStore")
CK_PR_FENCE(atomic_load, "membar #StoreLoad")
CK_PR_FENCE(store_atomic, "membar #StoreStore")
CK_PR_FENCE(load_atomic, "membar #LoadStore")
CK_PR_FENCE(store, "membar #StoreStore")
CK_PR_FENCE(store_load, "membar #StoreLoad")
CK_PR_FENCE(load, "membar #LoadLoad")
CK_PR_FENCE(load_store, "membar #LoadStore")
CK_PR_FENCE(memory, "membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
CK_PR_FENCE(acquire, "membar #LoadLoad | #LoadStore")
CK_PR_FENCE(release, "membar #LoadStore | #StoreStore")
CK_PR_FENCE(acqrel, "membar #LoadLoad | #LoadStore | #StoreStore")
CK_PR_FENCE(lock, "membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
CK_PR_FENCE(unlock, "membar #LoadStore | #StoreStore")

#undef CK_PR_FENCE

#define CK_PR_LOAD(S, M, T, C, I)			\
	CK_CC_INLINE static T				\
	ck_pr_md_load_##S(const M *target)		\
	{						\
		T r;					\
		__asm__ __volatile__(I " [%1], %0"	\
		    : "=&r" (r)				\
		    : "r" (target)			\
		    : "memory");			\
		return (r);				\
	}

CK_PR_LOAD(ptr, void, void *, uint64_t, "ldx")

#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)

CK_PR_LOAD_S(64, uint64_t, "ldx")
CK_PR_LOAD_S(32, uint32_t, "lduw")
CK_PR_LOAD_S(uint, unsigned int, "lduw")
CK_PR_LOAD_S(double, double, "ldx")
CK_PR_LOAD_S(int, int, "ldsw")

#undef CK_PR_LOAD_S
#undef CK_PR_LOAD

#define CK_PR_STORE(S, M, T, C, I)			\
	CK_CC_INLINE static void			\
	ck_pr_md_store_##S(M *target, T v)		\
	{						\
		__asm__ __volatile__(I " %0, [%1]"	\
		    :					\
		    : "r" (v),				\
		      "r" (target)			\
		    : "memory");			\
		return;					\
	}

CK_PR_STORE(ptr, void, const void *, uint64_t, "stx")

#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)

CK_PR_STORE_S(8, uint8_t, "stub")
CK_PR_STORE_S(64, uint64_t, "stx")
CK_PR_STORE_S(32, uint32_t, "stuw")
CK_PR_STORE_S(uint, unsigned int, "stuw")
CK_PR_STORE_S(double, double, "stx")
CK_PR_STORE_S(int, int, "stsw")

#undef CK_PR_STORE_S
#undef CK_PR_STORE

CK_CC_INLINE static bool
ck_pr_cas_64_value(uint64_t *target, uint64_t compare, uint64_t set, uint64_t *value)
{

	__asm__ __volatile__("casx [%1], %2, %0"
	    : "+&r" (set)
	    : "r" (target),
	      "r" (compare)
	    : "memory");

	*value = set;
	return (compare == set);
}

CK_CC_INLINE static bool
ck_pr_cas_64(uint64_t *target, uint64_t compare, uint64_t set)
{

	__asm__ __volatile__("casx [%1], %2, %0"
	    : "+&r" (set)
	    : "r" (target),
	      "r" (compare)
	    : "memory");

	return (compare == set);
}

CK_CC_INLINE static bool
ck_pr_cas_ptr(void *target, void *compare, void *set)
{

	return ck_pr_cas_64(target, (uint64_t)compare, (uint64_t)set);
}

CK_CC_INLINE static bool
ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *previous)
{

	return ck_pr_cas_64_value(target, (uint64_t)compare, (uint64_t)set, previous);
}

#define CK_PR_CAS(N, T)							\
	CK_CC_INLINE static bool					\
	ck_pr_cas_##N##_value(T *target, T compare, T set, T *value)	\
	{								\
		__asm__ __volatile__("cas [%1], %2, %0"			\
		    : "+&r" (set)					\
		    : "r" (target),					\
		      "r" (compare)					\
		    : "memory");					\
		*value = set;						\
		return (compare == set);				\
	}								\
	CK_CC_INLINE static bool					\
	ck_pr_cas_##N(T *target, T compare, T set)			\
	{								\
		__asm__ __volatile__("cas [%1], %2, %0"			\
		    : "+&r" (set)					\
		    : "r" (target),					\
		      "r" (compare)					\
		    : "memory");					\
		return (compare == set);				\
	}

CK_PR_CAS(32, uint32_t)
CK_PR_CAS(uint, unsigned int)
CK_PR_CAS(int, int)

#undef CK_PR_CAS

#define CK_PR_FAS(N, T)					\
	CK_CC_INLINE static T				\
	ck_pr_fas_##N(T *target, T update)		\
	{						\
							\
		__asm__ __volatile__("swap [%1], %0"	\
		    : "+&r" (update)			\
		    : "r" (target)			\
		    : "memory");			\
		return (update);			\
	}

CK_PR_FAS(int, int)
CK_PR_FAS(uint, unsigned int)
CK_PR_FAS(32, uint32_t)

#undef CK_PR_FAS

#endif /* CK_PR_SPARCV9_H */
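[Editor's note: the swap-based ck_pr_fas operations above are already enough to build a test-and-set spinlock, as this sketch shows. It is not part of the import, the names are illustrative, and CK ships production-quality locks in ck_spinlock.h.]

#include <ck_pr.h>

static unsigned int lock_word;	/* 0 = free, 1 = held */

static void
toy_lock(void)
{
	/* Atomically swap in 1; if the old value was 1, the lock was held. */
	while (ck_pr_fas_uint(&lock_word, 1) == 1)
		ck_pr_stall();		/* membar #LoadLoad on sparcv9 */
	ck_pr_fence_acquire();		/* order the critical section after acquisition */
}

static void
toy_unlock(void)
{
	ck_pr_fence_release();
	ck_pr_store_uint(&lock_word, 0);
}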
152
sys/contrib/ck/include/gcc/x86/ck_f_pr.h
Normal file
@ -0,0 +1,152 @@
/* DO NOT EDIT. This is auto-generated from feature.sh */
#define CK_F_PR_ADD_16
#define CK_F_PR_ADD_32
#define CK_F_PR_ADD_8
#define CK_F_PR_ADD_CHAR
#define CK_F_PR_ADD_INT
#define CK_F_PR_ADD_PTR
#define CK_F_PR_ADD_UINT
#define CK_F_PR_AND_16
#define CK_F_PR_AND_32
#define CK_F_PR_AND_8
#define CK_F_PR_AND_CHAR
#define CK_F_PR_AND_INT
#define CK_F_PR_AND_PTR
#define CK_F_PR_AND_UINT
#define CK_F_PR_BTC_16
#define CK_F_PR_BTC_32
#define CK_F_PR_BTC_INT
#define CK_F_PR_BTC_PTR
#define CK_F_PR_BTC_UINT
#define CK_F_PR_BTR_16
#define CK_F_PR_BTR_32
#define CK_F_PR_BTR_INT
#define CK_F_PR_BTR_PTR
#define CK_F_PR_BTR_UINT
#define CK_F_PR_BTS_16
#define CK_F_PR_BTS_32
#define CK_F_PR_BTS_INT
#define CK_F_PR_BTS_PTR
#define CK_F_PR_BTS_UINT
#define CK_F_PR_CAS_16
#define CK_F_PR_CAS_16_VALUE
#define CK_F_PR_CAS_32
#define CK_F_PR_CAS_32_VALUE
#define CK_F_PR_CAS_8
#define CK_F_PR_CAS_8_VALUE
#define CK_F_PR_CAS_CHAR
#define CK_F_PR_CAS_CHAR_VALUE
#define CK_F_PR_CAS_INT
#define CK_F_PR_CAS_INT_VALUE
#define CK_F_PR_CAS_PTR
#define CK_F_PR_CAS_PTR_VALUE
#define CK_F_PR_CAS_UINT
#define CK_F_PR_CAS_UINT_VALUE
#define CK_F_PR_DEC_16
#define CK_F_PR_DEC_16_ZERO
#define CK_F_PR_DEC_32
#define CK_F_PR_DEC_32_ZERO
#define CK_F_PR_DEC_8
#define CK_F_PR_DEC_8_ZERO
#define CK_F_PR_DEC_CHAR
#define CK_F_PR_DEC_CHAR_ZERO
#define CK_F_PR_DEC_INT
#define CK_F_PR_DEC_INT_ZERO
#define CK_F_PR_DEC_PTR
#define CK_F_PR_DEC_PTR_ZERO
#define CK_F_PR_DEC_UINT
#define CK_F_PR_DEC_UINT_ZERO
#define CK_F_PR_FAA_16
#define CK_F_PR_FAA_32
#define CK_F_PR_FAA_8
#define CK_F_PR_FAA_CHAR
#define CK_F_PR_FAA_INT
#define CK_F_PR_FAA_PTR
#define CK_F_PR_FAA_UINT
#define CK_F_PR_FAS_16
#define CK_F_PR_FAS_32
#define CK_F_PR_FAS_8
#define CK_F_PR_FAS_CHAR
#define CK_F_PR_FAS_INT
#define CK_F_PR_FAS_PTR
#define CK_F_PR_FAS_UINT
#define CK_F_PR_FENCE_LOAD
#define CK_F_PR_FENCE_LOAD_DEPENDS
#define CK_F_PR_FENCE_MEMORY
#define CK_F_PR_FENCE_STORE
#define CK_F_PR_FENCE_STRICT_LOAD
#define CK_F_PR_FENCE_STRICT_LOAD_DEPENDS
#define CK_F_PR_FENCE_STRICT_MEMORY
#define CK_F_PR_FENCE_STRICT_STORE
#define CK_F_PR_INC_16
#define CK_F_PR_INC_16_ZERO
#define CK_F_PR_INC_32
#define CK_F_PR_INC_32_ZERO
#define CK_F_PR_INC_8
#define CK_F_PR_INC_8_ZERO
#define CK_F_PR_INC_CHAR
#define CK_F_PR_INC_CHAR_ZERO
#define CK_F_PR_INC_INT
#define CK_F_PR_INC_INT_ZERO
#define CK_F_PR_INC_PTR
#define CK_F_PR_INC_PTR_ZERO
#define CK_F_PR_INC_UINT
#define CK_F_PR_INC_UINT_ZERO
#define CK_F_PR_LOAD_16
#define CK_F_PR_LOAD_32
#define CK_F_PR_LOAD_8
#define CK_F_PR_LOAD_CHAR
#define CK_F_PR_LOAD_INT
#define CK_F_PR_LOAD_PTR
#define CK_F_PR_LOAD_UINT
#define CK_F_PR_NEG_16
#define CK_F_PR_NEG_16_ZERO
#define CK_F_PR_NEG_32
#define CK_F_PR_NEG_32_ZERO
#define CK_F_PR_NEG_8
#define CK_F_PR_NEG_8_ZERO
#define CK_F_PR_NEG_CHAR
#define CK_F_PR_NEG_CHAR_ZERO
#define CK_F_PR_NEG_INT
#define CK_F_PR_NEG_INT_ZERO
#define CK_F_PR_NEG_PTR
#define CK_F_PR_NEG_PTR_ZERO
#define CK_F_PR_NEG_UINT
#define CK_F_PR_NEG_UINT_ZERO
#define CK_F_PR_NOT_16
#define CK_F_PR_NOT_32
#define CK_F_PR_NOT_8
#define CK_F_PR_NOT_CHAR
#define CK_F_PR_NOT_INT
#define CK_F_PR_NOT_PTR
#define CK_F_PR_NOT_UINT
#define CK_F_PR_OR_16
#define CK_F_PR_OR_32
#define CK_F_PR_OR_8
#define CK_F_PR_OR_CHAR
#define CK_F_PR_OR_INT
#define CK_F_PR_OR_PTR
#define CK_F_PR_OR_UINT
#define CK_F_PR_STALL
#define CK_F_PR_STORE_16
#define CK_F_PR_STORE_32
#define CK_F_PR_STORE_8
#define CK_F_PR_STORE_CHAR
#define CK_F_PR_STORE_INT
#define CK_F_PR_STORE_PTR
#define CK_F_PR_STORE_UINT
#define CK_F_PR_SUB_16
#define CK_F_PR_SUB_32
#define CK_F_PR_SUB_8
#define CK_F_PR_SUB_CHAR
#define CK_F_PR_SUB_INT
#define CK_F_PR_SUB_PTR
#define CK_F_PR_SUB_UINT
#define CK_F_PR_XOR_16
#define CK_F_PR_XOR_32
#define CK_F_PR_XOR_8
#define CK_F_PR_XOR_CHAR
#define CK_F_PR_XOR_INT
#define CK_F_PR_XOR_PTR
#define CK_F_PR_XOR_UINT
390
sys/contrib/ck/include/gcc/x86/ck_pr.h
Normal file
@ -0,0 +1,390 @@
|
||||
/*
|
||||
* Copyright 2009-2015 Samy Al Bahra.
|
||||
* Copyright 2011 Devon H. O'Dell <devon.odell@gmail.com>
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef CK_PR_X86_H
|
||||
#define CK_PR_X86_H
|
||||
|
||||
#ifndef CK_PR_H
|
||||
#error Do not include this file directly, use ck_pr.h
|
||||
#endif
|
||||
|
||||
#include <ck_cc.h>
|
||||
#include <ck_md.h>
|
||||
#include <ck_stdint.h>
|
||||
|
||||
/*
|
||||
* The following represent supported atomic operations.
|
||||
* These operations may be emulated.
|
||||
*/
|
||||
#include "ck_f_pr.h"
|
||||
|
||||
/* Minimum requirements for the CK_PR interface are met. */
|
||||
#define CK_F_PR
|
||||
|
||||
#ifdef CK_MD_UMP
|
||||
#define CK_PR_LOCK_PREFIX
|
||||
#else
|
||||
#define CK_PR_LOCK_PREFIX "lock "
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Prevent speculative execution in busy-wait loops (P4 <=)
|
||||
* or "predefined delay".
|
||||
*/
|
||||
CK_CC_INLINE static void
|
||||
ck_pr_stall(void)
|
||||
{
|
||||
__asm__ __volatile__("pause" ::: "memory");
|
||||
return;
|
||||
}
|
||||
|
||||
#define CK_PR_FENCE(T, I) \
|
||||
CK_CC_INLINE static void \
|
||||
ck_pr_fence_strict_##T(void) \
|
||||
{ \
|
||||
__asm__ __volatile__(I ::: "memory"); \
|
||||
}
|
||||
|
||||
CK_PR_FENCE(atomic, "sfence")
|
||||
CK_PR_FENCE(atomic_store, "sfence")
|
||||
CK_PR_FENCE(atomic_load, "mfence")
|
||||
CK_PR_FENCE(store_atomic, "sfence")
|
||||
CK_PR_FENCE(load_atomic, "mfence")
|
||||
CK_PR_FENCE(load, "lfence")
|
||||
CK_PR_FENCE(load_store, "mfence")
|
||||
CK_PR_FENCE(store, "sfence")
|
||||
CK_PR_FENCE(store_load, "mfence")
|
||||
CK_PR_FENCE(memory, "mfence")
|
||||
CK_PR_FENCE(release, "mfence")
|
||||
CK_PR_FENCE(acquire, "mfence")
|
||||
CK_PR_FENCE(acqrel, "mfence")
|
||||
CK_PR_FENCE(lock, "mfence")
|
||||
CK_PR_FENCE(unlock, "mfence")
|
||||
|
||||
#undef CK_PR_FENCE
|
||||
|
||||
/*
|
||||
* Atomic fetch-and-store operations.
|
||||
*/
|
||||
#define CK_PR_FAS(S, M, T, C, I) \
|
||||
CK_CC_INLINE static T \
|
||||
ck_pr_fas_##S(M *target, T v) \
|
||||
{ \
|
||||
__asm__ __volatile__(I " %0, %1" \
|
||||
: "+m" (*(C *)target), \
|
||||
"+q" (v) \
|
||||
: \
|
||||
: "memory"); \
|
||||
return v; \
|
||||
}
|
||||
|
||||
CK_PR_FAS(ptr, void, void *, char, "xchgl")
|
||||
|
||||
#define CK_PR_FAS_S(S, T, I) CK_PR_FAS(S, T, T, T, I)
|
||||
|
||||
CK_PR_FAS_S(char, char, "xchgb")
|
||||
CK_PR_FAS_S(uint, unsigned int, "xchgl")
|
||||
CK_PR_FAS_S(int, int, "xchgl")
|
||||
CK_PR_FAS_S(32, uint32_t, "xchgl")
|
||||
CK_PR_FAS_S(16, uint16_t, "xchgw")
|
||||
CK_PR_FAS_S(8, uint8_t, "xchgb")
|
||||
|
||||
#undef CK_PR_FAS_S
|
||||
#undef CK_PR_FAS
|
||||
|
||||
#define CK_PR_LOAD(S, M, T, C, I) \
|
||||
CK_CC_INLINE static T \
|
||||
ck_pr_md_load_##S(const M *target) \
|
||||
{ \
|
||||
T r; \
|
||||
__asm__ __volatile__(I " %1, %0" \
|
||||
: "=q" (r) \
|
||||
: "m" (*(const C *)target) \
|
||||
: "memory"); \
|
||||
return (r); \
|
||||
}
|
||||
|
||||
CK_PR_LOAD(ptr, void, void *, char, "movl")
|
||||
|
||||
#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
|
||||
|
||||
CK_PR_LOAD_S(char, char, "movb")
|
||||
CK_PR_LOAD_S(uint, unsigned int, "movl")
|
||||
CK_PR_LOAD_S(int, int, "movl")
|
||||
CK_PR_LOAD_S(32, uint32_t, "movl")
|
||||
CK_PR_LOAD_S(16, uint16_t, "movw")
|
||||
CK_PR_LOAD_S(8, uint8_t, "movb")
|
||||
|
||||
#undef CK_PR_LOAD_S
|
||||
#undef CK_PR_LOAD
|
||||
|
||||
#define CK_PR_STORE(S, M, T, C, I) \
|
||||
CK_CC_INLINE static void \
|
||||
ck_pr_md_store_##S(M *target, T v) \
|
||||
{ \
|
||||
__asm__ __volatile__(I " %1, %0" \
|
||||
: "=m" (*(C *)target) \
|
||||
: CK_CC_IMM "q" (v) \
|
||||
: "memory"); \
|
||||
return; \
|
||||
}
|
||||
|
||||
CK_PR_STORE(ptr, void, const void *, char, "movl")
|
||||
|
||||
#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)
|
||||
|
||||
CK_PR_STORE_S(char, char, "movb")
|
||||
CK_PR_STORE_S(uint, unsigned int, "movl")
|
||||
CK_PR_STORE_S(int, int, "movl")
|
||||
CK_PR_STORE_S(32, uint32_t, "movl")
|
||||
CK_PR_STORE_S(16, uint16_t, "movw")
|
||||
CK_PR_STORE_S(8, uint8_t, "movb")
|
||||
|
||||
#undef CK_PR_STORE_S
|
||||
#undef CK_PR_STORE
|
||||
|
||||
/*
|
||||
* Atomic fetch-and-add operations.
|
||||
*/
|
||||
#define CK_PR_FAA(S, M, T, C, I) \
|
||||
CK_CC_INLINE static T \
|
||||
ck_pr_faa_##S(M *target, T d) \
|
||||
{ \
|
||||
__asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0" \
|
||||
: "+m" (*(C *)target), \
|
||||
"+q" (d) \
|
||||
: \
|
||||
: "memory", "cc"); \
|
||||
return (d); \
|
||||
}
|
||||
|
||||
CK_PR_FAA(ptr, void, uintptr_t, char, "xaddl")
|
||||
|
||||
#define CK_PR_FAA_S(S, T, I) CK_PR_FAA(S, T, T, T, I)
|
||||
|
||||
CK_PR_FAA_S(char, char, "xaddb")
|
||||
CK_PR_FAA_S(uint, unsigned int, "xaddl")
|
||||
CK_PR_FAA_S(int, int, "xaddl")
|
||||
CK_PR_FAA_S(32, uint32_t, "xaddl")
|
||||
CK_PR_FAA_S(16, uint16_t, "xaddw")
|
||||
CK_PR_FAA_S(8, uint8_t, "xaddb")
|
||||
|
||||
#undef CK_PR_FAA_S
|
||||
#undef CK_PR_FAA
|
||||
|
||||
/*
|
||||
* Atomic store-only unary operations.
|
||||
*/
|
||||
#define CK_PR_UNARY(K, S, T, C, I) \
|
||||
CK_PR_UNARY_R(K, S, T, C, I) \
|
||||
CK_PR_UNARY_V(K, S, T, C, I)
|
||||
|
||||
#define CK_PR_UNARY_R(K, S, T, C, I) \
|
||||
CK_CC_INLINE static void \
|
||||
ck_pr_##K##_##S(T *target) \
|
||||
{ \
|
||||
__asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0" \
|
||||
: "+m" (*(C *)target) \
|
||||
: \
|
||||
: "memory", "cc"); \
|
||||
return; \
|
||||
}
|
||||
|
||||
#define CK_PR_UNARY_V(K, S, T, C, I) \
|
||||
CK_CC_INLINE static void \
|
||||
ck_pr_##K##_##S##_zero(T *target, bool *r) \
|
||||
{ \
|
||||
__asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0; setz %1" \
|
||||
: "+m" (*(C *)target), \
|
||||
"=m" (*r) \
|
||||
: \
|
||||
: "memory", "cc"); \
|
||||
return; \
|
||||
}
|
||||
|
||||
|
||||
#define CK_PR_UNARY_S(K, S, T, I) CK_PR_UNARY(K, S, T, T, I)
|
||||
|
||||
#define CK_PR_GENERATE(K) \
|
||||
CK_PR_UNARY(K, ptr, void, char, #K "l") \
|
||||
CK_PR_UNARY_S(K, char, char, #K "b") \
|
||||
CK_PR_UNARY_S(K, int, int, #K "l") \
|
||||
CK_PR_UNARY_S(K, uint, unsigned int, #K "l") \
|
||||
CK_PR_UNARY_S(K, 32, uint32_t, #K "l") \
|
||||
CK_PR_UNARY_S(K, 16, uint16_t, #K "w") \
|
||||
CK_PR_UNARY_S(K, 8, uint8_t, #K "b")
|
||||
|
||||
CK_PR_GENERATE(inc)
|
||||
CK_PR_GENERATE(dec)
|
||||
CK_PR_GENERATE(neg)
|
||||
|
||||
/* not does not affect condition flags. */
|
||||
#undef CK_PR_UNARY_V
|
||||
#define CK_PR_UNARY_V(a, b, c, d, e)
|
||||
CK_PR_GENERATE(not)
|
||||
|
||||
#undef CK_PR_GENERATE
|
||||
#undef CK_PR_UNARY_S
|
||||
#undef CK_PR_UNARY_V
|
||||
#undef CK_PR_UNARY_R
|
||||
#undef CK_PR_UNARY
|
||||
|
||||
/*
|
||||
* Atomic store-only binary operations.
|
||||
*/
|
||||
#define CK_PR_BINARY(K, S, M, T, C, I) \
|
||||
CK_CC_INLINE static void \
|
||||
ck_pr_##K##_##S(M *target, T d) \
|
||||
{ \
|
||||
__asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0" \
|
||||
: "+m" (*(C *)target) \
|
||||
: CK_CC_IMM "q" (d) \
|
||||
: "memory", "cc"); \
|
||||
return; \
|
||||
}
|
||||
|
||||
#define CK_PR_BINARY_S(K, S, T, I) CK_PR_BINARY(K, S, T, T, T, I)
|
||||
|
||||
#define CK_PR_GENERATE(K) \
|
||||
CK_PR_BINARY(K, ptr, void, uintptr_t, char, #K "l") \
|
||||
CK_PR_BINARY_S(K, char, char, #K "b") \
|
||||
CK_PR_BINARY_S(K, int, int, #K "l") \
|
||||
CK_PR_BINARY_S(K, uint, unsigned int, #K "l") \
|
||||
CK_PR_BINARY_S(K, 32, uint32_t, #K "l") \
|
||||
CK_PR_BINARY_S(K, 16, uint16_t, #K "w") \
|
||||
CK_PR_BINARY_S(K, 8, uint8_t, #K "b")
|
||||
|
||||
CK_PR_GENERATE(add)
|
||||
CK_PR_GENERATE(sub)
|
||||
CK_PR_GENERATE(and)
|
||||
CK_PR_GENERATE(or)
|
||||
CK_PR_GENERATE(xor)
|
||||
|
||||
#undef CK_PR_GENERATE
|
||||
#undef CK_PR_BINARY_S
|
||||
#undef CK_PR_BINARY

/*
 * Atomic compare and swap.
 */
#define CK_PR_CAS(S, M, T, C, I)                                        \
    CK_CC_INLINE static bool                                            \
    ck_pr_cas_##S(M *target, T compare, T set)                          \
    {                                                                   \
        bool z;                                                         \
        __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %2, %0; setz %1"     \
                                : "+m" (*(C *)target),                  \
                                  "=a" (z)                              \
                                : "q"  (set),                           \
                                  "a"  (compare)                        \
                                : "memory", "cc");                      \
        return z;                                                       \
    }

CK_PR_CAS(ptr, void, void *, char, "cmpxchgl")

#define CK_PR_CAS_S(S, T, I) CK_PR_CAS(S, T, T, T, I)

CK_PR_CAS_S(char, char, "cmpxchgb")
CK_PR_CAS_S(int, int, "cmpxchgl")
CK_PR_CAS_S(uint, unsigned int, "cmpxchgl")
CK_PR_CAS_S(32, uint32_t, "cmpxchgl")
CK_PR_CAS_S(16, uint16_t, "cmpxchgw")
CK_PR_CAS_S(8, uint8_t, "cmpxchgb")

#undef CK_PR_CAS_S
#undef CK_PR_CAS

/*
 * Compare and swap, set *v to old value of target.
 */
#define CK_PR_CAS_O(S, M, T, C, I, R)                                   \
    CK_CC_INLINE static bool                                            \
    ck_pr_cas_##S##_value(M *target, T compare, T set, M *v)            \
    {                                                                   \
        bool z;                                                         \
        __asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg" I " %3, %0;"   \
                             "mov %%" R ", %2;"                         \
                             "setz %1;"                                 \
                                : "+m" (*(C *)target),                  \
                                  "=a" (z),                             \
                                  "=m" (*(C *)v)                        \
                                : "q" (set),                            \
                                  "a" (compare)                         \
                                : "memory", "cc");                      \
        return (bool)z;                                                 \
    }

CK_PR_CAS_O(ptr, void, void *, char, "l", "eax")

#define CK_PR_CAS_O_S(S, T, I, R)   \
    CK_PR_CAS_O(S, T, T, T, I, R)

CK_PR_CAS_O_S(char, char, "b", "al")
CK_PR_CAS_O_S(int, int, "l", "eax")
CK_PR_CAS_O_S(uint, unsigned int, "l", "eax")
CK_PR_CAS_O_S(32, uint32_t, "l", "eax")
CK_PR_CAS_O_S(16, uint16_t, "w", "ax")
CK_PR_CAS_O_S(8, uint8_t, "b", "al")

#undef CK_PR_CAS_O_S
#undef CK_PR_CAS_O

/*
 * Atomic bit test operations.
 */
#define CK_PR_BT(K, S, T, P, C, I)                              \
    CK_CC_INLINE static bool                                    \
    ck_pr_##K##_##S(T *target, unsigned int b)                  \
    {                                                           \
        bool c;                                                 \
        __asm__ __volatile__(CK_PR_LOCK_PREFIX I "; setc %1"    \
                                : "+m" (*(C *)target),          \
                                  "=q" (c)                      \
                                : "q"  ((P)b)                   \
                                : "memory", "cc");              \
        return (bool)c;                                         \
    }

#define CK_PR_BT_S(K, S, T, I) CK_PR_BT(K, S, T, T, T, I)

#define CK_PR_GENERATE(K)                                       \
    CK_PR_BT(K, ptr, void, uint32_t, char, #K "l %2, %0")       \
    CK_PR_BT_S(K, uint, unsigned int, #K "l %2, %0")            \
    CK_PR_BT_S(K, int, int, #K "l %2, %0")                      \
    CK_PR_BT_S(K, 32, uint32_t, #K "l %2, %0")                  \
    CK_PR_BT_S(K, 16, uint16_t, #K "w %w2, %0")

CK_PR_GENERATE(btc)
CK_PR_GENERATE(bts)
CK_PR_GENERATE(btr)

#undef CK_PR_GENERATE
#undef CK_PR_BT

#endif /* CK_PR_X86_H */
202
sys/contrib/ck/include/gcc/x86_64/ck_f_pr.h
Normal file
@ -0,0 +1,202 @@
/* DO NOT EDIT. This is auto-generated from feature.sh */
#define CK_F_PR_ADD_16
#define CK_F_PR_ADD_32
#define CK_F_PR_ADD_64
#define CK_F_PR_ADD_8
#define CK_F_PR_ADD_CHAR
#define CK_F_PR_ADD_INT
#define CK_F_PR_ADD_PTR
#define CK_F_PR_ADD_UINT
#define CK_F_PR_AND_16
#define CK_F_PR_AND_32
#define CK_F_PR_AND_64
#define CK_F_PR_AND_8
#define CK_F_PR_AND_CHAR
#define CK_F_PR_AND_INT
#define CK_F_PR_AND_PTR
#define CK_F_PR_AND_UINT
#define CK_F_PR_BTC_16
#define CK_F_PR_BTC_32
#define CK_F_PR_BTC_64
#define CK_F_PR_BTC_INT
#define CK_F_PR_BTC_PTR
#define CK_F_PR_BTC_UINT
#define CK_F_PR_BTR_16
#define CK_F_PR_BTR_32
#define CK_F_PR_BTR_64
#define CK_F_PR_BTR_INT
#define CK_F_PR_BTR_PTR
#define CK_F_PR_BTR_UINT
#define CK_F_PR_BTS_16
#define CK_F_PR_BTS_32
#define CK_F_PR_BTS_64
#define CK_F_PR_BTS_INT
#define CK_F_PR_BTS_PTR
#define CK_F_PR_BTS_UINT
#define CK_F_PR_CAS_16
#define CK_F_PR_CAS_16_8
#define CK_F_PR_CAS_16_8_VALUE
#define CK_F_PR_CAS_16_VALUE
#define CK_F_PR_CAS_32
#define CK_F_PR_CAS_32_4
#define CK_F_PR_CAS_32_4_VALUE
#define CK_F_PR_CAS_32_VALUE
#define CK_F_PR_CAS_64
#define CK_F_PR_CAS_64_2
#define CK_F_PR_CAS_64_2_VALUE
#define CK_F_PR_CAS_64_VALUE
#define CK_F_PR_CAS_8
#define CK_F_PR_CAS_8_16
#define CK_F_PR_CAS_8_16_VALUE
#define CK_F_PR_CAS_8_VALUE
#define CK_F_PR_CAS_CHAR
#define CK_F_PR_CAS_CHAR_16
#define CK_F_PR_CAS_CHAR_16_VALUE
#define CK_F_PR_CAS_CHAR_VALUE
#define CK_F_PR_CAS_INT
#define CK_F_PR_CAS_INT_4
#define CK_F_PR_CAS_INT_4_VALUE
#define CK_F_PR_CAS_INT_VALUE
#define CK_F_PR_CAS_PTR
#define CK_F_PR_CAS_PTR_2
#define CK_F_PR_CAS_PTR_2_VALUE
#define CK_F_PR_CAS_PTR_VALUE
#define CK_F_PR_CAS_DOUBLE
#define CK_F_PR_CAS_DOUBLE_2
#define CK_F_PR_CAS_DOUBLE_VALUE
#define CK_F_PR_CAS_UINT
#define CK_F_PR_CAS_UINT_4
#define CK_F_PR_CAS_UINT_4_VALUE
#define CK_F_PR_CAS_UINT_VALUE
#define CK_F_PR_DEC_16
#define CK_F_PR_DEC_16_ZERO
#define CK_F_PR_DEC_32
#define CK_F_PR_DEC_32_ZERO
#define CK_F_PR_DEC_64
#define CK_F_PR_DEC_64_ZERO
#define CK_F_PR_DEC_8
#define CK_F_PR_DEC_8_ZERO
#define CK_F_PR_DEC_CHAR
#define CK_F_PR_DEC_CHAR_ZERO
#define CK_F_PR_DEC_INT
#define CK_F_PR_DEC_INT_ZERO
#define CK_F_PR_DEC_PTR
#define CK_F_PR_DEC_PTR_ZERO
#define CK_F_PR_DEC_UINT
#define CK_F_PR_DEC_UINT_ZERO
#define CK_F_PR_FAA_16
#define CK_F_PR_FAA_32
#define CK_F_PR_FAA_64
#define CK_F_PR_FAA_8
#define CK_F_PR_FAA_CHAR
#define CK_F_PR_FAA_INT
#define CK_F_PR_FAA_PTR
#define CK_F_PR_FAA_UINT
#define CK_F_PR_FAS_16
#define CK_F_PR_FAS_32
#define CK_F_PR_FAS_64
#define CK_F_PR_FAS_8
#define CK_F_PR_FAS_CHAR
#define CK_F_PR_FAS_INT
#define CK_F_PR_FAS_PTR
#define CK_F_PR_FAS_UINT
#define CK_F_PR_FAS_DOUBLE
#define CK_F_PR_FENCE_LOAD
#define CK_F_PR_FENCE_LOAD_DEPENDS
#define CK_F_PR_FENCE_MEMORY
#define CK_F_PR_FENCE_STORE
#define CK_F_PR_FENCE_STRICT_LOAD
#define CK_F_PR_FENCE_STRICT_LOAD_DEPENDS
#define CK_F_PR_FENCE_STRICT_MEMORY
#define CK_F_PR_FENCE_STRICT_STORE
#define CK_F_PR_INC_16
#define CK_F_PR_INC_16_ZERO
#define CK_F_PR_INC_32
#define CK_F_PR_INC_32_ZERO
#define CK_F_PR_INC_64
#define CK_F_PR_INC_64_ZERO
#define CK_F_PR_INC_8
#define CK_F_PR_INC_8_ZERO
#define CK_F_PR_INC_CHAR
#define CK_F_PR_INC_CHAR_ZERO
#define CK_F_PR_INC_INT
#define CK_F_PR_INC_INT_ZERO
#define CK_F_PR_INC_PTR
#define CK_F_PR_INC_PTR_ZERO
#define CK_F_PR_INC_UINT
#define CK_F_PR_INC_UINT_ZERO
#define CK_F_PR_LOAD_16
#define CK_F_PR_LOAD_16_8
#define CK_F_PR_LOAD_32
#define CK_F_PR_LOAD_32_4
#define CK_F_PR_LOAD_64
#define CK_F_PR_LOAD_64_2
#define CK_F_PR_LOAD_8
#define CK_F_PR_LOAD_8_16
#define CK_F_PR_LOAD_CHAR
#define CK_F_PR_LOAD_CHAR_16
#define CK_F_PR_LOAD_INT
#define CK_F_PR_LOAD_INT_4
#define CK_F_PR_LOAD_PTR
#define CK_F_PR_LOAD_PTR_2
#define CK_F_PR_LOAD_DOUBLE
#define CK_F_PR_LOAD_UINT
#define CK_F_PR_LOAD_UINT_4
#define CK_F_PR_NEG_16
#define CK_F_PR_NEG_16_ZERO
#define CK_F_PR_NEG_32
#define CK_F_PR_NEG_32_ZERO
#define CK_F_PR_NEG_64
#define CK_F_PR_NEG_64_ZERO
#define CK_F_PR_NEG_8
#define CK_F_PR_NEG_8_ZERO
#define CK_F_PR_NEG_CHAR
#define CK_F_PR_NEG_CHAR_ZERO
#define CK_F_PR_NEG_INT
#define CK_F_PR_NEG_INT_ZERO
#define CK_F_PR_NEG_PTR
#define CK_F_PR_NEG_PTR_ZERO
#define CK_F_PR_NEG_UINT
#define CK_F_PR_NEG_UINT_ZERO
#define CK_F_PR_NOT_16
#define CK_F_PR_NOT_32
#define CK_F_PR_NOT_64
#define CK_F_PR_NOT_8
#define CK_F_PR_NOT_CHAR
#define CK_F_PR_NOT_INT
#define CK_F_PR_NOT_PTR
#define CK_F_PR_NOT_UINT
#define CK_F_PR_OR_16
#define CK_F_PR_OR_32
#define CK_F_PR_OR_64
#define CK_F_PR_OR_8
#define CK_F_PR_OR_CHAR
#define CK_F_PR_OR_INT
#define CK_F_PR_OR_PTR
#define CK_F_PR_OR_UINT
#define CK_F_PR_STORE_16
#define CK_F_PR_STORE_32
#define CK_F_PR_STORE_64
#define CK_F_PR_STORE_8
#define CK_F_PR_STORE_CHAR
#define CK_F_PR_STORE_INT
#define CK_F_PR_STORE_DOUBLE
#define CK_F_PR_STORE_PTR
#define CK_F_PR_STORE_UINT
#define CK_F_PR_SUB_16
#define CK_F_PR_SUB_32
#define CK_F_PR_SUB_64
#define CK_F_PR_SUB_8
#define CK_F_PR_SUB_CHAR
#define CK_F_PR_SUB_INT
#define CK_F_PR_SUB_PTR
#define CK_F_PR_SUB_UINT
#define CK_F_PR_XOR_16
#define CK_F_PR_XOR_32
#define CK_F_PR_XOR_64
#define CK_F_PR_XOR_8
#define CK_F_PR_XOR_CHAR
#define CK_F_PR_XOR_INT
#define CK_F_PR_XOR_PTR
#define CK_F_PR_XOR_UINT
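
ck_f_pr.h only advertises which ck_pr operations this port provides; portable code is expected to guard on these feature macros rather than on the architecture. A hedged sketch (the snapshot helper is illustrative):

    #include <ck_pr.h>

    static void
    snapshot_pair(void *pair[2], void *out[2])
    {
    #ifdef CK_F_PR_LOAD_PTR_2
        /* Single 16-byte atomic load where the port supports it. */
        ck_pr_load_ptr_2(pair, out);
    #else
        /* Fallback: two loads; caller must tolerate tearing. */
        out[0] = ck_pr_load_ptr(&pair[0]);
        out[1] = ck_pr_load_ptr(&pair[1]);
    #endif
    }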
573
sys/contrib/ck/include/gcc/x86_64/ck_pr.h
Normal file
@ -0,0 +1,573 @@
/*
 * Copyright 2009-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_PR_X86_64_H
#define CK_PR_X86_64_H

#ifndef CK_PR_H
#error Do not include this file directly, use ck_pr.h
#endif

#include <ck_cc.h>
#include <ck_md.h>
#include <ck_stdint.h>

/*
 * The following represent supported atomic operations.
 * These operations may be emulated.
 */
#include "ck_f_pr.h"

/*
 * Support for TSX extensions.
 */
#ifdef CK_MD_RTM_ENABLE
#include "ck_pr_rtm.h"
#endif

/* Minimum requirements for the CK_PR interface are met. */
#define CK_F_PR

#ifdef CK_MD_UMP
#define CK_PR_LOCK_PREFIX
#else
#define CK_PR_LOCK_PREFIX "lock "
#endif

/*
 * Prevent speculative execution in busy-wait loops (P4 <=)
 * or "predefined delay".
 */
CK_CC_INLINE static void
ck_pr_stall(void)
{
    __asm__ __volatile__("pause" ::: "memory");
    return;
}

#define CK_PR_FENCE(T, I)                           \
    CK_CC_INLINE static void                        \
    ck_pr_fence_strict_##T(void)                    \
    {                                               \
        __asm__ __volatile__(I ::: "memory");       \
    }

CK_PR_FENCE(atomic, "sfence")
CK_PR_FENCE(atomic_store, "sfence")
CK_PR_FENCE(atomic_load, "mfence")
CK_PR_FENCE(store_atomic, "sfence")
CK_PR_FENCE(load_atomic, "mfence")
CK_PR_FENCE(load, "lfence")
CK_PR_FENCE(load_store, "mfence")
CK_PR_FENCE(store, "sfence")
CK_PR_FENCE(store_load, "mfence")
CK_PR_FENCE(memory, "mfence")
CK_PR_FENCE(release, "mfence")
CK_PR_FENCE(acquire, "mfence")
CK_PR_FENCE(acqrel, "mfence")
CK_PR_FENCE(lock, "mfence")
CK_PR_FENCE(unlock, "mfence")

#undef CK_PR_FENCE

/*
 * Read for ownership. Older compilers will generate the 32-bit
 * 3DNow! variant which is binary compatible with x86-64 variant
 * of prefetchw.
 */
#ifndef CK_F_PR_RFO
#define CK_F_PR_RFO
CK_CC_INLINE static void
ck_pr_rfo(const void *m)
{

    __asm__ __volatile__("prefetchw (%0)"
        :
        : "r" (m)
        : "memory");

    return;
}
#endif /* CK_F_PR_RFO */

/*
 * Atomic fetch-and-store operations.
 */
#define CK_PR_FAS(S, M, T, C, I)                            \
    CK_CC_INLINE static T                                   \
    ck_pr_fas_##S(M *target, T v)                           \
    {                                                       \
        __asm__ __volatile__(I " %0, %1"                    \
                                : "+m" (*(C *)target),      \
                                  "+q" (v)                  \
                                :                           \
                                : "memory");                \
        return v;                                           \
    }

CK_PR_FAS(ptr, void, void *, char, "xchgq")

#define CK_PR_FAS_S(S, T, I) CK_PR_FAS(S, T, T, T, I)

CK_PR_FAS_S(double, double, "xchgq")
CK_PR_FAS_S(char, char, "xchgb")
CK_PR_FAS_S(uint, unsigned int, "xchgl")
CK_PR_FAS_S(int, int, "xchgl")
CK_PR_FAS_S(64, uint64_t, "xchgq")
CK_PR_FAS_S(32, uint32_t, "xchgl")
CK_PR_FAS_S(16, uint16_t, "xchgw")
CK_PR_FAS_S(8, uint8_t, "xchgb")

#undef CK_PR_FAS_S
#undef CK_PR_FAS

/*
 * Atomic load-from-memory operations.
 */
#define CK_PR_LOAD(S, M, T, C, I)                               \
    CK_CC_INLINE static T                                       \
    ck_pr_md_load_##S(const M *target)                          \
    {                                                           \
        T r;                                                    \
        __asm__ __volatile__(I " %1, %0"                        \
                                : "=q" (r)                      \
                                : "m"  (*(const C *)target)     \
                                : "memory");                    \
        return (r);                                             \
    }

CK_PR_LOAD(ptr, void, void *, char, "movq")

#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)

CK_PR_LOAD_S(char, char, "movb")
CK_PR_LOAD_S(uint, unsigned int, "movl")
CK_PR_LOAD_S(int, int, "movl")
CK_PR_LOAD_S(double, double, "movq")
CK_PR_LOAD_S(64, uint64_t, "movq")
CK_PR_LOAD_S(32, uint32_t, "movl")
CK_PR_LOAD_S(16, uint16_t, "movw")
CK_PR_LOAD_S(8, uint8_t, "movb")

#undef CK_PR_LOAD_S
#undef CK_PR_LOAD

CK_CC_INLINE static void
ck_pr_load_64_2(const uint64_t target[2], uint64_t v[2])
{
    __asm__ __volatile__("movq %%rdx, %%rcx;"
                         "movq %%rax, %%rbx;"
                         CK_PR_LOCK_PREFIX "cmpxchg16b %2;"
                            : "=a" (v[0]),
                              "=d" (v[1])
                            : "m" (*(const uint64_t *)target)
                            : "rbx", "rcx", "memory", "cc");
    return;
}

CK_CC_INLINE static void
ck_pr_load_ptr_2(const void *t, void *v)
{
    ck_pr_load_64_2(CK_CPP_CAST(const uint64_t *, t),
                    CK_CPP_CAST(uint64_t *, v));
    return;
}

#define CK_PR_LOAD_2(S, W, T)                                   \
    CK_CC_INLINE static void                                    \
    ck_pr_md_load_##S##_##W(const T t[2], T v[2])               \
    {                                                           \
        ck_pr_load_64_2((const uint64_t *)(const void *)t,      \
                        (uint64_t *)(void *)v);                 \
        return;                                                 \
    }

CK_PR_LOAD_2(char, 16, char)
CK_PR_LOAD_2(int, 4, int)
CK_PR_LOAD_2(uint, 4, unsigned int)
CK_PR_LOAD_2(32, 4, uint32_t)
CK_PR_LOAD_2(16, 8, uint16_t)
CK_PR_LOAD_2(8, 16, uint8_t)

#undef CK_PR_LOAD_2

/*
 * Atomic store-to-memory operations.
 */
#define CK_PR_STORE_IMM(S, M, T, C, I, K)                   \
    CK_CC_INLINE static void                                \
    ck_pr_md_store_##S(M *target, T v)                      \
    {                                                       \
        __asm__ __volatile__(I " %1, %0"                    \
                                : "=m" (*(C *)target)       \
                                : K "q" (v)                 \
                                : "memory");                \
        return;                                             \
    }

#define CK_PR_STORE(S, M, T, C, I)                          \
    CK_CC_INLINE static void                                \
    ck_pr_md_store_##S(M *target, T v)                      \
    {                                                       \
        __asm__ __volatile__(I " %1, %0"                    \
                                : "=m" (*(C *)target)       \
                                : "q" (v)                   \
                                : "memory");                \
        return;                                             \
    }

CK_PR_STORE_IMM(ptr, void, const void *, char, "movq", CK_CC_IMM_U32)
CK_PR_STORE(double, double, double, double, "movq")

#define CK_PR_STORE_S(S, T, I, K) CK_PR_STORE_IMM(S, T, T, T, I, K)

CK_PR_STORE_S(char, char, "movb", CK_CC_IMM_S32)
CK_PR_STORE_S(int, int, "movl", CK_CC_IMM_S32)
CK_PR_STORE_S(uint, unsigned int, "movl", CK_CC_IMM_U32)
CK_PR_STORE_S(64, uint64_t, "movq", CK_CC_IMM_U32)
CK_PR_STORE_S(32, uint32_t, "movl", CK_CC_IMM_U32)
CK_PR_STORE_S(16, uint16_t, "movw", CK_CC_IMM_U32)
CK_PR_STORE_S(8, uint8_t, "movb", CK_CC_IMM_U32)

#undef CK_PR_STORE_S
#undef CK_PR_STORE_IMM
#undef CK_PR_STORE

/*
 * Atomic fetch-and-add operations.
 */
#define CK_PR_FAA(S, M, T, C, I)                                \
    CK_CC_INLINE static T                                       \
    ck_pr_faa_##S(M *target, T d)                               \
    {                                                           \
        __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0"      \
                                : "+m" (*(C *)target),          \
                                  "+q" (d)                      \
                                :                               \
                                : "memory", "cc");              \
        return (d);                                             \
    }

CK_PR_FAA(ptr, void, uintptr_t, char, "xaddq")

#define CK_PR_FAA_S(S, T, I) CK_PR_FAA(S, T, T, T, I)

CK_PR_FAA_S(char, char, "xaddb")
CK_PR_FAA_S(uint, unsigned int, "xaddl")
CK_PR_FAA_S(int, int, "xaddl")
CK_PR_FAA_S(64, uint64_t, "xaddq")
CK_PR_FAA_S(32, uint32_t, "xaddl")
CK_PR_FAA_S(16, uint16_t, "xaddw")
CK_PR_FAA_S(8, uint8_t, "xaddb")

#undef CK_PR_FAA_S
#undef CK_PR_FAA

/*
 * Atomic store-only unary operations.
 */
#define CK_PR_UNARY(K, S, T, C, I)      \
    CK_PR_UNARY_R(K, S, T, C, I)        \
    CK_PR_UNARY_V(K, S, T, C, I)

#define CK_PR_UNARY_R(K, S, T, C, I)                            \
    CK_CC_INLINE static void                                    \
    ck_pr_##K##_##S(T *target)                                  \
    {                                                           \
        __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0"          \
                                : "+m" (*(C *)target)           \
                                :                               \
                                : "memory", "cc");              \
        return;                                                 \
    }

#define CK_PR_UNARY_V(K, S, T, C, I)                            \
    CK_CC_INLINE static void                                    \
    ck_pr_##K##_##S##_zero(T *target, bool *r)                  \
    {                                                           \
        __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0; setz %1" \
                                : "+m" (*(C *)target),          \
                                  "=m" (*r)                     \
                                :                               \
                                : "memory", "cc");              \
        return;                                                 \
    }


#define CK_PR_UNARY_S(K, S, T, I) CK_PR_UNARY(K, S, T, T, I)

#define CK_PR_GENERATE(K)                                       \
    CK_PR_UNARY(K, ptr, void, char, #K "q")                     \
    CK_PR_UNARY_S(K, char, char, #K "b")                        \
    CK_PR_UNARY_S(K, int, int, #K "l")                          \
    CK_PR_UNARY_S(K, uint, unsigned int, #K "l")                \
    CK_PR_UNARY_S(K, 64, uint64_t, #K "q")                      \
    CK_PR_UNARY_S(K, 32, uint32_t, #K "l")                      \
    CK_PR_UNARY_S(K, 16, uint16_t, #K "w")                      \
    CK_PR_UNARY_S(K, 8, uint8_t, #K "b")

CK_PR_GENERATE(inc)
CK_PR_GENERATE(dec)
CK_PR_GENERATE(neg)

/* not does not affect condition flags. */
#undef CK_PR_UNARY_V
#define CK_PR_UNARY_V(a, b, c, d, e)
CK_PR_GENERATE(not)

#undef CK_PR_GENERATE
#undef CK_PR_UNARY_S
#undef CK_PR_UNARY_V
#undef CK_PR_UNARY_R
#undef CK_PR_UNARY

/*
 * Atomic store-only binary operations.
 */
#define CK_PR_BINARY(K, S, M, T, C, I, O)                       \
    CK_CC_INLINE static void                                    \
    ck_pr_##K##_##S(M *target, T d)                             \
    {                                                           \
        __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0"      \
                                : "+m" (*(C *)target)           \
                                : O "q" (d)                     \
                                : "memory", "cc");              \
        return;                                                 \
    }

#define CK_PR_BINARY_S(K, S, T, I, O) CK_PR_BINARY(K, S, T, T, T, I, O)

#define CK_PR_GENERATE(K)                                                   \
    CK_PR_BINARY(K, ptr, void, uintptr_t, char, #K "q", CK_CC_IMM_U32)      \
    CK_PR_BINARY_S(K, char, char, #K "b", CK_CC_IMM_S32)                    \
    CK_PR_BINARY_S(K, int, int, #K "l", CK_CC_IMM_S32)                      \
    CK_PR_BINARY_S(K, uint, unsigned int, #K "l", CK_CC_IMM_U32)            \
    CK_PR_BINARY_S(K, 64, uint64_t, #K "q", CK_CC_IMM_U32)                  \
    CK_PR_BINARY_S(K, 32, uint32_t, #K "l", CK_CC_IMM_U32)                  \
    CK_PR_BINARY_S(K, 16, uint16_t, #K "w", CK_CC_IMM_U32)                  \
    CK_PR_BINARY_S(K, 8, uint8_t, #K "b", CK_CC_IMM_U32)

CK_PR_GENERATE(add)
CK_PR_GENERATE(sub)
CK_PR_GENERATE(and)
CK_PR_GENERATE(or)
CK_PR_GENERATE(xor)

#undef CK_PR_GENERATE
#undef CK_PR_BINARY_S
#undef CK_PR_BINARY

/*
 * Atomic compare and swap.
 */
#define CK_PR_CAS(S, M, T, C, I)                                        \
    CK_CC_INLINE static bool                                            \
    ck_pr_cas_##S(M *target, T compare, T set)                          \
    {                                                                   \
        bool z;                                                         \
        __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %2, %0; setz %1"     \
                                : "+m" (*(C *)target),                  \
                                  "=a" (z)                              \
                                : "q"  (set),                           \
                                  "a"  (compare)                        \
                                : "memory", "cc");                      \
        return z;                                                       \
    }

CK_PR_CAS(ptr, void, void *, char, "cmpxchgq")

#define CK_PR_CAS_S(S, T, I) CK_PR_CAS(S, T, T, T, I)

CK_PR_CAS_S(char, char, "cmpxchgb")
CK_PR_CAS_S(int, int, "cmpxchgl")
CK_PR_CAS_S(uint, unsigned int, "cmpxchgl")
CK_PR_CAS_S(double, double, "cmpxchgq")
CK_PR_CAS_S(64, uint64_t, "cmpxchgq")
CK_PR_CAS_S(32, uint32_t, "cmpxchgl")
CK_PR_CAS_S(16, uint16_t, "cmpxchgw")
CK_PR_CAS_S(8, uint8_t, "cmpxchgb")

#undef CK_PR_CAS_S
#undef CK_PR_CAS

/*
 * Compare and swap, set *v to old value of target.
 */
#define CK_PR_CAS_O(S, M, T, C, I, R)                                   \
    CK_CC_INLINE static bool                                            \
    ck_pr_cas_##S##_value(M *target, T compare, T set, M *v)            \
    {                                                                   \
        bool z;                                                         \
        __asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg" I " %3, %0;"   \
                             "mov %%" R ", %2;"                         \
                             "setz %1;"                                 \
                                : "+m" (*(C *)target),                  \
                                  "=a" (z),                             \
                                  "=m" (*(C *)v)                        \
                                : "q" (set),                            \
                                  "a" (compare)                         \
                                : "memory", "cc");                      \
        return z;                                                       \
    }

CK_PR_CAS_O(ptr, void, void *, char, "q", "rax")

#define CK_PR_CAS_O_S(S, T, I, R)   \
    CK_PR_CAS_O(S, T, T, T, I, R)

CK_PR_CAS_O_S(char, char, "b", "al")
CK_PR_CAS_O_S(int, int, "l", "eax")
CK_PR_CAS_O_S(uint, unsigned int, "l", "eax")
CK_PR_CAS_O_S(double, double, "q", "rax")
CK_PR_CAS_O_S(64, uint64_t, "q", "rax")
CK_PR_CAS_O_S(32, uint32_t, "l", "eax")
CK_PR_CAS_O_S(16, uint16_t, "w", "ax")
CK_PR_CAS_O_S(8, uint8_t, "b", "al")

#undef CK_PR_CAS_O_S
#undef CK_PR_CAS_O

/*
 * Contrary to C-interface, alignment requirements are that of uint64_t[2].
 */
CK_CC_INLINE static bool
ck_pr_cas_64_2(uint64_t target[2], uint64_t compare[2], uint64_t set[2])
{
    bool z;

    __asm__ __volatile__("movq 0(%4), %%rax;"
                         "movq 8(%4), %%rdx;"
                         CK_PR_LOCK_PREFIX "cmpxchg16b %0; setz %1"
                            : "+m" (*target),
                              "=q" (z)
                            : "b"  (set[0]),
                              "c"  (set[1]),
                              "q"  (compare)
                            : "memory", "cc", "%rax", "%rdx");
    return z;
}

CK_CC_INLINE static bool
ck_pr_cas_ptr_2(void *t, void *c, void *s)
{
    return ck_pr_cas_64_2(CK_CPP_CAST(uint64_t *, t),
                          CK_CPP_CAST(uint64_t *, c),
                          CK_CPP_CAST(uint64_t *, s));
}

CK_CC_INLINE static bool
ck_pr_cas_64_2_value(uint64_t target[2],
                     uint64_t compare[2],
                     uint64_t set[2],
                     uint64_t v[2])
{
    bool z;

    __asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg16b %0;"
                         "setz %3"
                            : "+m" (*target),
                              "=a" (v[0]),
                              "=d" (v[1]),
                              "=q" (z)
                            : "a" (compare[0]),
                              "d" (compare[1]),
                              "b" (set[0]),
                              "c" (set[1])
                            : "memory", "cc");
    return z;
}

CK_CC_INLINE static bool
ck_pr_cas_ptr_2_value(void *t, void *c, void *s, void *v)
{
    return ck_pr_cas_64_2_value(CK_CPP_CAST(uint64_t *, t),
                                CK_CPP_CAST(uint64_t *, c),
                                CK_CPP_CAST(uint64_t *, s),
                                CK_CPP_CAST(uint64_t *, v));
}

#define CK_PR_CAS_V(S, W, T)                                    \
CK_CC_INLINE static bool                                        \
ck_pr_cas_##S##_##W(T t[W], T c[W], T s[W])                     \
{                                                               \
    return ck_pr_cas_64_2((uint64_t *)(void *)t,                \
                          (uint64_t *)(void *)c,                \
                          (uint64_t *)(void *)s);               \
}                                                               \
CK_CC_INLINE static bool                                        \
ck_pr_cas_##S##_##W##_value(T *t, T c[W], T s[W], T *v)         \
{                                                               \
    return ck_pr_cas_64_2_value((uint64_t *)(void *)t,          \
                                (uint64_t *)(void *)c,          \
                                (uint64_t *)(void *)s,          \
                                (uint64_t *)(void *)v);         \
}

CK_PR_CAS_V(double, 2, double)
CK_PR_CAS_V(char, 16, char)
CK_PR_CAS_V(int, 4, int)
CK_PR_CAS_V(uint, 4, unsigned int)
CK_PR_CAS_V(32, 4, uint32_t)
CK_PR_CAS_V(16, 8, uint16_t)
CK_PR_CAS_V(8, 16, uint8_t)

#undef CK_PR_CAS_V

/*
 * Atomic bit test operations.
 */
#define CK_PR_BT(K, S, T, P, C, I)                              \
    CK_CC_INLINE static bool                                    \
    ck_pr_##K##_##S(T *target, unsigned int b)                  \
    {                                                           \
        bool c;                                                 \
        __asm__ __volatile__(CK_PR_LOCK_PREFIX I "; setc %1"    \
                                : "+m" (*(C *)target),          \
                                  "=q" (c)                      \
                                : "q"  ((P)b)                   \
                                : "memory", "cc");              \
        return c;                                               \
    }

#define CK_PR_BT_S(K, S, T, I) CK_PR_BT(K, S, T, T, T, I)

#define CK_PR_GENERATE(K)                                       \
    CK_PR_BT(K, ptr, void, uint64_t, char, #K "q %2, %0")       \
    CK_PR_BT_S(K, uint, unsigned int, #K "l %2, %0")            \
    CK_PR_BT_S(K, int, int, #K "l %2, %0")                      \
    CK_PR_BT_S(K, 64, uint64_t, #K "q %2, %0")                  \
    CK_PR_BT_S(K, 32, uint32_t, #K "l %2, %0")                  \
    CK_PR_BT_S(K, 16, uint16_t, #K "w %w2, %0")

CK_PR_GENERATE(btc)
CK_PR_GENERATE(bts)
CK_PR_GENERATE(btr)

#undef CK_PR_GENERATE
#undef CK_PR_BT

#endif /* CK_PR_X86_64_H */
109
sys/contrib/ck/include/gcc/x86_64/ck_pr_rtm.h
Normal file
@ -0,0 +1,109 @@
/*
 * Copyright 2013-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2012,2013 Intel Corporation
 * Author: Andi Kleen
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#ifndef CK_PR_X86_64_RTM_H
#define CK_PR_X86_64_RTM_H

#ifndef CK_PR_X86_64_H
#error Do not include this file directly, use ck_pr.h
#endif

#define CK_F_PR_RTM

#include <ck_cc.h>
#include <ck_stdbool.h>

#define CK_PR_RTM_STARTED	(~0U)
#define CK_PR_RTM_EXPLICIT	(1 << 0)
#define CK_PR_RTM_RETRY		(1 << 1)
#define CK_PR_RTM_CONFLICT	(1 << 2)
#define CK_PR_RTM_CAPACITY	(1 << 3)
#define CK_PR_RTM_DEBUG		(1 << 4)
#define CK_PR_RTM_NESTED	(1 << 5)
#define CK_PR_RTM_CODE(x)	(((x) >> 24) & 0xFF)

CK_CC_INLINE static unsigned int
ck_pr_rtm_begin(void)
{
    unsigned int r = CK_PR_RTM_STARTED;

    __asm__ __volatile__(".byte 0xc7,0xf8;"
                         ".long 0;"
                            : "+a" (r)
                            :
                            : "memory");

    return r;
}

CK_CC_INLINE static void
ck_pr_rtm_end(void)
{

    __asm__ __volatile__(".byte 0x0f,0x01,0xd5" ::: "memory");
    return;
}

CK_CC_INLINE static void
ck_pr_rtm_abort(const unsigned int status)
{

    __asm__ __volatile__(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory");
    return;
}

CK_CC_INLINE static bool
ck_pr_rtm_test(void)
{
    bool r;

    __asm__ __volatile__(".byte 0x0f,0x01,0xd6;"
                         "setnz %0"
                            : "=r" (r)
                            :
                            : "memory");

    return r;
}

#endif /* CK_PR_X86_64_RTM_H */
167
sys/contrib/ck/include/spinlock/anderson.h
Normal file
@ -0,0 +1,167 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_SPINLOCK_ANDERSON_H
#define CK_SPINLOCK_ANDERSON_H

#include <ck_cc.h>
#include <ck_limits.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stdbool.h>

#ifndef CK_F_SPINLOCK_ANDERSON
#define CK_F_SPINLOCK_ANDERSON
/*
 * This is an implementation of Anderson's array-based queuing lock.
 */
struct ck_spinlock_anderson_thread {
    unsigned int locked;
    unsigned int position;
};
typedef struct ck_spinlock_anderson_thread ck_spinlock_anderson_thread_t;

struct ck_spinlock_anderson {
    struct ck_spinlock_anderson_thread *slots;
    unsigned int count;
    unsigned int wrap;
    unsigned int mask;
    char pad[CK_MD_CACHELINE - sizeof(unsigned int) * 3 - sizeof(void *)];
    unsigned int next;
};
typedef struct ck_spinlock_anderson ck_spinlock_anderson_t;

CK_CC_INLINE static void
ck_spinlock_anderson_init(struct ck_spinlock_anderson *lock,
    struct ck_spinlock_anderson_thread *slots,
    unsigned int count)
{
    unsigned int i;

    slots[0].locked = false;
    slots[0].position = 0;
    for (i = 1; i < count; i++) {
        slots[i].locked = true;
        slots[i].position = i;
    }

    lock->slots = slots;
    lock->count = count;
    lock->mask = count - 1;
    lock->next = 0;

    /*
     * If the number of threads is not a power of two then compute
     * appropriate wrap-around value in the case of next slot counter
     * overflow.
     */
    if (count & (count - 1))
        lock->wrap = (UINT_MAX % count) + 1;
    else
        lock->wrap = 0;

    ck_pr_barrier();
    return;
}

CK_CC_INLINE static bool
ck_spinlock_anderson_locked(struct ck_spinlock_anderson *lock)
{
    unsigned int position;
    bool r;

    position = ck_pr_load_uint(&lock->next) & lock->mask;
    r = ck_pr_load_uint(&lock->slots[position].locked);
    ck_pr_fence_acquire();
    return r;
}

CK_CC_INLINE static void
ck_spinlock_anderson_lock(struct ck_spinlock_anderson *lock,
    struct ck_spinlock_anderson_thread **slot)
{
    unsigned int position, next;
    unsigned int count = lock->count;

    /*
     * If count is not a power of 2, then it is possible for an overflow
     * to reallocate beginning slots to more than one thread. To avoid this
     * use a compare-and-swap.
     */
    if (lock->wrap != 0) {
        position = ck_pr_load_uint(&lock->next);

        do {
            if (position == UINT_MAX)
                next = lock->wrap;
            else
                next = position + 1;
        } while (ck_pr_cas_uint_value(&lock->next, position,
            next, &position) == false);

        position %= count;
    } else {
        position = ck_pr_faa_uint(&lock->next, 1);
        position &= lock->mask;
    }

    /* Serialize with respect to previous thread's store. */
    ck_pr_fence_load();

    /*
     * Spin until slot is marked as unlocked. First slot is initialized to
     * false.
     */
    while (ck_pr_load_uint(&lock->slots[position].locked) == true)
        ck_pr_stall();

    /* Prepare slot for potential re-use by another thread. */
    ck_pr_store_uint(&lock->slots[position].locked, true);
    ck_pr_fence_lock();

    *slot = lock->slots + position;
    return;
}

CK_CC_INLINE static void
ck_spinlock_anderson_unlock(struct ck_spinlock_anderson *lock,
    struct ck_spinlock_anderson_thread *slot)
{
    unsigned int position;

    ck_pr_fence_unlock();

    /* Mark next slot as available. */
    if (lock->wrap == 0)
        position = (slot->position + 1) & lock->mask;
    else
        position = (slot->position + 1) % lock->count;

    ck_pr_store_uint(&lock->slots[position].locked, false);
    return;
}
#endif /* CK_F_SPINLOCK_ANDERSON */
#endif /* CK_SPINLOCK_ANDERSON_H */
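
Usage sketch for the Anderson lock: the caller supplies one slot per contending thread and keeps the slot pointer returned by lock for the matching unlock. Names below are illustrative; ck_spinlock.h is assumed to pull in this header:

    #include <ck_spinlock.h>

    #define THREADS 8

    static struct ck_spinlock_anderson_thread slots[THREADS];
    static struct ck_spinlock_anderson lock;

    static void
    setup(void)
    {

        ck_spinlock_anderson_init(&lock, slots, THREADS);
    }

    static void
    critical_section(void)
    {
        struct ck_spinlock_anderson_thread *slot;

        ck_spinlock_anderson_lock(&lock, &slot);
        /* ... protected work ... */
        ck_spinlock_anderson_unlock(&lock, slot);
    }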
119
sys/contrib/ck/include/spinlock/cas.h
Normal file
@ -0,0 +1,119 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_SPINLOCK_CAS_H
#define CK_SPINLOCK_CAS_H

#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_elide.h>
#include <ck_pr.h>
#include <ck_stdbool.h>

#ifndef CK_F_SPINLOCK_CAS
#define CK_F_SPINLOCK_CAS
/*
 * This is a simple CACAS (TATAS) spinlock implementation.
 */
struct ck_spinlock_cas {
    unsigned int value;
};
typedef struct ck_spinlock_cas ck_spinlock_cas_t;

#define CK_SPINLOCK_CAS_INITIALIZER {false}

CK_CC_INLINE static void
ck_spinlock_cas_init(struct ck_spinlock_cas *lock)
{

    lock->value = false;
    ck_pr_barrier();
    return;
}

CK_CC_INLINE static bool
ck_spinlock_cas_trylock(struct ck_spinlock_cas *lock)
{
    unsigned int value;

    value = ck_pr_fas_uint(&lock->value, true);
    ck_pr_fence_lock();
    return !value;
}

CK_CC_INLINE static bool
ck_spinlock_cas_locked(struct ck_spinlock_cas *lock)
{
    bool r = ck_pr_load_uint(&lock->value);

    ck_pr_fence_acquire();
    return r;
}

CK_CC_INLINE static void
ck_spinlock_cas_lock(struct ck_spinlock_cas *lock)
{

    while (ck_pr_cas_uint(&lock->value, false, true) == false) {
        while (ck_pr_load_uint(&lock->value) == true)
            ck_pr_stall();
    }

    ck_pr_fence_lock();
    return;
}

CK_CC_INLINE static void
ck_spinlock_cas_lock_eb(struct ck_spinlock_cas *lock)
{
    ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;

    while (ck_pr_cas_uint(&lock->value, false, true) == false)
        ck_backoff_eb(&backoff);

    ck_pr_fence_lock();
    return;
}

CK_CC_INLINE static void
ck_spinlock_cas_unlock(struct ck_spinlock_cas *lock)
{

    /* Set lock state to unlocked. */
    ck_pr_fence_unlock();
    ck_pr_store_uint(&lock->value, false);
    return;
}

CK_ELIDE_PROTOTYPE(ck_spinlock_cas, ck_spinlock_cas_t,
    ck_spinlock_cas_locked, ck_spinlock_cas_lock,
    ck_spinlock_cas_locked, ck_spinlock_cas_unlock)

CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_cas, ck_spinlock_cas_t,
    ck_spinlock_cas_locked, ck_spinlock_cas_trylock)

#endif /* CK_F_SPINLOCK_CAS */
#endif /* CK_SPINLOCK_CAS_H */
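
Usage of the TATAS lock is the usual init/lock/unlock triple; trylock gives a non-blocking attempt and lock_eb backs off under contention. A short sketch (the wrapper is illustrative):

    #include <ck_spinlock.h>

    static ck_spinlock_cas_t lock = CK_SPINLOCK_CAS_INITIALIZER;

    static void
    with_lock(void (*fn)(void))
    {

        if (ck_spinlock_cas_trylock(&lock) == false)
            ck_spinlock_cas_lock_eb(&lock);

        fn();
        ck_spinlock_cas_unlock(&lock);
    }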
122
sys/contrib/ck/include/spinlock/clh.h
Normal file
@ -0,0 +1,122 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_SPINLOCK_CLH_H
#define CK_SPINLOCK_CLH_H

#include <ck_cc.h>
#include <ck_limits.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

#ifndef CK_F_SPINLOCK_CLH
#define CK_F_SPINLOCK_CLH

struct ck_spinlock_clh {
    unsigned int wait;
    struct ck_spinlock_clh *previous;
};
typedef struct ck_spinlock_clh ck_spinlock_clh_t;

CK_CC_INLINE static void
ck_spinlock_clh_init(struct ck_spinlock_clh **lock, struct ck_spinlock_clh *unowned)
{

    unowned->previous = NULL;
    unowned->wait = false;
    *lock = unowned;
    ck_pr_barrier();
    return;
}

CK_CC_INLINE static bool
ck_spinlock_clh_locked(struct ck_spinlock_clh **queue)
{
    struct ck_spinlock_clh *head;
    bool r;

    head = ck_pr_load_ptr(queue);
    r = ck_pr_load_uint(&head->wait);
    ck_pr_fence_acquire();
    return r;
}

CK_CC_INLINE static void
ck_spinlock_clh_lock(struct ck_spinlock_clh **queue, struct ck_spinlock_clh *thread)
{
    struct ck_spinlock_clh *previous;

    /* Indicate to the next thread on queue that they will have to block. */
    thread->wait = true;
    ck_pr_fence_store_atomic();

    /*
     * Mark current request as last request. Save reference to previous
     * request.
     */
    previous = ck_pr_fas_ptr(queue, thread);
    thread->previous = previous;

    /* Wait until previous thread is done with lock. */
    ck_pr_fence_load();
    while (ck_pr_load_uint(&previous->wait) == true)
        ck_pr_stall();

    ck_pr_fence_lock();
    return;
}

CK_CC_INLINE static void
ck_spinlock_clh_unlock(struct ck_spinlock_clh **thread)
{
    struct ck_spinlock_clh *previous;

    /*
     * If there are waiters, they are spinning on the current node wait
     * flag. The flag is cleared so that the successor may complete an
     * acquisition. If the caller is pre-empted then the predecessor field
     * may be updated by a successor's lock operation. In order to avoid
     * this, save a copy of the predecessor before setting the flag.
     */
    previous = thread[0]->previous;

    /*
     * We have to pay this cost anyways, use it as a compiler barrier too.
     */
    ck_pr_fence_unlock();
    ck_pr_store_uint(&(*thread)->wait, false);

    /*
     * Predecessor is guaranteed not to be spinning on previous request,
     * so update caller to use previous structure. This allows successor
     * all the time in the world to successfully read updated wait flag.
     */
    *thread = previous;
    return;
}
#endif /* CK_F_SPINLOCK_CLH */
#endif /* CK_SPINLOCK_CLH_H */
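
The CLH lock hands ownership through per-thread queue nodes, and unlock swaps the caller's node for its predecessor's, so each thread must carry its node pointer between the two calls. A hedged sketch (node allocation and names are illustrative):

    #include <ck_spinlock.h>

    static struct ck_spinlock_clh unowned;
    static struct ck_spinlock_clh *queue;

    static void
    setup(void)
    {

        ck_spinlock_clh_init(&queue, &unowned);
    }

    /* node points at this thread's ck_spinlock_clh; it is
     * replaced by the recycled predecessor node on unlock. */
    static void
    critical_section(struct ck_spinlock_clh **node)
    {

        ck_spinlock_clh_lock(&queue, *node);
        /* ... protected work ... */
        ck_spinlock_clh_unlock(node);
    }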
143
sys/contrib/ck/include/spinlock/dec.h
Normal file
@ -0,0 +1,143 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_SPINLOCK_DEC_H
#define CK_SPINLOCK_DEC_H

#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_elide.h>
#include <ck_pr.h>
#include <ck_stdbool.h>

#ifndef CK_F_SPINLOCK_DEC
#define CK_F_SPINLOCK_DEC
/*
 * This is similar to the CACAS lock but makes use of an atomic decrement
 * operation to check if the lock value was decremented to 0 from 1. The
 * idea is that a decrement operation is cheaper than a compare-and-swap.
 */
struct ck_spinlock_dec {
    unsigned int value;
};
typedef struct ck_spinlock_dec ck_spinlock_dec_t;

#define CK_SPINLOCK_DEC_INITIALIZER {1}

CK_CC_INLINE static void
ck_spinlock_dec_init(struct ck_spinlock_dec *lock)
{

    lock->value = 1;
    ck_pr_barrier();
    return;
}

CK_CC_INLINE static bool
ck_spinlock_dec_trylock(struct ck_spinlock_dec *lock)
{
    unsigned int value;

    value = ck_pr_fas_uint(&lock->value, 0);
    ck_pr_fence_lock();
    return value == 1;
}

CK_CC_INLINE static bool
ck_spinlock_dec_locked(struct ck_spinlock_dec *lock)
{
    bool r;

    r = ck_pr_load_uint(&lock->value) != 1;
    ck_pr_fence_acquire();
    return r;
}

CK_CC_INLINE static void
ck_spinlock_dec_lock(struct ck_spinlock_dec *lock)
{
    bool r;

    for (;;) {
        /*
         * Only one thread is guaranteed to decrement lock to 0.
         * Overflow must be protected against. No more than
         * UINT_MAX lock requests can happen while the lock is held.
         */
        ck_pr_dec_uint_zero(&lock->value, &r);
        if (r == true)
            break;

        /* Load value without generating write cycles. */
        while (ck_pr_load_uint(&lock->value) != 1)
            ck_pr_stall();
    }

    ck_pr_fence_lock();
    return;
}

CK_CC_INLINE static void
ck_spinlock_dec_lock_eb(struct ck_spinlock_dec *lock)
{
    ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
    bool r;

    for (;;) {
        ck_pr_dec_uint_zero(&lock->value, &r);
        if (r == true)
            break;

        ck_backoff_eb(&backoff);
    }

    ck_pr_fence_lock();
    return;
}

CK_CC_INLINE static void
ck_spinlock_dec_unlock(struct ck_spinlock_dec *lock)
{

    ck_pr_fence_unlock();

    /*
     * Unconditionally set lock value to 1 so someone can decrement lock
     * to 0.
     */
    ck_pr_store_uint(&lock->value, 1);
    return;
}

CK_ELIDE_PROTOTYPE(ck_spinlock_dec, ck_spinlock_dec_t,
    ck_spinlock_dec_locked, ck_spinlock_dec_lock,
    ck_spinlock_dec_locked, ck_spinlock_dec_unlock)

CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_dec, ck_spinlock_dec_t,
    ck_spinlock_dec_locked, ck_spinlock_dec_trylock)

#endif /* CK_F_SPINLOCK_DEC */
#endif /* CK_SPINLOCK_DEC_H */
118
sys/contrib/ck/include/spinlock/fas.h
Normal file
@ -0,0 +1,118 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_SPINLOCK_FAS_H
#define CK_SPINLOCK_FAS_H

#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_elide.h>
#include <ck_pr.h>
#include <ck_stdbool.h>

#ifndef CK_F_SPINLOCK_FAS
#define CK_F_SPINLOCK_FAS

struct ck_spinlock_fas {
    unsigned int value;
};
typedef struct ck_spinlock_fas ck_spinlock_fas_t;

#define CK_SPINLOCK_FAS_INITIALIZER {false}

CK_CC_INLINE static void
ck_spinlock_fas_init(struct ck_spinlock_fas *lock)
{

    lock->value = false;
    ck_pr_barrier();
    return;
}

CK_CC_INLINE static bool
ck_spinlock_fas_trylock(struct ck_spinlock_fas *lock)
{
    bool value;

    value = ck_pr_fas_uint(&lock->value, true);
    ck_pr_fence_lock();

    return !value;
}

CK_CC_INLINE static bool
ck_spinlock_fas_locked(struct ck_spinlock_fas *lock)
{
    bool r;

    r = ck_pr_load_uint(&lock->value);
    ck_pr_fence_acquire();
    return r;
}

CK_CC_INLINE static void
ck_spinlock_fas_lock(struct ck_spinlock_fas *lock)
{

    while (ck_pr_fas_uint(&lock->value, true) == true) {
        while (ck_pr_load_uint(&lock->value) == true)
            ck_pr_stall();
    }

    ck_pr_fence_lock();
    return;
}

CK_CC_INLINE static void
ck_spinlock_fas_lock_eb(struct ck_spinlock_fas *lock)
{
    ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;

    while (ck_pr_fas_uint(&lock->value, true) == true)
        ck_backoff_eb(&backoff);

    ck_pr_fence_lock();
    return;
}

CK_CC_INLINE static void
ck_spinlock_fas_unlock(struct ck_spinlock_fas *lock)
{

    ck_pr_fence_unlock();
    ck_pr_store_uint(&lock->value, false);
    return;
}

CK_ELIDE_PROTOTYPE(ck_spinlock_fas, ck_spinlock_fas_t,
    ck_spinlock_fas_locked, ck_spinlock_fas_lock,
    ck_spinlock_fas_locked, ck_spinlock_fas_unlock)

CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_fas, ck_spinlock_fas_t,
    ck_spinlock_fas_locked, ck_spinlock_fas_trylock)

#endif /* CK_F_SPINLOCK_FAS */
#endif /* CK_SPINLOCK_FAS_H */
145
sys/contrib/ck/include/spinlock/hclh.h
Normal file
@ -0,0 +1,145 @@
/*
|
||||
* Copyright 2013-2015 Olivier Houchard
|
||||
* Copyright 2010-2015 Samy Al Bahra.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef CK_SPINLOCK_HCLH_H
|
||||
#define CK_SPINLOCK_HCLH_H
|
||||
|
||||
#include <ck_cc.h>
|
||||
#include <ck_pr.h>
|
||||
#include <ck_stdbool.h>
|
||||
#include <ck_stddef.h>
|
||||
|
||||
#ifndef CK_F_SPINLOCK_HCLH
|
||||
#define CK_F_SPINLOCK_HCLH
|
||||
struct ck_spinlock_hclh {
|
||||
unsigned int wait;
|
||||
unsigned int splice;
|
||||
int cluster_id;
|
||||
struct ck_spinlock_hclh *previous;
|
||||
};
|
||||
typedef struct ck_spinlock_hclh ck_spinlock_hclh_t;
|
||||
|
||||
CK_CC_INLINE static void
|
||||
ck_spinlock_hclh_init(struct ck_spinlock_hclh **lock,
|
||||
struct ck_spinlock_hclh *unowned,
|
||||
int cluster_id)
|
||||
{
|
||||
|
||||
unowned->previous = NULL;
|
||||
unowned->wait = false;
|
||||
unowned->splice = false;
|
||||
unowned->cluster_id = cluster_id;
|
||||
*lock = unowned;
|
||||
ck_pr_barrier();
|
||||
return;
|
||||
}
|
||||
|
||||
CK_CC_INLINE static bool
|
||||
ck_spinlock_hclh_locked(struct ck_spinlock_hclh **queue)
|
||||
{
|
||||
struct ck_spinlock_hclh *head;
|
||||
bool r;
|
||||
|
||||
head = ck_pr_load_ptr(queue);
|
||||
r = ck_pr_load_uint(&head->wait);
|
||||
ck_pr_fence_acquire();
|
||||
return r;
|
||||
}
|
||||
|
||||
CK_CC_INLINE static void
|
||||
ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_queue,
|
||||
struct ck_spinlock_hclh **local_queue,
|
||||
struct ck_spinlock_hclh *thread)
|
||||
{
|
||||
struct ck_spinlock_hclh *previous, *local_tail;
|
||||
|
||||
/* Indicate to the next thread on queue that they will have to block. */
|
||||
thread->wait = true;
|
||||
thread->splice = false;
|
||||
thread->cluster_id = (*local_queue)->cluster_id;
|
||||
|
||||
/* Serialize with respect to update of local queue. */
|
||||
ck_pr_fence_store_atomic();
|
||||
|
||||
/* Mark current request as last request. Save reference to previous request. */
|
||||
previous = ck_pr_fas_ptr(local_queue, thread);
|
||||
thread->previous = previous;
|
||||
|
||||
/* Wait until previous thread from the local queue is done with lock. */
|
||||
ck_pr_fence_load();
|
||||
if (previous->previous != NULL &&
|
||||
previous->cluster_id == thread->cluster_id) {
|
||||
while (ck_pr_load_uint(&previous->wait) == true)
|
||||
ck_pr_stall();
|
||||
|
||||
/* We're head of the global queue, we're done */
|
||||
if (ck_pr_load_uint(&previous->splice) == false)
|
||||
return;
|
||||
}
|
||||
|
||||
/* Now we need to splice the local queue into the global queue. */
|
||||
local_tail = ck_pr_load_ptr(local_queue);
|
||||
previous = ck_pr_fas_ptr(glob_queue, local_tail);
|
||||
|
||||
ck_pr_store_uint(&local_tail->splice, true);
|
||||
|
||||
/* Wait until previous thread from the global queue is done with lock. */
|
||||
while (ck_pr_load_uint(&previous->wait) == true)
|
||||
ck_pr_stall();
|
||||
|
||||
ck_pr_fence_lock();
|
||||
return;
|
||||
}
|
||||
|
||||
CK_CC_INLINE static void
|
||||
ck_spinlock_hclh_unlock(struct ck_spinlock_hclh **thread)
|
||||
{
|
||||
struct ck_spinlock_hclh *previous;
|
||||
|
||||
/*
|
||||
* If there are waiters, they are spinning on the current node wait
|
||||
* flag. The flag is cleared so that the successor may complete an
|
||||
* acquisition. If the caller is pre-empted then the predecessor field
|
||||
* may be updated by a successor's lock operation. In order to avoid
|
||||
* this, save a copy of the predecessor before setting the flag.
|
||||
*/
|
||||
previous = thread[0]->previous;
|
||||
|
||||
/* We have to pay this cost anyways, use it as a compiler barrier too. */
|
||||
ck_pr_fence_unlock();
|
||||
ck_pr_store_uint(&(*thread)->wait, false);
|
||||
|
||||
/*
|
||||
* Predecessor is guaranteed not to be spinning on previous request,
|
||||
* so update caller to use previous structure. This allows successor
|
||||
* all the time in the world to successfully read updated wait flag.
|
||||
*/
|
||||
*thread = previous;
|
||||
return;
|
||||
}
|
||||
#endif /* CK_F_SPINLOCK_HCLH */
|
||||
#endif /* CK_SPINLOCK_HCLH_H */
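A hedged sketch of how the HCLH interface above might be wired up: one global queue, one local queue per cluster (e.g. per NUMA node), each seeded with an unowned node as the init function requires, plus a persistent node per thread. Since unlock hands the caller its predecessor's recycled node, the node pointer must be kept across acquisitions. The cluster count and setup scheme here are illustrative assumptions:

#include <ck_spinlock.h>

#define N_CLUSTERS 2	/* Illustrative cluster count. */

static ck_spinlock_hclh_t *glob_queue;
static ck_spinlock_hclh_t *local_queue[N_CLUSTERS];
static ck_spinlock_hclh_t glob_unowned, local_unowned[N_CLUSTERS];

void
hclh_setup(void)
{
	int i;

	ck_spinlock_hclh_init(&glob_queue, &glob_unowned, -1);
	for (i = 0; i < N_CLUSTERS; i++)
		ck_spinlock_hclh_init(&local_queue[i], &local_unowned[i], i);
	return;
}

void
hclh_critical_section(int cluster, ck_spinlock_hclh_t **me)
{

	/* *me is this thread's node; unlock may swap it for a recycled one. */
	ck_spinlock_hclh_lock(&glob_queue, &local_queue[cluster], *me);
	/* ... protected work ... */
	ck_spinlock_hclh_unlock(me);
	return;
}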
155
sys/contrib/ck/include/spinlock/mcs.h
Normal file
@@ -0,0 +1,155 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_SPINLOCK_MCS_H
#define CK_SPINLOCK_MCS_H

#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

#ifndef CK_F_SPINLOCK_MCS
#define CK_F_SPINLOCK_MCS

struct ck_spinlock_mcs {
	unsigned int locked;
	struct ck_spinlock_mcs *next;
};
typedef struct ck_spinlock_mcs * ck_spinlock_mcs_t;
typedef struct ck_spinlock_mcs ck_spinlock_mcs_context_t;

#define CK_SPINLOCK_MCS_INITIALIZER (NULL)

CK_CC_INLINE static void
ck_spinlock_mcs_init(struct ck_spinlock_mcs **queue)
{

	*queue = NULL;
	ck_pr_barrier();
	return;
}

CK_CC_INLINE static bool
ck_spinlock_mcs_trylock(struct ck_spinlock_mcs **queue,
    struct ck_spinlock_mcs *node)
{
	bool r;

	node->locked = true;
	node->next = NULL;
	ck_pr_fence_store_atomic();

	r = ck_pr_cas_ptr(queue, NULL, node);
	ck_pr_fence_lock();
	return r;
}

CK_CC_INLINE static bool
ck_spinlock_mcs_locked(struct ck_spinlock_mcs **queue)
{
	bool r;

	r = ck_pr_load_ptr(queue) != NULL;
	ck_pr_fence_acquire();
	return r;
}

CK_CC_INLINE static void
ck_spinlock_mcs_lock(struct ck_spinlock_mcs **queue,
    struct ck_spinlock_mcs *node)
{
	struct ck_spinlock_mcs *previous;

	/*
	 * In the case that there is a successor, let them know they must
	 * wait for us to unlock.
	 */
	node->locked = true;
	node->next = NULL;
	ck_pr_fence_store_atomic();

	/*
	 * Swap current tail with current lock request. If the swap operation
	 * returns NULL, it means the queue was empty. If the queue was empty,
	 * then the operation is complete.
	 */
	previous = ck_pr_fas_ptr(queue, node);
	if (previous != NULL) {
		/*
		 * Let the previous lock holder know that we are waiting on
		 * them.
		 */
		ck_pr_store_ptr(&previous->next, node);
		while (ck_pr_load_uint(&node->locked) == true)
			ck_pr_stall();
	}

	ck_pr_fence_lock();
	return;
}

CK_CC_INLINE static void
ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue,
    struct ck_spinlock_mcs *node)
{
	struct ck_spinlock_mcs *next;

	ck_pr_fence_unlock();

	next = ck_pr_load_ptr(&node->next);
	if (next == NULL) {
		/*
		 * If there is no request following us then it is a possibility
		 * that we are the current tail. In this case, we may just
		 * mark the spinlock queue as empty.
		 */
		if (ck_pr_load_ptr(queue) == node &&
		    ck_pr_cas_ptr(queue, node, NULL) == true) {
			return;
		}

		/*
		 * If the node is not the current tail then a lock operation
		 * is in-progress. In this case, busy-wait until the queue is
		 * in a consistent state to wake up the incoming lock
		 * request.
		 */
		for (;;) {
			next = ck_pr_load_ptr(&node->next);
			if (next != NULL)
				break;

			ck_pr_stall();
		}
	}

	/* Allow the next lock operation to complete. */
	ck_pr_store_uint(&next->locked, false);
	return;
}
#endif /* CK_F_SPINLOCK_MCS */
#endif /* CK_SPINLOCK_MCS_H */
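A minimal sketch of MCS usage: each acquisition supplies its own queue context, so waiters spin on thread-local state rather than on a single shared word.

#include <ck_spinlock.h>

static ck_spinlock_mcs_t queue = CK_SPINLOCK_MCS_INITIALIZER;

void
mcs_critical_section(void)
{
	ck_spinlock_mcs_context_t node;

	/* The context may live on the stack; it is only used while queued. */
	ck_spinlock_mcs_lock(&queue, &node);
	/* ... protected work ... */
	ck_spinlock_mcs_unlock(&queue, &node);
	return;
}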
296
sys/contrib/ck/include/spinlock/ticket.h
Normal file
@@ -0,0 +1,296 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_SPINLOCK_TICKET_H
#define CK_SPINLOCK_TICKET_H

#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_elide.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stdbool.h>

#ifndef CK_F_SPINLOCK_TICKET
#define CK_F_SPINLOCK_TICKET
/*
 * If 16-bit or 32-bit increment is supported, implement support for
 * trylock functionality on availability of 32-bit or 64-bit fetch-and-add
 * and compare-and-swap. This code path is only applied to x86*.
 */
#if defined(CK_MD_TSO) && (defined(__x86__) || defined(__x86_64__))
#if defined(CK_F_PR_FAA_32) && defined(CK_F_PR_INC_16) && defined(CK_F_PR_CAS_32)
#define CK_SPINLOCK_TICKET_TYPE		uint32_t
#define CK_SPINLOCK_TICKET_TYPE_BASE	uint16_t
#define CK_SPINLOCK_TICKET_INC(x)	ck_pr_inc_16(x)
#define CK_SPINLOCK_TICKET_CAS(x, y, z)	ck_pr_cas_32(x, y, z)
#define CK_SPINLOCK_TICKET_FAA(x, y)	ck_pr_faa_32(x, y)
#define CK_SPINLOCK_TICKET_LOAD(x)	ck_pr_load_32(x)
#define CK_SPINLOCK_TICKET_INCREMENT	(0x00010000UL)
#define CK_SPINLOCK_TICKET_MASK		(0xFFFFUL)
#define CK_SPINLOCK_TICKET_SHIFT	(16)
#elif defined(CK_F_PR_FAA_64) && defined(CK_F_PR_INC_32) && defined(CK_F_PR_CAS_64)
#define CK_SPINLOCK_TICKET_TYPE		uint64_t
#define CK_SPINLOCK_TICKET_TYPE_BASE	uint32_t
#define CK_SPINLOCK_TICKET_INC(x)	ck_pr_inc_32(x)
#define CK_SPINLOCK_TICKET_CAS(x, y, z)	ck_pr_cas_64(x, y, z)
#define CK_SPINLOCK_TICKET_FAA(x, y)	ck_pr_faa_64(x, y)
#define CK_SPINLOCK_TICKET_LOAD(x)	ck_pr_load_64(x)
#define CK_SPINLOCK_TICKET_INCREMENT	(0x0000000100000000ULL)
#define CK_SPINLOCK_TICKET_MASK		(0xFFFFFFFFULL)
#define CK_SPINLOCK_TICKET_SHIFT	(32)
#endif
#endif /* CK_MD_TSO */

#if defined(CK_SPINLOCK_TICKET_TYPE)
#define CK_F_SPINLOCK_TICKET_TRYLOCK

struct ck_spinlock_ticket {
	CK_SPINLOCK_TICKET_TYPE value;
};
typedef struct ck_spinlock_ticket ck_spinlock_ticket_t;
#define CK_SPINLOCK_TICKET_INITIALIZER { .value = 0 }

CK_CC_INLINE static void
ck_spinlock_ticket_init(struct ck_spinlock_ticket *ticket)
{

	ticket->value = 0;
	ck_pr_barrier();
	return;
}

CK_CC_INLINE static bool
ck_spinlock_ticket_locked(struct ck_spinlock_ticket *ticket)
{
	CK_SPINLOCK_TICKET_TYPE request, position;

	request = CK_SPINLOCK_TICKET_LOAD(&ticket->value);
	position = request & CK_SPINLOCK_TICKET_MASK;
	request >>= CK_SPINLOCK_TICKET_SHIFT;

	ck_pr_fence_acquire();
	return request != position;
}

CK_CC_INLINE static void
ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
{
	CK_SPINLOCK_TICKET_TYPE request, position;

	/* Get our ticket number and set next ticket number. */
	request = CK_SPINLOCK_TICKET_FAA(&ticket->value,
	    CK_SPINLOCK_TICKET_INCREMENT);

	position = request & CK_SPINLOCK_TICKET_MASK;
	request >>= CK_SPINLOCK_TICKET_SHIFT;

	while (request != position) {
		ck_pr_stall();
		position = CK_SPINLOCK_TICKET_LOAD(&ticket->value) &
		    CK_SPINLOCK_TICKET_MASK;
	}

	ck_pr_fence_lock();
	return;
}

CK_CC_INLINE static void
ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
{
	CK_SPINLOCK_TICKET_TYPE request, position;
	ck_backoff_t backoff;

	/* Get our ticket number and set next ticket number. */
	request = CK_SPINLOCK_TICKET_FAA(&ticket->value,
	    CK_SPINLOCK_TICKET_INCREMENT);

	position = request & CK_SPINLOCK_TICKET_MASK;
	request >>= CK_SPINLOCK_TICKET_SHIFT;

	while (request != position) {
		ck_pr_stall();
		position = CK_SPINLOCK_TICKET_LOAD(&ticket->value) &
		    CK_SPINLOCK_TICKET_MASK;

		backoff = (request - position) & CK_SPINLOCK_TICKET_MASK;
		backoff <<= c;
		ck_backoff_eb(&backoff);
	}

	ck_pr_fence_lock();
	return;
}

CK_CC_INLINE static bool
ck_spinlock_ticket_trylock(struct ck_spinlock_ticket *ticket)
{
	CK_SPINLOCK_TICKET_TYPE snapshot, request, position;

	snapshot = CK_SPINLOCK_TICKET_LOAD(&ticket->value);
	position = snapshot & CK_SPINLOCK_TICKET_MASK;
	request = snapshot >> CK_SPINLOCK_TICKET_SHIFT;

	if (position != request)
		return false;

	if (CK_SPINLOCK_TICKET_CAS(&ticket->value,
	    snapshot, snapshot + CK_SPINLOCK_TICKET_INCREMENT) == false) {
		return false;
	}

	ck_pr_fence_lock();
	return true;
}

CK_CC_INLINE static void
ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
{

	ck_pr_fence_unlock();
	CK_SPINLOCK_TICKET_INC((CK_SPINLOCK_TICKET_TYPE_BASE *)(void *)&ticket->value);
	return;
}

#undef CK_SPINLOCK_TICKET_TYPE
#undef CK_SPINLOCK_TICKET_TYPE_BASE
#undef CK_SPINLOCK_TICKET_INC
#undef CK_SPINLOCK_TICKET_FAA
#undef CK_SPINLOCK_TICKET_LOAD
#undef CK_SPINLOCK_TICKET_INCREMENT
#undef CK_SPINLOCK_TICKET_MASK
#undef CK_SPINLOCK_TICKET_SHIFT
#else
/*
 * MESI benefits from cacheline padding between next and current. This avoids
 * invalidation of current from the cache due to incoming lock requests.
 */
struct ck_spinlock_ticket {
	unsigned int next;
	unsigned int position;
};
typedef struct ck_spinlock_ticket ck_spinlock_ticket_t;

#define CK_SPINLOCK_TICKET_INITIALIZER {.next = 0, .position = 0}

CK_CC_INLINE static void
ck_spinlock_ticket_init(struct ck_spinlock_ticket *ticket)
{

	ticket->next = 0;
	ticket->position = 0;
	ck_pr_barrier();

	return;
}

CK_CC_INLINE static bool
ck_spinlock_ticket_locked(struct ck_spinlock_ticket *ticket)
{
	bool r;

	r = ck_pr_load_uint(&ticket->position) !=
	    ck_pr_load_uint(&ticket->next);
	ck_pr_fence_acquire();
	return r;
}

CK_CC_INLINE static void
ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
{
	unsigned int request;

	/* Get our ticket number and set next ticket number. */
	request = ck_pr_faa_uint(&ticket->next, 1);

	/*
	 * Busy-wait until our ticket number is current.
	 * We can get away without a fence here assuming
	 * our position counter does not overflow.
	 */
	while (ck_pr_load_uint(&ticket->position) != request)
		ck_pr_stall();

	ck_pr_fence_lock();
	return;
}

CK_CC_INLINE static void
ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
{
	ck_backoff_t backoff;
	unsigned int request, position;

	request = ck_pr_faa_uint(&ticket->next, 1);

	for (;;) {
		position = ck_pr_load_uint(&ticket->position);
		if (position == request)
			break;

		backoff = request - position;
		backoff <<= c;

		/*
		 * Ideally, back-off from generating cache traffic for at least
		 * the amount of time necessary for the number of pending lock
		 * acquisition and relinquish operations (assuming an empty
		 * critical section).
		 */
		ck_backoff_eb(&backoff);
	}

	ck_pr_fence_lock();
	return;
}

CK_CC_INLINE static void
ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
{
	unsigned int update;

	ck_pr_fence_unlock();

	/*
	 * Update current ticket value so next lock request can proceed.
	 * Overflow behavior is assumed to be roll-over, in which case,
	 * it is only an issue if there are 2^32 pending lock requests.
	 */
	update = ck_pr_load_uint(&ticket->position);
	ck_pr_store_uint(&ticket->position, update + 1);
	return;
}
#endif /* !CK_F_SPINLOCK_TICKET_TRYLOCK */

CK_ELIDE_PROTOTYPE(ck_spinlock_ticket, ck_spinlock_ticket_t,
    ck_spinlock_ticket_locked, ck_spinlock_ticket_lock,
    ck_spinlock_ticket_locked, ck_spinlock_ticket_unlock)

CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_ticket, ck_spinlock_ticket_t,
    ck_spinlock_ticket_locked, ck_spinlock_ticket_trylock)

#endif /* CK_F_SPINLOCK_TICKET */
#endif /* CK_SPINLOCK_TICKET_H */
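A minimal sketch of the ticket lock in use; the back-off factor passed to ck_spinlock_ticket_lock_pb is an arbitrary illustrative value:

#include <ck_spinlock.h>

static ck_spinlock_ticket_t lock = CK_SPINLOCK_TICKET_INITIALIZER;

void
ticket_critical_section(void)
{

	ck_spinlock_ticket_lock(&lock);
	/* ... protected work; tickets grant the lock in FIFO order ... */
	ck_spinlock_ticket_unlock(&lock);

	/* Under heavy contention, proportional back-off reduces cache traffic. */
	ck_spinlock_ticket_lock_pb(&lock, 4);
	/* ... protected work ... */
	ck_spinlock_ticket_unlock(&lock);
	return;
}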
64
sys/contrib/ck/src/Makefile.in
Normal file
@@ -0,0 +1,64 @@
.PHONY: clean distribution

include @BUILD_DIR@/build/ck.build

TARGET_DIR=$(BUILD_DIR)/src
SDIR=$(SRC_DIR)/src
INCLUDE_DIR=$(SRC_DIR)/include

OBJECTS=ck_barrier_centralized.o \
	ck_barrier_combining.o \
	ck_barrier_dissemination.o \
	ck_barrier_tournament.o \
	ck_barrier_mcs.o \
	ck_epoch.o \
	ck_ht.o \
	ck_hp.o \
	ck_hs.o \
	ck_rhs.o \
	ck_array.o

all: $(ALL_LIBS)

libck.so: $(OBJECTS)
	$(LD) $(LDFLAGS) -o $(TARGET_DIR)/libck.so $(OBJECTS)

libck.a: $(OBJECTS)
	ar rcs $(TARGET_DIR)/libck.a $(OBJECTS)

ck_array.o: $(INCLUDE_DIR)/ck_array.h $(SDIR)/ck_array.c
	$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_array.o $(SDIR)/ck_array.c

ck_epoch.o: $(INCLUDE_DIR)/ck_epoch.h $(SDIR)/ck_epoch.c $(INCLUDE_DIR)/ck_stack.h
	$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_epoch.o $(SDIR)/ck_epoch.c

ck_hs.o: $(INCLUDE_DIR)/ck_hs.h $(SDIR)/ck_hs.c
	$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_hs.o $(SDIR)/ck_hs.c

ck_rhs.o: $(INCLUDE_DIR)/ck_rhs.h $(SDIR)/ck_rhs.c
	$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_rhs.o $(SDIR)/ck_rhs.c

ck_ht.o: $(INCLUDE_DIR)/ck_ht.h $(SDIR)/ck_ht.c
	$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_ht.o $(SDIR)/ck_ht.c

ck_hp.o: $(SDIR)/ck_hp.c $(INCLUDE_DIR)/ck_hp.h $(INCLUDE_DIR)/ck_stack.h
	$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_hp.o $(SDIR)/ck_hp.c

ck_barrier_centralized.o: $(SDIR)/ck_barrier_centralized.c
	$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_barrier_centralized.o $(SDIR)/ck_barrier_centralized.c

ck_barrier_combining.o: $(SDIR)/ck_barrier_combining.c
	$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_barrier_combining.o $(SDIR)/ck_barrier_combining.c

ck_barrier_dissemination.o: $(SDIR)/ck_barrier_dissemination.c
	$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_barrier_dissemination.o $(SDIR)/ck_barrier_dissemination.c

ck_barrier_tournament.o: $(SDIR)/ck_barrier_tournament.c
	$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_barrier_tournament.o $(SDIR)/ck_barrier_tournament.c

ck_barrier_mcs.o: $(SDIR)/ck_barrier_mcs.c
	$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_barrier_mcs.o $(SDIR)/ck_barrier_mcs.c

clean:
	rm -rf $(TARGET_DIR)/*.dSYM $(TARGET_DIR)/*~ $(TARGET_DIR)/*.o \
		$(OBJECTS) $(TARGET_DIR)/libck.a $(TARGET_DIR)/libck.so
240
sys/contrib/ck/src/ck_array.c
Normal file
@@ -0,0 +1,240 @@
/*
 * Copyright 2013-2015 Samy Al Bahra
 * Copyright 2013-2014 AppNexus, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <ck_array.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_string.h>

static struct _ck_array *
ck_array_create(struct ck_malloc *allocator, unsigned int length)
{
	struct _ck_array *active;

	active = allocator->malloc(sizeof(struct _ck_array) + sizeof(void *) * length);
	if (active == NULL)
		return NULL;

	active->n_committed = 0;
	active->length = length;

	return active;
}

bool
ck_array_init(struct ck_array *array, unsigned int mode, struct ck_malloc *allocator, unsigned int length)
{
	struct _ck_array *active;

	(void)mode;

	if (allocator->realloc == NULL ||
	    allocator->malloc == NULL ||
	    allocator->free == NULL ||
	    length == 0)
		return false;

	active = ck_array_create(allocator, length);
	if (active == NULL)
		return false;

	array->n_entries = 0;
	array->allocator = allocator;
	array->active = active;
	array->transaction = NULL;
	return true;
}

bool
ck_array_put(struct ck_array *array, void *value)
{
	struct _ck_array *target;
	unsigned int size;

	/*
	 * If no transaction copy has been necessary, attempt to do in-place
	 * modification of the array.
	 */
	if (array->transaction == NULL) {
		target = array->active;

		if (array->n_entries == target->length) {
			size = target->length << 1;

			target = array->allocator->realloc(target,
			    sizeof(struct _ck_array) + sizeof(void *) * array->n_entries,
			    sizeof(struct _ck_array) + sizeof(void *) * size,
			    true);

			if (target == NULL)
				return false;

			ck_pr_store_uint(&target->length, size);

			/* Serialize with respect to contents. */
			ck_pr_fence_store();
			ck_pr_store_ptr(&array->active, target);
		}

		target->values[array->n_entries++] = value;
		return true;
	}

	target = array->transaction;
	if (array->n_entries == target->length) {
		size = target->length << 1;

		target = array->allocator->realloc(target,
		    sizeof(struct _ck_array) + sizeof(void *) * array->n_entries,
		    sizeof(struct _ck_array) + sizeof(void *) * size,
		    true);

		if (target == NULL)
			return false;

		target->length = size;
		array->transaction = target;
	}

	target->values[array->n_entries++] = value;
	return true;
}

int
ck_array_put_unique(struct ck_array *array, void *value)
{
	unsigned int i, limit;
	void **v;

	limit = array->n_entries;
	if (array->transaction != NULL) {
		v = array->transaction->values;
	} else {
		v = array->active->values;
	}

	for (i = 0; i < limit; i++) {
		if (v[i] == value)
			return 1;
	}

	return -!ck_array_put(array, value);
}

bool
ck_array_remove(struct ck_array *array, void *value)
{
	struct _ck_array *target;
	unsigned int i;

	if (array->transaction != NULL) {
		target = array->transaction;

		for (i = 0; i < array->n_entries; i++) {
			if (target->values[i] == value) {
				target->values[i] = target->values[--array->n_entries];
				return true;
			}
		}

		return false;
	}

	target = array->active;

	for (i = 0; i < array->n_entries; i++) {
		if (target->values[i] == value)
			break;
	}

	if (i == array->n_entries)
		return false;

	/* If there are pending additions, immediately eliminate the operation. */
	if (target->n_committed != array->n_entries) {
		ck_pr_store_ptr(&target->values[i], target->values[--array->n_entries]);
		return true;
	}

	/*
	 * The assumption is that these allocations are small to begin with.
	 * If there is no immediate opportunity for transaction, allocate a
	 * transactional array which will be applied upon commit time.
	 */
	target = ck_array_create(array->allocator, array->n_entries);
	if (target == NULL)
		return false;

	memcpy(target->values, array->active->values, sizeof(void *) * array->n_entries);
	target->length = array->n_entries;
	target->n_committed = array->n_entries;
	target->values[i] = target->values[--array->n_entries];

	array->transaction = target;
	return true;
}

bool
ck_array_commit(ck_array_t *array)
{
	struct _ck_array *m = array->transaction;

	if (m != NULL) {
		struct _ck_array *p;

		m->n_committed = array->n_entries;
		ck_pr_fence_store();
		p = array->active;
		ck_pr_store_ptr(&array->active, m);
		array->allocator->free(p, sizeof(struct _ck_array) +
		    p->length * sizeof(void *), true);
		array->transaction = NULL;

		return true;
	}

	ck_pr_fence_store();
	ck_pr_store_uint(&array->active->n_committed, array->n_entries);
	return true;
}

void
ck_array_deinit(struct ck_array *array, bool defer)
{

	array->allocator->free(array->active,
	    sizeof(struct _ck_array) + sizeof(void *) * array->active->length, defer);

	if (array->transaction != NULL) {
		array->allocator->free(array->transaction,
		    sizeof(struct _ck_array) + sizeof(void *) * array->transaction->length, defer);
	}

	array->transaction = array->active = NULL;
	return;
}
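A user-space sketch of the array in use. The allocator callbacks mirror the struct ck_malloc signatures implied by the calls above (size and defer arguments); the my_* wrappers over the C library are hypothetical:

#include <ck_array.h>
#include <stdlib.h>

static void *
my_malloc(size_t size)
{

	return malloc(size);
}

static void *
my_realloc(void *p, size_t old_size, size_t new_size, bool defer)
{

	(void)old_size;
	(void)defer;
	return realloc(p, new_size);
}

static void
my_free(void *p, size_t size, bool defer)
{

	(void)size;
	(void)defer;
	free(p);
	return;
}

static struct ck_malloc allocator = {
	.malloc = my_malloc,
	.realloc = my_realloc,
	.free = my_free
};

void
array_example(void *object)
{
	ck_array_t array;

	ck_array_init(&array, CK_ARRAY_MODE_SPMC, &allocator, 8);
	ck_array_put(&array, object);
	/* Concurrent readers only observe the addition after a commit. */
	ck_array_commit(&array);
	ck_array_remove(&array, object);
	ck_array_commit(&array);
	ck_array_deinit(&array, false);
	return;
}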
59
sys/contrib/ck/src/ck_barrier_centralized.c
Normal file
@@ -0,0 +1,59 @@
/*
 * Copyright 2011-2015 Samy Al Bahra.
 * Copyright 2011 David Joseph.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <ck_barrier.h>
#include <ck_pr.h>

void
ck_barrier_centralized(struct ck_barrier_centralized *barrier,
    struct ck_barrier_centralized_state *state,
    unsigned int n_threads)
{
	unsigned int sense, value;

	/*
	 * Every execution context has a sense associated with it.
	 * This sense is reversed when the barrier is entered. Every
	 * thread will spin on the global sense until the last thread
	 * reverses it.
	 */
	sense = state->sense = ~state->sense;
	value = ck_pr_faa_uint(&barrier->value, 1);
	if (value == n_threads - 1) {
		ck_pr_store_uint(&barrier->value, 0);
		ck_pr_fence_memory();
		ck_pr_store_uint(&barrier->sense, sense);
		return;
	}

	ck_pr_fence_atomic_load();
	while (sense != ck_pr_load_uint(&barrier->sense))
		ck_pr_stall();

	ck_pr_fence_acquire();
	return;
}
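A sketch of entering the centralized barrier. Zero-initializing the barrier and per-thread state is an assumption based on the sense and count logic above (the structure definitions live in ck_barrier.h, which is not part of this excerpt):

#include <ck_barrier.h>

static struct ck_barrier_centralized barrier;	/* Zeroed in BSS. */

void
centralized_worker(unsigned int n_threads)
{
	struct ck_barrier_centralized_state state = { 0 };

	for (;;) {
		/* ... phase of work ... */
		ck_barrier_centralized(&barrier, &state, n_threads);
	}
}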
207
sys/contrib/ck/src/ck_barrier_combining.c
Normal file
@@ -0,0 +1,207 @@
/*
 * Copyright 2011-2015 Samy Al Bahra.
 * Copyright 2011 David Joseph.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <ck_barrier.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_spinlock.h>

struct ck_barrier_combining_queue {
	struct ck_barrier_combining_group *head;
	struct ck_barrier_combining_group *tail;
};

CK_CC_INLINE static struct ck_barrier_combining_group *
ck_barrier_combining_queue_dequeue(struct ck_barrier_combining_queue *queue)
{
	struct ck_barrier_combining_group *front = NULL;

	if (queue->head != NULL) {
		front = queue->head;
		queue->head = queue->head->next;
	}

	return front;
}

CK_CC_INLINE static void
ck_barrier_combining_insert(struct ck_barrier_combining_group *parent,
    struct ck_barrier_combining_group *tnode,
    struct ck_barrier_combining_group **child)
{

	*child = tnode;
	tnode->parent = parent;

	/*
	 * After inserting, we must increment the parent group's count of
	 * the number of threads expected to reach it; otherwise, the
	 * barrier may end prematurely.
	 */
	parent->k++;
	return;
}

/*
 * This implementation of software combining tree barriers
 * uses level order traversal to insert new thread groups
 * into the barrier's tree. We use a queue to implement this
 * traversal.
 */
CK_CC_INLINE static void
ck_barrier_combining_queue_enqueue(struct ck_barrier_combining_queue *queue,
    struct ck_barrier_combining_group *node_value)
{

	node_value->next = NULL;
	if (queue->head == NULL) {
		queue->head = queue->tail = node_value;
		return;
	}

	queue->tail->next = node_value;
	queue->tail = node_value;

	return;
}

void
ck_barrier_combining_group_init(struct ck_barrier_combining *root,
    struct ck_barrier_combining_group *tnode,
    unsigned int nthr)
{
	struct ck_barrier_combining_group *node;
	struct ck_barrier_combining_queue queue;

	queue.head = queue.tail = NULL;

	tnode->k = nthr;
	tnode->count = 0;
	tnode->sense = 0;
	tnode->left = tnode->right = NULL;

	/*
	 * Finds the first available node for linkage into the combining
	 * tree. The use of a spinlock is excusable as this is a one-time
	 * initialization cost.
	 */
	ck_spinlock_fas_lock(&root->mutex);
	ck_barrier_combining_queue_enqueue(&queue, root->root);
	while (queue.head != NULL) {
		node = ck_barrier_combining_queue_dequeue(&queue);

		/* If the left child is free, link the group there. */
		if (node->left == NULL) {
			ck_barrier_combining_insert(node, tnode, &node->left);
			goto leave;
		}

		/* If the right child is free, link the group there. */
		if (node->right == NULL) {
			ck_barrier_combining_insert(node, tnode, &node->right);
			goto leave;
		}

		/*
		 * If unsuccessful, try inserting as a child of the children of the
		 * current node.
		 */
		ck_barrier_combining_queue_enqueue(&queue, node->left);
		ck_barrier_combining_queue_enqueue(&queue, node->right);
	}

leave:
	ck_spinlock_fas_unlock(&root->mutex);
	return;
}

void
ck_barrier_combining_init(struct ck_barrier_combining *root,
    struct ck_barrier_combining_group *init_root)
{

	init_root->k = 0;
	init_root->count = 0;
	init_root->sense = 0;
	init_root->parent = init_root->left = init_root->right = NULL;
	ck_spinlock_fas_init(&root->mutex);
	root->root = init_root;
	return;
}

static void
ck_barrier_combining_aux(struct ck_barrier_combining *barrier,
    struct ck_barrier_combining_group *tnode,
    unsigned int sense)
{

	/*
	 * If this is the last thread in the group, it moves on to the parent group.
	 * Otherwise, it spins on this group's sense.
	 */
	if (ck_pr_faa_uint(&tnode->count, 1) == tnode->k - 1) {
		/*
		 * If we are the last thread entering the barrier for the
		 * current group, then signal the parent group if one exists.
		 */
		if (tnode->parent != NULL)
			ck_barrier_combining_aux(barrier, tnode->parent, sense);

		/*
		 * Once the thread returns from its parent(s), it reinitializes the group's
		 * arrival count and signals other threads to continue by flipping the group
		 * sense. Order of these operations is not important since we assume a static
		 * number of threads are members of a barrier for the lifetime of the barrier.
		 * Since count is explicitly reinitialized, it is guaranteed that at any point
		 * tnode->count is equivalent to tnode->k if and only if that many threads
		 * are at the barrier.
		 */
		ck_pr_store_uint(&tnode->count, 0);
		ck_pr_fence_store();
		ck_pr_store_uint(&tnode->sense, ~tnode->sense);
	} else {
		ck_pr_fence_memory();
		while (sense != ck_pr_load_uint(&tnode->sense))
			ck_pr_stall();
	}

	return;
}

void
ck_barrier_combining(struct ck_barrier_combining *barrier,
    struct ck_barrier_combining_group *tnode,
    struct ck_barrier_combining_state *state)
{

	ck_barrier_combining_aux(barrier, tnode, state->sense);

	/* Reverse the execution context's sense for the next barrier. */
	state->sense = ~state->sense;
	return;
}
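A hedged sketch of the combining barrier's setup: the tree root is installed once, each group of threads is linked in at start-up, and every thread then enters through its group. The initial state sense of ~0 is an assumption, mirroring the subscribe convention of the other barriers in this import:

#include <ck_barrier.h>

static struct ck_barrier_combining barrier;
static struct ck_barrier_combining_group root_group, leaf_group;

void
combining_setup(unsigned int threads_in_leaf)
{

	ck_barrier_combining_init(&barrier, &root_group);
	/* Each group of threads is linked into the tree once, at start-up. */
	ck_barrier_combining_group_init(&barrier, &leaf_group, threads_in_leaf);
	return;
}

void
combining_worker(void)
{
	struct ck_barrier_combining_state state = { .sense = ~0u };

	for (;;) {
		/* ... phase of work ... */
		ck_barrier_combining(&barrier, &leaf_group, &state);
	}
}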
130
sys/contrib/ck/src/ck_barrier_dissemination.c
Normal file
@@ -0,0 +1,130 @@
/*
 * Copyright 2011-2015 Samy Al Bahra.
 * Copyright 2011 David Joseph.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <ck_barrier.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_spinlock.h>

#include "ck_internal.h"

void
ck_barrier_dissemination_init(struct ck_barrier_dissemination *barrier,
    struct ck_barrier_dissemination_flag **barrier_internal,
    unsigned int nthr)
{
	unsigned int i, j, k, size, offset;
	bool p = nthr & (nthr - 1);

	barrier->nthr = nthr;
	barrier->size = size = ck_internal_log(ck_internal_power_2(nthr));
	ck_pr_store_uint(&barrier->tid, 0);

	for (i = 0; i < nthr; ++i) {
		barrier[i].flags[0] = barrier_internal[i];
		barrier[i].flags[1] = barrier_internal[i] + size;
	}

	for (i = 0; i < nthr; ++i) {
		for (k = 0, offset = 1; k < size; ++k, offset <<= 1) {
			/*
			 * Determine the thread's partner, j, for the current round, k.
			 * Partners are chosen such that by the completion of the barrier,
			 * every thread has been directly (having one of its flags set) or
			 * indirectly (having one of its partners' flags set) signaled
			 * by every other thread in the barrier.
			 */
			if (p == false)
				j = (i + offset) & (nthr - 1);
			else
				j = (i + offset) % nthr;

			/* Set the thread's partner for round k. */
			barrier[i].flags[0][k].pflag = &barrier[j].flags[0][k].tflag;
			barrier[i].flags[1][k].pflag = &barrier[j].flags[1][k].tflag;

			/* Set the thread's flags to false. */
			barrier[i].flags[0][k].tflag = barrier[i].flags[1][k].tflag = 0;
		}
	}

	return;
}

void
ck_barrier_dissemination_subscribe(struct ck_barrier_dissemination *barrier,
    struct ck_barrier_dissemination_state *state)
{

	state->parity = 0;
	state->sense = ~0;
	state->tid = ck_pr_faa_uint(&barrier->tid, 1);
	return;
}

unsigned int
ck_barrier_dissemination_size(unsigned int nthr)
{

	return (ck_internal_log(ck_internal_power_2(nthr)) << 1);
}

void
ck_barrier_dissemination(struct ck_barrier_dissemination *barrier,
    struct ck_barrier_dissemination_state *state)
{
	unsigned int i;
	unsigned int size = barrier->size;

	for (i = 0; i < size; ++i) {
		unsigned int *pflag, *tflag;

		pflag = barrier[state->tid].flags[state->parity][i].pflag;
		tflag = &barrier[state->tid].flags[state->parity][i].tflag;

		/* Unblock current partner. */
		ck_pr_store_uint(pflag, state->sense);

		/* Wait until some other thread unblocks this one. */
		while (ck_pr_load_uint(tflag) != state->sense)
			ck_pr_stall();
	}

	/*
	 * Dissemination barriers use two sets of flags to prevent race conditions
	 * between successive calls to the barrier. Parity indicates which set will
	 * be used for the next barrier. They also use a sense reversal technique
	 * to avoid re-initialization of the flags for every two calls to the barrier.
	 */
	if (state->parity == 1)
		state->sense = ~state->sense;

	state->parity = 1 - state->parity;

	ck_pr_fence_acquire();
	return;
}
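A sketch of the allocation scheme the init function above implies: one barrier node per thread, plus one flag array per thread sized by ck_barrier_dissemination_size (which covers both parities). Allocation-failure handling is omitted for brevity:

#include <ck_barrier.h>
#include <stdlib.h>

struct ck_barrier_dissemination *
dissemination_create(unsigned int nthr)
{
	struct ck_barrier_dissemination *barrier;
	struct ck_barrier_dissemination_flag **flags;
	unsigned int i, size;

	barrier = malloc(sizeof(*barrier) * nthr);
	flags = malloc(sizeof(*flags) * nthr);
	size = ck_barrier_dissemination_size(nthr);
	for (i = 0; i < nthr; i++)
		flags[i] = malloc(sizeof(**flags) * size);

	ck_barrier_dissemination_init(barrier, flags, nthr);
	return barrier;
}

void
dissemination_worker(struct ck_barrier_dissemination *barrier)
{
	struct ck_barrier_dissemination_state state;

	ck_barrier_dissemination_subscribe(barrier, &state);
	for (;;) {
		/* ... phase of work ... */
		ck_barrier_dissemination(barrier, &state);
	}
}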
141
sys/contrib/ck/src/ck_barrier_mcs.c
Normal file
@@ -0,0 +1,141 @@
/*
 * Copyright 2011-2015 Samy Al Bahra.
 * Copyright 2011 David Joseph.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <ck_barrier.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stdbool.h>

void
ck_barrier_mcs_init(struct ck_barrier_mcs *barrier, unsigned int nthr)
{
	unsigned int i, j;

	ck_pr_store_uint(&barrier->tid, 0);

	for (i = 0; i < nthr; ++i) {
		for (j = 0; j < 4; ++j) {
			/*
			 * If there are still threads that don't have parents,
			 * add them as children.
			 */
			barrier[i].havechild[j] = ((i << 2) + j < nthr - 1) ? ~0 : 0;

			/*
			 * childnotready is initialized to havechild to ensure
			 * a thread does not wait for a child that does not exist.
			 */
			barrier[i].childnotready[j] = barrier[i].havechild[j];
		}

		/* The root thread does not have a parent. */
		barrier[i].parent = (i == 0) ?
		    &barrier[i].dummy :
		    &barrier[(i - 1) >> 2].childnotready[(i - 1) & 3];

		/* Leaf threads do not have any children. */
		barrier[i].children[0] = ((i << 1) + 1 >= nthr) ?
		    &barrier[i].dummy :
		    &barrier[(i << 1) + 1].parentsense;

		barrier[i].children[1] = ((i << 1) + 2 >= nthr) ?
		    &barrier[i].dummy :
		    &barrier[(i << 1) + 2].parentsense;

		barrier[i].parentsense = 0;
	}

	return;
}

void
ck_barrier_mcs_subscribe(struct ck_barrier_mcs *barrier, struct ck_barrier_mcs_state *state)
{

	state->sense = ~0;
	state->vpid = ck_pr_faa_uint(&barrier->tid, 1);
	return;
}

CK_CC_INLINE static bool
ck_barrier_mcs_check_children(unsigned int *childnotready)
{

	if (ck_pr_load_uint(&childnotready[0]) != 0)
		return false;
	if (ck_pr_load_uint(&childnotready[1]) != 0)
		return false;
	if (ck_pr_load_uint(&childnotready[2]) != 0)
		return false;
	if (ck_pr_load_uint(&childnotready[3]) != 0)
		return false;

	return true;
}

CK_CC_INLINE static void
ck_barrier_mcs_reinitialize_children(struct ck_barrier_mcs *node)
{

	ck_pr_store_uint(&node->childnotready[0], node->havechild[0]);
	ck_pr_store_uint(&node->childnotready[1], node->havechild[1]);
	ck_pr_store_uint(&node->childnotready[2], node->havechild[2]);
	ck_pr_store_uint(&node->childnotready[3], node->havechild[3]);
	return;
}

void
ck_barrier_mcs(struct ck_barrier_mcs *barrier,
    struct ck_barrier_mcs_state *state)
{

	/*
	 * Wait until all children have reached the barrier and are done waiting
	 * for their children.
	 */
	while (ck_barrier_mcs_check_children(barrier[state->vpid].childnotready) == false)
		ck_pr_stall();

	/* Reinitialize for the next barrier. */
	ck_barrier_mcs_reinitialize_children(&barrier[state->vpid]);

	/* Inform the parent that this thread and its children have arrived at the barrier. */
	ck_pr_store_uint(barrier[state->vpid].parent, 0);

	/* Wait until the parent indicates all threads have arrived at the barrier. */
	if (state->vpid != 0) {
		while (ck_pr_load_uint(&barrier[state->vpid].parentsense) != state->sense)
			ck_pr_stall();
	}

	/* Inform children of successful barrier. */
	ck_pr_store_uint(barrier[state->vpid].children[0], state->sense);
	ck_pr_store_uint(barrier[state->vpid].children[1], state->sense);
	state->sense = ~state->sense;
	ck_pr_fence_memory();
	return;
}
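A sketch of setup and use: the barrier is allocated as one node per thread, and each thread subscribes once to claim its position in the 4-ary arrival tree. Allocation-failure handling is reduced to a NULL check:

#include <ck_barrier.h>
#include <stdlib.h>

struct ck_barrier_mcs *
mcs_barrier_create(unsigned int nthr)
{
	struct ck_barrier_mcs *barrier;

	barrier = malloc(sizeof(*barrier) * nthr);
	if (barrier != NULL)
		ck_barrier_mcs_init(barrier, nthr);

	return barrier;
}

void
mcs_barrier_worker(struct ck_barrier_mcs *barrier)
{
	struct ck_barrier_mcs_state state;

	ck_barrier_mcs_subscribe(barrier, &state);
	for (;;) {
		/* ... phase of work ... */
		ck_barrier_mcs(barrier, &state);
	}
}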
184
sys/contrib/ck/src/ck_barrier_tournament.c
Normal file
@@ -0,0 +1,184 @@
/*
 * Copyright 2011-2015 Samy Al Bahra.
 * Copyright 2011 David Joseph.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <ck_barrier.h>
#include <ck_pr.h>

#include "ck_internal.h"

/*
 * This is a tournament barrier implementation. Threads are statically
 * assigned roles to perform for each round of the barrier. Winners
 * move on to the next round, while losers spin in their current rounds
 * on their own flags. During the last round, the champion of the tournament
 * sets the last flag that begins the wakeup process.
 */

enum {
	CK_BARRIER_TOURNAMENT_BYE,
	CK_BARRIER_TOURNAMENT_CHAMPION,
	CK_BARRIER_TOURNAMENT_DROPOUT,
	CK_BARRIER_TOURNAMENT_LOSER,
	CK_BARRIER_TOURNAMENT_WINNER
};

void
ck_barrier_tournament_subscribe(struct ck_barrier_tournament *barrier,
    struct ck_barrier_tournament_state *state)
{

	state->sense = ~0;
	state->vpid = ck_pr_faa_uint(&barrier->tid, 1);
	return;
}

void
ck_barrier_tournament_init(struct ck_barrier_tournament *barrier,
    struct ck_barrier_tournament_round **rounds,
    unsigned int nthr)
{
	unsigned int i, k, size, twok, twokm1, imod2k;

	ck_pr_store_uint(&barrier->tid, 0);
	barrier->size = size = ck_barrier_tournament_size(nthr);

	for (i = 0; i < nthr; ++i) {
		/* The first role is always CK_BARRIER_TOURNAMENT_DROPOUT. */
		rounds[i][0].flag = 0;
		rounds[i][0].role = CK_BARRIER_TOURNAMENT_DROPOUT;
		for (k = 1, twok = 2, twokm1 = 1; k < size; ++k, twokm1 = twok, twok <<= 1) {
			rounds[i][k].flag = 0;

			imod2k = i & (twok - 1);
			if (imod2k == 0) {
				if ((i + twokm1 < nthr) && (twok < nthr))
					rounds[i][k].role = CK_BARRIER_TOURNAMENT_WINNER;
				else if (i + twokm1 >= nthr)
					rounds[i][k].role = CK_BARRIER_TOURNAMENT_BYE;
			}

			if (imod2k == twokm1)
				rounds[i][k].role = CK_BARRIER_TOURNAMENT_LOSER;
			else if ((i == 0) && (twok >= nthr))
				rounds[i][k].role = CK_BARRIER_TOURNAMENT_CHAMPION;

			if (rounds[i][k].role == CK_BARRIER_TOURNAMENT_LOSER)
				rounds[i][k].opponent = &rounds[i - twokm1][k].flag;
			else if (rounds[i][k].role == CK_BARRIER_TOURNAMENT_WINNER ||
			    rounds[i][k].role == CK_BARRIER_TOURNAMENT_CHAMPION)
				rounds[i][k].opponent = &rounds[i + twokm1][k].flag;
		}
	}

	ck_pr_store_ptr(&barrier->rounds, rounds);
	return;
}

unsigned int
ck_barrier_tournament_size(unsigned int nthr)
{

	return (ck_internal_log(ck_internal_power_2(nthr)) + 1);
}

void
ck_barrier_tournament(struct ck_barrier_tournament *barrier,
    struct ck_barrier_tournament_state *state)
{
	struct ck_barrier_tournament_round **rounds = ck_pr_load_ptr(&barrier->rounds);
	int round = 1;

	if (barrier->size == 1)
		return;

	for (;; ++round) {
		switch (rounds[state->vpid][round].role) {
		case CK_BARRIER_TOURNAMENT_BYE:
			break;
		case CK_BARRIER_TOURNAMENT_CHAMPION:
			/*
			 * The CK_BARRIER_TOURNAMENT_CHAMPION waits until it wins the tournament; it then
			 * sets the final flag before the wakeup phase of the barrier.
			 */
			while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
				ck_pr_stall();

			ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
			goto wakeup;
		case CK_BARRIER_TOURNAMENT_DROPOUT:
			/* NOTREACHED */
			break;
		case CK_BARRIER_TOURNAMENT_LOSER:
			/*
			 * CK_BARRIER_TOURNAMENT_LOSERs set the flags of their opponents and wait until
			 * their opponents release them after the tournament is over.
			 */
			ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
			while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
				ck_pr_stall();

			goto wakeup;
		case CK_BARRIER_TOURNAMENT_WINNER:
			/*
			 * CK_BARRIER_TOURNAMENT_WINNERs wait until their current opponent sets their flag; they then
			 * continue to the next round of the tournament.
			 */
			while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
				ck_pr_stall();
			break;
		}
	}

wakeup:
	for (round -= 1;; --round) {
		switch (rounds[state->vpid][round].role) {
		case CK_BARRIER_TOURNAMENT_BYE:
			break;
		case CK_BARRIER_TOURNAMENT_CHAMPION:
			/* NOTREACHED */
			break;
		case CK_BARRIER_TOURNAMENT_DROPOUT:
			goto leave;
			break;
		case CK_BARRIER_TOURNAMENT_LOSER:
			/* NOTREACHED */
			break;
		case CK_BARRIER_TOURNAMENT_WINNER:
			/*
			 * Winners inform their old opponents the tournament is over
			 * by setting their flags.
			 */
			ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
			break;
		}
	}

leave:
	ck_pr_fence_memory();
	state->sense = ~state->sense;
	return;
}
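A sketch of the round-matrix allocation the init function above expects: one row per thread, with one round slot per tournament level as reported by ck_barrier_tournament_size. Allocation-failure handling is omitted for brevity:

#include <ck_barrier.h>
#include <stdlib.h>

void
tournament_setup(struct ck_barrier_tournament *barrier, unsigned int nthr)
{
	struct ck_barrier_tournament_round **rounds;
	unsigned int i, size;

	rounds = malloc(sizeof(*rounds) * nthr);
	size = ck_barrier_tournament_size(nthr);
	for (i = 0; i < nthr; i++)
		rounds[i] = malloc(sizeof(**rounds) * size);

	ck_barrier_tournament_init(barrier, rounds, nthr);
	return;
}

void
tournament_worker(struct ck_barrier_tournament *barrier)
{
	struct ck_barrier_tournament_state state;

	ck_barrier_tournament_subscribe(barrier, &state);
	for (;;) {
		/* ... phase of work ... */
		ck_barrier_tournament(barrier, &state);
	}
}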
544
sys/contrib/ck/src/ck_epoch.c
Normal file
@@ -0,0 +1,544 @@
/*
 * Copyright 2011-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The implementation here is inspired from the work described in:
 *   Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *   of Cambridge Computing Laboratory.
 */

#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_epoch.h>
#include <ck_pr.h>
#include <ck_stack.h>
#include <ck_stdbool.h>
#include <ck_string.h>

/*
 * Only three distinct values are used for reclamation, but reclamation occurs
 * at e+2 rather than e+1. Any thread in a "critical section" would have
 * acquired some snapshot (e) of the global epoch value (e_g) and set an active
 * flag. Any hazardous references will only occur after a full memory barrier.
 * For example, assume an initial e_g value of 1, e value of 0 and active value
 * of 0.
 *
 * ck_epoch_begin(...)
 *   e = e_g
 *   active = 1
 *   memory_barrier();
 *
 * Any serialized reads may observe e = 0 or e = 1 with active = 0, or e = 0 or
 * e = 1 with active = 1. The e_g value can only go from 1 to 2 if every thread
 * has already observed the value of "1" (or the value we are incrementing
 * from). This guarantees us that for any given value e_g, any threads within
 * critical sections (referred to as "active" threads from here on) would have
 * an e value of e_g-1 or e_g. This also means that hazardous references may be
 * shared in both e_g-1 and e_g even if they are logically deleted in e_g.
 *
 * For example, assume all threads have an e value of e_g. Another thread may
 * increment e_g to e_g+1. Older threads may have a reference to an object
 * which is only deleted in e_g+1. It could be that reader threads are
 * executing some hash table look-ups, while some other writer thread (which
 * causes the epoch counter tick) actually deletes the same items that reader
 * threads are looking up (this writer thread having an e value of e_g+1).
 * This is possible if the writer thread re-observes the epoch after the
 * counter tick.
 *
 * Pseudo-code for writer:
 *   ck_epoch_begin()
 *   ht_delete(x)
 *   ck_epoch_end()
 *   ck_epoch_begin()
 *   ht_delete(x)
 *   ck_epoch_end()
 *
 * Pseudo-code for reader:
 *   for (;;) {
 *     x = ht_lookup(x)
 *     ck_pr_inc(&x->value);
 *   }
 *
 * Of course, it is also possible for references logically deleted at e_g-1 to
 * still be accessed at e_g as threads are "active" at the same time
 * (real-world time) mutating shared objects.
 *
 * Now, if the epoch counter is ticked to e_g+1, then no new hazardous
 * references could exist to objects logically deleted at e_g-1. The reason for
 * this is that at e_g+1, all epoch read-side critical sections started at
 * e_g-1 must have been completed. If any epoch read-side critical sections at
 * e_g-1 were still active, then we would never increment to e_g+1 (active != 0
 * ^ e != e_g). Additionally, e_g may still have hazardous references to
 * objects logically deleted at e_g-1 which means objects logically deleted at
 * e_g-1 cannot be deleted at e_g+1 unless all threads have observed e_g+1
 * (since it is valid for active threads to be at e_g and threads at e_g still
 * require safe memory accesses).
 *
 * However, at e_g+2, all active threads must be either at e_g+1 or e_g+2.
 * Though e_g+2 may share hazardous references with e_g+1, and e_g+1 shares
 * hazardous references to e_g, no active threads are at e_g or e_g-1. This
 * means no hazardous references could exist to objects deleted at e_g-1 (at
 * e_g+2).
 *
 * To summarize these important points,
 *   1) Active threads will always have a value of e_g or e_g-1.
 *   2) Items that are logically deleted at e_g or e_g-1 cannot be physically
 *      deleted.
 *   3) Objects logically deleted at e_g-1 can be physically destroyed at e_g+2
 *      or at e_g+1 if no threads are at e_g.
 *
 * Last but not least, if we are at e_g+2, then no active thread is at e_g,
 * which means it is safe to apply modulo-3 arithmetic to the e_g value in
 * order to re-use e_g to represent the e_g+3 state. This means it is
 * sufficient to represent e_g using only the values 0, 1 or 2. Every time a
 * thread re-visits an e_g (which can be determined with a non-empty deferral
 * list) it can assume objects in the e_g deferral list involved at least three
 * e_g transitions and are thus safe for physical deletion.
 *
 * Blocking semantics for epoch reclamation have additional restrictions.
 * Though we only require three deferral lists, reasonable blocking semantics
 * must be able to more gracefully handle bursty write work-loads which could
 * easily cause e_g wrap-around if modulo-3 arithmetic is used. This allows for
 * easy-to-trigger live-lock situations. The work-around to this is to not
 * apply modulo arithmetic to e_g but only to deferral list indexing.
 */
#define CK_EPOCH_GRACE 3U

enum {
	CK_EPOCH_STATE_USED = 0,
	CK_EPOCH_STATE_FREE = 1
};

CK_STACK_CONTAINER(struct ck_epoch_record, record_next,
    ck_epoch_record_container)
CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

#define CK_EPOCH_SENSE_MASK	(CK_EPOCH_SENSE - 1)

void
_ck_epoch_delref(struct ck_epoch_record *record,
    struct ck_epoch_section *section)
{
	struct ck_epoch_ref *current, *other;
	unsigned int i = section->bucket;

	current = &record->local.bucket[i];
	current->count--;

	if (current->count > 0)
		return;

	/*
	 * If the current bucket no longer has any references, then
	 * determine whether we have already transitioned into a newer
	 * epoch. If so, then make sure to update our shared snapshot
	 * to allow for forward progress.
	 *
	 * If no other active bucket exists, then the record will go
	 * inactive in order to allow for forward progress.
	 */
	other = &record->local.bucket[(i + 1) &
	    CK_EPOCH_SENSE_MASK];
	if (other->count > 0 &&
	    ((int)(current->epoch - other->epoch) < 0)) {
		/*
		 * The other epoch value is actually the newest,
		 * transition to it.
		 */
		ck_pr_store_uint(&record->epoch, other->epoch);
	}

	return;
}

void
_ck_epoch_addref(struct ck_epoch_record *record,
    struct ck_epoch_section *section)
{
	struct ck_epoch *global = record->global;
	struct ck_epoch_ref *ref;
	unsigned int epoch, i;

	epoch = ck_pr_load_uint(&global->epoch);
	i = epoch & CK_EPOCH_SENSE_MASK;
	ref = &record->local.bucket[i];

	if (ref->count++ == 0) {
#ifndef CK_MD_TSO
		struct ck_epoch_ref *previous;

		/*
		 * The system has already ticked. If another non-zero bucket
		 * exists, make sure to order our observations with respect
		 * to it. Otherwise, it is possible to acquire a reference
		 * from the previous epoch generation.
		 *
		 * On TSO architectures, the monotonicity of the global counter
		 * and load-{store, load} ordering are sufficient to guarantee
		 * this ordering.
		 */
		previous = &record->local.bucket[(i + 1) &
		    CK_EPOCH_SENSE_MASK];
		if (previous->count > 0)
			ck_pr_fence_acqrel();
#endif /* !CK_MD_TSO */

		/*
		 * If this is a new reference into the current
		 * bucket then cache the associated epoch value.
		 */
		ref->epoch = epoch;
	}

	section->bucket = i;
	return;
}

void
ck_epoch_init(struct ck_epoch *global)
{

	ck_stack_init(&global->records);
	global->epoch = 1;
	global->n_free = 0;
	ck_pr_fence_store();
	return;
}

struct ck_epoch_record *
ck_epoch_recycle(struct ck_epoch *global)
{
	struct ck_epoch_record *record;
	ck_stack_entry_t *cursor;
	unsigned int state;

	if (ck_pr_load_uint(&global->n_free) == 0)
		return NULL;

	CK_STACK_FOREACH(&global->records, cursor) {
		record = ck_epoch_record_container(cursor);

		if (ck_pr_load_uint(&record->state) == CK_EPOCH_STATE_FREE) {
			/* Serialize with respect to deferral list clean-up. */
			ck_pr_fence_load();
			state = ck_pr_fas_uint(&record->state,
			    CK_EPOCH_STATE_USED);
			if (state == CK_EPOCH_STATE_FREE) {
				ck_pr_dec_uint(&global->n_free);
				return record;
			}
		}
	}

	return NULL;
}

void
ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record)
{
	size_t i;

	record->global = global;
	record->state = CK_EPOCH_STATE_USED;
	record->active = 0;
	record->epoch = 0;
	record->n_dispatch = 0;
	record->n_peak = 0;
	record->n_pending = 0;
	memset(&record->local, 0, sizeof record->local);

	for (i = 0; i < CK_EPOCH_LENGTH; i++)
		ck_stack_init(&record->pending[i]);

	ck_pr_fence_store();
	ck_stack_push_upmc(&global->records, &record->record_next);
	return;
}

void
ck_epoch_unregister(struct ck_epoch_record *record)
{
	struct ck_epoch *global = record->global;
	size_t i;

	record->active = 0;
	record->epoch = 0;
	record->n_dispatch = 0;
	record->n_peak = 0;
	record->n_pending = 0;
	memset(&record->local, 0, sizeof record->local);

	for (i = 0; i < CK_EPOCH_LENGTH; i++)
		ck_stack_init(&record->pending[i]);

	ck_pr_fence_store();
	ck_pr_store_uint(&record->state, CK_EPOCH_STATE_FREE);
	ck_pr_inc_uint(&global->n_free);
	return;
}

static struct ck_epoch_record *
ck_epoch_scan(struct ck_epoch *global,
    struct ck_epoch_record *cr,
    unsigned int epoch,
    bool *af)
{
	ck_stack_entry_t *cursor;

	*af = false;
	if (cr == NULL) {
		cursor = CK_STACK_FIRST(&global->records);
	} else {
		cursor = &cr->record_next;
	}

	while (cursor != NULL) {
		unsigned int state, active;

		cr = ck_epoch_record_container(cursor);

		state = ck_pr_load_uint(&cr->state);
		if (state & CK_EPOCH_STATE_FREE) {
			cursor = CK_STACK_NEXT(cursor);
			continue;
		}

		active = ck_pr_load_uint(&cr->active);
		*af |= active;

		if (active != 0 && ck_pr_load_uint(&cr->epoch) != epoch)
			return cr;

		cursor = CK_STACK_NEXT(cursor);
	}

	return NULL;
}

static void
ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e)
{
	unsigned int epoch = e & (CK_EPOCH_LENGTH - 1);
	ck_stack_entry_t *head, *next, *cursor;
	unsigned int i = 0;

	head = CK_STACK_FIRST(&record->pending[epoch]);
	ck_stack_init(&record->pending[epoch]);

	for (cursor = head; cursor != NULL; cursor = next) {
		struct ck_epoch_entry *entry =
		    ck_epoch_entry_container(cursor);

		next = CK_STACK_NEXT(cursor);
		entry->function(entry);
		i++;
	}

	if (record->n_pending > record->n_peak)
		record->n_peak = record->n_pending;

	record->n_dispatch += i;
	record->n_pending -= i;
	return;
}

/*
 * Reclaim all objects associated with a record.
 */
void
ck_epoch_reclaim(struct ck_epoch_record *record)
{
	unsigned int epoch;

	for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
		ck_epoch_dispatch(record, epoch);

	return;
}

/*
 * This function must not be called within a read section.
 */
void
ck_epoch_synchronize(struct ck_epoch_record *record)
{
	struct ck_epoch *global = record->global;
	struct ck_epoch_record *cr;
	unsigned int delta, epoch, goal, i;
	bool active;

	ck_pr_fence_memory();

	/*
	 * The observation of the global epoch must be ordered with respect to
	 * all prior operations. The re-ordering of loads is permitted given
	 * monotonicity of the global epoch counter.
	 *
	 * If UINT_MAX concurrent mutations were to occur then it is possible
	 * to encounter an ABA-issue. If this is a concern, consider tuning
	 * write-side concurrency.
	 */
	delta = epoch = ck_pr_load_uint(&global->epoch);
	goal = epoch + CK_EPOCH_GRACE;

	for (i = 0, cr = NULL; i < CK_EPOCH_GRACE - 1; cr = NULL, i++) {
		bool r;

		/*
		 * Determine whether all threads have observed the current
		 * epoch with respect to the updates on invocation.
		 */
		while (cr = ck_epoch_scan(global, cr, delta, &active),
		    cr != NULL) {
			unsigned int e_d;

			ck_pr_stall();

			/*
			 * Another writer may have already observed a grace
			 * period.
			 */
			e_d = ck_pr_load_uint(&global->epoch);
			if (e_d != delta) {
				delta = e_d;
				goto reload;
			}
		}

		/*
		 * If we have observed all threads as inactive, then we assume
		 * we are at a grace period.
		 */
		if (active == false)
			break;

		/*
		 * Increment current epoch. CAS semantics are used to eliminate
		 * increment operations for synchronization that occurs for the
		 * same global epoch value snapshot.
		 *
		 * If we can guarantee there will only be one active barrier or
		 * epoch tick at a given time, then it is sufficient to use an
		 * increment operation. In a multi-barrier workload, however,
		 * it is possible to overflow the epoch value if we apply
		 * modulo-3 arithmetic.
		 */
		r = ck_pr_cas_uint_value(&global->epoch, delta, delta + 1,
		    &delta);

		/* Order subsequent thread active checks. */
		ck_pr_fence_atomic_load();

		/*
		 * If the CAS has succeeded, then set delta to the latest
		 * snapshot. Otherwise, we have just acquired the latest
		 * snapshot.
		 */
		delta = delta + r;
		continue;

reload:
		if ((goal > epoch) & (delta >= goal)) {
			/*
			 * Right now, epoch overflow is handled as an edge
			 * case. If we have already observed an epoch
			 * generation, then we can be sure no hazardous
			 * references exist to objects from this generation. We
			 * can actually avoid an additional scan step at this
			 * point.
			 */
			break;
		}
	}

	/*
	 * A majority of use-cases will not require full barrier semantics.
	 * However, if non-temporal instructions are used, full barrier
	 * semantics are necessary.
	 */
	ck_pr_fence_memory();
	record->epoch = delta;
	return;
}

void
ck_epoch_barrier(struct ck_epoch_record *record)
{

	ck_epoch_synchronize(record);
	ck_epoch_reclaim(record);
	return;
}

/*
 * It may be worth it to actually apply these deferral semantics to an epoch
 * that was observed at ck_epoch_call time. The problem is that the latter
 * would require a full fence.
 *
 * ck_epoch_call will dispatch to the latest epoch snapshot that was observed.
 * There are cases where it will fail to reclaim as early as it could. If this
 * becomes a problem, we could actually use a heap for epoch buckets, but that
 * is far from ideal too.
 */
bool
ck_epoch_poll(struct ck_epoch_record *record)
{
	bool active;
	unsigned int epoch;
	unsigned int snapshot;
	struct ck_epoch_record *cr = NULL;
	struct ck_epoch *global = record->global;

	epoch = ck_pr_load_uint(&global->epoch);

	/* Serialize epoch snapshots with respect to global epoch. */
	ck_pr_fence_memory();
	cr = ck_epoch_scan(global, cr, epoch, &active);
	if (cr != NULL) {
		record->epoch = epoch;
		return false;
	}

	/* We are at a grace period if all threads are inactive. */
	if (active == false) {
		record->epoch = epoch;
		for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
			ck_epoch_dispatch(record, epoch);

		return true;
	}

	/* If an active thread exists, rely on epoch observation. */
	if (ck_pr_cas_uint_value(&global->epoch, epoch, epoch + 1,
	    &snapshot) == false) {
		record->epoch = snapshot;
	} else {
		record->epoch = epoch + 1;
	}

	ck_epoch_dispatch(record, epoch + 1);
	return true;
}
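To make the machinery above concrete, a minimal reclamation sketch follows. It is not part of the import: struct node and free_node are illustrative, and the read-side entry points (ck_epoch_begin, ck_epoch_end, ck_epoch_call, CK_EPOCH_CONTAINER) are assumed from ck_epoch.h; only ck_epoch_init, ck_epoch_register and ck_epoch_poll appear in this file. In practice each thread owns its own record.

#include <ck_epoch.h>
#include <ck_pr.h>
#include <stdlib.h>

struct node {
	int value;
	ck_epoch_entry_t epoch_entry;
};

/* Assumed container macro from ck_epoch.h; yields node_container(). */
CK_EPOCH_CONTAINER(struct node, epoch_entry, node_container)

static struct ck_epoch global_epoch;	/* ck_epoch_init(&global_epoch); */
static struct ck_epoch_record record;	/* ck_epoch_register(&global_epoch, &record); */
static struct node *shared;

static void
free_node(ck_epoch_entry_t *e)
{

	free(node_container(e));
}

static void
reader(void)
{
	struct node *n;

	ck_epoch_begin(&record, NULL);	/* record goes active, snapshots e_g */
	n = ck_pr_load_ptr(&shared);	/* hazardous reference */
	/* ... use n ... */
	ck_epoch_end(&record, NULL);	/* record goes inactive */
}

static void
writer(struct node *update)
{
	struct node *old = ck_pr_load_ptr(&shared);

	ck_pr_store_ptr(&shared, update);	/* logical deletion of old */
	ck_epoch_call(&record, &old->epoch_entry, free_node);
	ck_epoch_poll(&record);	/* non-blocking reclamation attempt */
}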
323
sys/contrib/ck/src/ck_hp.c
Normal file
@ -0,0 +1,323 @@
/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * (c) Copyright 2008, IBM Corporation.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This is an implementation of hazard pointers as detailed in:
 *   http://www.research.ibm.com/people/m/michael/ieeetpds-2004.pdf
 *
 * This API provides a publishing mechanism that defers destruction of
 * hazard pointers until it is safe to do so. Preventing arbitrary re-use
 * protects against the ABA problem and provides safe memory reclamation.
 * The implementation was derived from the Hazard Pointers implementation
 * from the Amino CBBS project. It has been heavily modified for Concurrency
 * Kit.
 */

#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_hp.h>
#include <ck_pr.h>
#include <ck_stack.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>
#include <ck_stdlib.h>
#include <ck_string.h>

CK_STACK_CONTAINER(struct ck_hp_record, global_entry, ck_hp_record_container)
CK_STACK_CONTAINER(struct ck_hp_hazard, pending_entry, ck_hp_hazard_container)

void
ck_hp_init(struct ck_hp *state,
    unsigned int degree,
    unsigned int threshold,
    ck_hp_destructor_t destroy)
{

	state->threshold = threshold;
	state->degree = degree;
	state->destroy = destroy;
	state->n_subscribers = 0;
	state->n_free = 0;
	ck_stack_init(&state->subscribers);
	ck_pr_fence_store();

	return;
}

void
ck_hp_set_threshold(struct ck_hp *state, unsigned int threshold)
{

	ck_pr_store_uint(&state->threshold, threshold);
	return;
}

struct ck_hp_record *
ck_hp_recycle(struct ck_hp *global)
{
	struct ck_hp_record *record;
	ck_stack_entry_t *entry;
	int state;

	if (ck_pr_load_uint(&global->n_free) == 0)
		return NULL;

	CK_STACK_FOREACH(&global->subscribers, entry) {
		record = ck_hp_record_container(entry);

		if (ck_pr_load_int(&record->state) == CK_HP_FREE) {
			ck_pr_fence_load();
			state = ck_pr_fas_int(&record->state, CK_HP_USED);
			if (state == CK_HP_FREE) {
				ck_pr_dec_uint(&global->n_free);
				return record;
			}
		}
	}

	return NULL;
}

void
ck_hp_unregister(struct ck_hp_record *entry)
{

	entry->n_pending = 0;
	entry->n_peak = 0;
	entry->n_reclamations = 0;
	ck_stack_init(&entry->pending);
	ck_pr_fence_store();
	ck_pr_store_int(&entry->state, CK_HP_FREE);
	ck_pr_inc_uint(&entry->global->n_free);
	return;
}

void
ck_hp_register(struct ck_hp *state,
    struct ck_hp_record *entry,
    void **pointers)
{

	entry->state = CK_HP_USED;
	entry->global = state;
	entry->pointers = pointers;
	entry->n_pending = 0;
	entry->n_peak = 0;
	entry->n_reclamations = 0;
	memset(pointers, 0, state->degree * sizeof(void *));
	ck_stack_init(&entry->pending);
	ck_pr_fence_store();
	ck_stack_push_upmc(&state->subscribers, &entry->global_entry);
	ck_pr_inc_uint(&state->n_subscribers);
	return;
}

static int
hazard_compare(const void *a, const void *b)
{
	void * const *x;
	void * const *y;

	x = a;
	y = b;
	return ((*x > *y) - (*x < *y));
}

CK_CC_INLINE static bool
ck_hp_member_scan(ck_stack_entry_t *entry, unsigned int degree, void *pointer)
{
	struct ck_hp_record *record;
	unsigned int i;
	void *hazard;

	do {
		record = ck_hp_record_container(entry);
		if (ck_pr_load_int(&record->state) == CK_HP_FREE)
			continue;

		if (ck_pr_load_ptr(&record->pointers) == NULL)
			continue;

		for (i = 0; i < degree; i++) {
			hazard = ck_pr_load_ptr(&record->pointers[i]);
			if (hazard == pointer)
				return (true);
		}
	} while ((entry = CK_STACK_NEXT(entry)) != NULL);

	return (false);
}

CK_CC_INLINE static void *
ck_hp_member_cache(struct ck_hp *global, void **cache, unsigned int *n_hazards)
{
	struct ck_hp_record *record;
	ck_stack_entry_t *entry;
	unsigned int hazards = 0;
	unsigned int i;
	void *pointer;

	CK_STACK_FOREACH(&global->subscribers, entry) {
		record = ck_hp_record_container(entry);
		if (ck_pr_load_int(&record->state) == CK_HP_FREE)
			continue;

		if (ck_pr_load_ptr(&record->pointers) == NULL)
			continue;

		for (i = 0; i < global->degree; i++) {
			if (hazards > CK_HP_CACHE)
				break;

			pointer = ck_pr_load_ptr(&record->pointers[i]);
			if (pointer != NULL)
				cache[hazards++] = pointer;
		}
	}

	*n_hazards = hazards;
	return (entry);
}

void
ck_hp_reclaim(struct ck_hp_record *thread)
{
	struct ck_hp_hazard *hazard;
	struct ck_hp *global = thread->global;
	unsigned int n_hazards;
	void **cache, *marker, *match;
	ck_stack_entry_t *previous, *entry, *next;

	/* Store as many entries as possible in local array. */
	cache = thread->cache;
	marker = ck_hp_member_cache(global, cache, &n_hazards);

	/*
	 * In theory, there is an n such that (n * (log n) ** 2) < np.
	 */
	qsort(cache, n_hazards, sizeof(void *), hazard_compare);

	previous = NULL;
	CK_STACK_FOREACH_SAFE(&thread->pending, entry, next) {
		hazard = ck_hp_hazard_container(entry);
		match = bsearch(&hazard->pointer, cache, n_hazards,
		    sizeof(void *), hazard_compare);
		if (match != NULL) {
			previous = entry;
			continue;
		}

		if (marker != NULL &&
		    ck_hp_member_scan(marker, global->degree, hazard->pointer)) {
			previous = entry;
			continue;
		}

		thread->n_pending -= 1;

		/* Remove from the pending stack. */
		if (previous)
			CK_STACK_NEXT(previous) = CK_STACK_NEXT(entry);
		else
			CK_STACK_FIRST(&thread->pending) = CK_STACK_NEXT(entry);

		/* The entry is now safe to destroy. */
		global->destroy(hazard->data);
		thread->n_reclamations++;
	}

	return;
}

void
ck_hp_retire(struct ck_hp_record *thread,
    struct ck_hp_hazard *hazard,
    void *data,
    void *pointer)
{

	ck_pr_store_ptr(&hazard->pointer, pointer);
	ck_pr_store_ptr(&hazard->data, data);
	ck_stack_push_spnc(&thread->pending, &hazard->pending_entry);

	thread->n_pending += 1;
	if (thread->n_pending > thread->n_peak)
		thread->n_peak = thread->n_pending;

	return;
}

void
ck_hp_free(struct ck_hp_record *thread,
    struct ck_hp_hazard *hazard,
    void *data,
    void *pointer)
{
	struct ck_hp *global;

	global = ck_pr_load_ptr(&thread->global);
	ck_pr_store_ptr(&hazard->data, data);
	ck_pr_store_ptr(&hazard->pointer, pointer);
	ck_stack_push_spnc(&thread->pending, &hazard->pending_entry);

	thread->n_pending += 1;
	if (thread->n_pending > thread->n_peak)
		thread->n_peak = thread->n_pending;

	if (thread->n_pending >= global->threshold)
		ck_hp_reclaim(thread);

	return;
}

void
ck_hp_purge(struct ck_hp_record *thread)
{
	ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;

	while (thread->n_pending > 0) {
		ck_hp_reclaim(thread);
		if (thread->n_pending > 0)
			ck_backoff_eb(&backoff);
	}

	return;
}
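A usage sketch for the hazard-pointer API above (again not part of the import; ck_hp_set and the ck_hp_hazard_t typedef are assumed from ck_hp.h, and the list structure is illustrative). The key idiom is publish-then-revalidate: a reader publishes the pointer it is about to dereference into one of its hazard slots, then re-reads the source to confirm the pointer is still live.

#include <ck_hp.h>
#include <ck_pr.h>
#include <stdlib.h>

struct node {
	void *value;
	struct node *next;
	ck_hp_hazard_t hazard;	/* assumed typedef for struct ck_hp_hazard */
};

static struct ck_hp hp_state;
static struct node *head;

static void
node_destroy(void *p)
{

	free(p);
}

/* Once, at startup: one hazard slot per thread, reclaim threshold of 64. */
/* ck_hp_init(&hp_state, 1, 64, node_destroy); */

static struct node *
acquire_head(struct ck_hp_record *record)
{
	struct node *n;

	do {
		n = ck_pr_load_ptr(&head);
		ck_hp_set(record, 0, n);	/* assumed from ck_hp.h: publish slot 0 */
		ck_pr_fence_memory();		/* order publication before revalidation */
	} while (n != ck_pr_load_ptr(&head));	/* revalidate after publication */

	return n;	/* safe to dereference until slot 0 is overwritten */
}

/* A writer that has unlinked n defers its destruction via
 * ck_hp_free(record, &n->hazard, n, n); destruction happens once no
 * subscriber's hazard slots contain n. */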
941
sys/contrib/ck/src/ck_hs.c
Normal file
@ -0,0 +1,941 @@
/*
 * Copyright 2012-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <ck_cc.h>
#include <ck_hs.h>
#include <ck_limits.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stdint.h>
#include <ck_stdbool.h>
#include <ck_string.h>

#include "ck_internal.h"

#ifndef CK_HS_PROBE_L1_SHIFT
#define CK_HS_PROBE_L1_SHIFT 3ULL
#endif /* CK_HS_PROBE_L1_SHIFT */

#define CK_HS_PROBE_L1 (1 << CK_HS_PROBE_L1_SHIFT)
#define CK_HS_PROBE_L1_MASK (CK_HS_PROBE_L1 - 1)

#ifndef CK_HS_PROBE_L1_DEFAULT
#define CK_HS_PROBE_L1_DEFAULT CK_MD_CACHELINE
#endif

#define CK_HS_VMA_MASK ((uintptr_t)((1ULL << CK_MD_VMA_BITS) - 1))
#define CK_HS_VMA(x)	\
	((void *)((uintptr_t)(x) & CK_HS_VMA_MASK))

#define CK_HS_EMPTY     NULL
#define CK_HS_TOMBSTONE ((void *)~(uintptr_t)0)
#define CK_HS_G		(2)
#define CK_HS_G_MASK	(CK_HS_G - 1)

#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_STORE_8)
#define CK_HS_WORD		uint8_t
#define CK_HS_WORD_MAX		UINT8_MAX
#define CK_HS_STORE(x, y)	ck_pr_store_8(x, y)
#define CK_HS_LOAD(x)		ck_pr_load_8(x)
#elif defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_STORE_16)
#define CK_HS_WORD		uint16_t
#define CK_HS_WORD_MAX		UINT16_MAX
#define CK_HS_STORE(x, y)	ck_pr_store_16(x, y)
#define CK_HS_LOAD(x)		ck_pr_load_16(x)
#elif defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_STORE_32)
#define CK_HS_WORD		uint32_t
#define CK_HS_WORD_MAX		UINT32_MAX
#define CK_HS_STORE(x, y)	ck_pr_store_32(x, y)
#define CK_HS_LOAD(x)		ck_pr_load_32(x)
#else
#error "ck_hs is not supported on your platform."
#endif

enum ck_hs_probe_behavior {
	CK_HS_PROBE = 0,	/* Default behavior. */
	CK_HS_PROBE_TOMBSTONE,	/* Short-circuit on tombstone. */
	CK_HS_PROBE_INSERT	/* Short-circuit on probe bound if tombstone found. */
};

struct ck_hs_map {
	unsigned int generation[CK_HS_G];
	unsigned int probe_maximum;
	unsigned long mask;
	unsigned long step;
	unsigned int probe_limit;
	unsigned int tombstones;
	unsigned long n_entries;
	unsigned long capacity;
	unsigned long size;
	CK_HS_WORD *probe_bound;
	const void **entries;
};

static inline void
ck_hs_map_signal(struct ck_hs_map *map, unsigned long h)
{

	h &= CK_HS_G_MASK;
	ck_pr_store_uint(&map->generation[h],
	    map->generation[h] + 1);
	ck_pr_fence_store();
	return;
}

void
ck_hs_iterator_init(struct ck_hs_iterator *iterator)
{

	iterator->cursor = NULL;
	iterator->offset = 0;
	return;
}

bool
ck_hs_next(struct ck_hs *hs, struct ck_hs_iterator *i, void **key)
{
	struct ck_hs_map *map = hs->map;
	void *value;

	if (i->offset >= map->capacity)
		return false;

	do {
		value = CK_CC_DECONST_PTR(map->entries[i->offset]);
		if (value != CK_HS_EMPTY && value != CK_HS_TOMBSTONE) {
#ifdef CK_HS_PP
			if (hs->mode & CK_HS_MODE_OBJECT)
				value = CK_HS_VMA(value);
#endif
			i->offset++;
			*key = value;
			return true;
		}
	} while (++i->offset < map->capacity);

	return false;
}

void
ck_hs_stat(struct ck_hs *hs, struct ck_hs_stat *st)
{
	struct ck_hs_map *map = hs->map;

	st->n_entries = map->n_entries;
	st->tombstones = map->tombstones;
	st->probe_maximum = map->probe_maximum;
	return;
}

unsigned long
ck_hs_count(struct ck_hs *hs)
{

	return hs->map->n_entries;
}

static void
ck_hs_map_destroy(struct ck_malloc *m, struct ck_hs_map *map, bool defer)
{

	m->free(map, map->size, defer);
	return;
}

void
ck_hs_destroy(struct ck_hs *hs)
{

	ck_hs_map_destroy(hs->m, hs->map, false);
	return;
}

static struct ck_hs_map *
ck_hs_map_create(struct ck_hs *hs, unsigned long entries)
{
	struct ck_hs_map *map;
	unsigned long size, n_entries, prefix, limit;

	n_entries = ck_internal_power_2(entries);
	if (n_entries < CK_HS_PROBE_L1)
		n_entries = CK_HS_PROBE_L1;

	size = sizeof(struct ck_hs_map) +
	    (sizeof(void *) * n_entries + CK_MD_CACHELINE - 1);

	if (hs->mode & CK_HS_MODE_DELETE) {
		prefix = sizeof(CK_HS_WORD) * n_entries;
		size += prefix;
	} else {
		prefix = 0;
	}

	map = hs->m->malloc(size);
	if (map == NULL)
		return NULL;

	map->size = size;

	/* We should probably use a more intelligent heuristic for default probe length. */
	limit = ck_internal_max(n_entries >> (CK_HS_PROBE_L1_SHIFT + 2), CK_HS_PROBE_L1_DEFAULT);
	if (limit > UINT_MAX)
		limit = UINT_MAX;

	map->probe_limit = (unsigned int)limit;
	map->probe_maximum = 0;
	map->capacity = n_entries;
	map->step = ck_internal_bsf(n_entries);
	map->mask = n_entries - 1;
	map->n_entries = 0;

	/* Align map allocation to cache line. */
	map->entries = (void *)(((uintptr_t)&map[1] + prefix +
	    CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));

	memset(map->entries, 0, sizeof(void *) * n_entries);
	memset(map->generation, 0, sizeof map->generation);

	if (hs->mode & CK_HS_MODE_DELETE) {
		map->probe_bound = (CK_HS_WORD *)&map[1];
		memset(map->probe_bound, 0, prefix);
	} else {
		map->probe_bound = NULL;
	}

	/* Commit entries purge with respect to map publication. */
	ck_pr_fence_store();
	return map;
}

bool
ck_hs_reset_size(struct ck_hs *hs, unsigned long capacity)
{
	struct ck_hs_map *map, *previous;

	previous = hs->map;
	map = ck_hs_map_create(hs, capacity);
	if (map == NULL)
		return false;

	ck_pr_store_ptr(&hs->map, map);
	ck_hs_map_destroy(hs->m, previous, true);
	return true;
}

bool
ck_hs_reset(struct ck_hs *hs)
{
	struct ck_hs_map *previous;

	previous = hs->map;
	return ck_hs_reset_size(hs, previous->capacity);
}

static inline unsigned long
ck_hs_map_probe_next(struct ck_hs_map *map,
    unsigned long offset,
    unsigned long h,
    unsigned long level,
    unsigned long probes)
{
	unsigned long r, stride;

	r = (h >> map->step) >> level;
	stride = (r & ~CK_HS_PROBE_L1_MASK) << 1 | (r & CK_HS_PROBE_L1_MASK);

	return (offset + (probes >> CK_HS_PROBE_L1_SHIFT) +
	    (stride | CK_HS_PROBE_L1)) & map->mask;
}

static inline void
ck_hs_map_bound_set(struct ck_hs_map *m,
    unsigned long h,
    unsigned long n_probes)
{
	unsigned long offset = h & m->mask;

	if (n_probes > m->probe_maximum)
		ck_pr_store_uint(&m->probe_maximum, n_probes);

	if (m->probe_bound != NULL && m->probe_bound[offset] < n_probes) {
		if (n_probes > CK_HS_WORD_MAX)
			n_probes = CK_HS_WORD_MAX;

		CK_HS_STORE(&m->probe_bound[offset], n_probes);
		ck_pr_fence_store();
	}

	return;
}

static inline unsigned int
ck_hs_map_bound_get(struct ck_hs_map *m, unsigned long h)
{
	unsigned long offset = h & m->mask;
	unsigned int r = CK_HS_WORD_MAX;

	if (m->probe_bound != NULL) {
		r = CK_HS_LOAD(&m->probe_bound[offset]);
		if (r == CK_HS_WORD_MAX)
			r = ck_pr_load_uint(&m->probe_maximum);
	} else {
		r = ck_pr_load_uint(&m->probe_maximum);
	}

	return r;
}

bool
ck_hs_grow(struct ck_hs *hs,
    unsigned long capacity)
{
	struct ck_hs_map *map, *update;
	unsigned long k, i, j, offset, probes;
	const void *previous, **bucket;

restart:
	map = hs->map;
	if (map->capacity > capacity)
		return false;

	update = ck_hs_map_create(hs, capacity);
	if (update == NULL)
		return false;

	for (k = 0; k < map->capacity; k++) {
		unsigned long h;

		previous = map->entries[k];
		if (previous == CK_HS_EMPTY || previous == CK_HS_TOMBSTONE)
			continue;

#ifdef CK_HS_PP
		if (hs->mode & CK_HS_MODE_OBJECT)
			previous = CK_HS_VMA(previous);
#endif

		h = hs->hf(previous, hs->seed);
		offset = h & update->mask;
		i = probes = 0;

		for (;;) {
			bucket = (const void **)((uintptr_t)&update->entries[offset] & ~(CK_MD_CACHELINE - 1));

			for (j = 0; j < CK_HS_PROBE_L1; j++) {
				const void **cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));

				if (probes++ == update->probe_limit)
					break;

				if (CK_CC_LIKELY(*cursor == CK_HS_EMPTY)) {
					*cursor = map->entries[k];
					update->n_entries++;

					ck_hs_map_bound_set(update, h, probes);
					break;
				}
			}

			if (j < CK_HS_PROBE_L1)
				break;

			offset = ck_hs_map_probe_next(update, offset, h, i++, probes);
		}

		if (probes > update->probe_limit) {
			/*
			 * We have hit the probe limit, map needs to be even
			 * larger.
			 */
			ck_hs_map_destroy(hs->m, update, false);
			capacity <<= 1;
			goto restart;
		}
	}

	ck_pr_fence_store();
	ck_pr_store_ptr(&hs->map, update);
	ck_hs_map_destroy(hs->m, map, true);
	return true;
}

static void
ck_hs_map_postinsert(struct ck_hs *hs, struct ck_hs_map *map)
{

	map->n_entries++;
	if ((map->n_entries << 1) > map->capacity)
		ck_hs_grow(hs, map->capacity << 1);

	return;
}

bool
ck_hs_rebuild(struct ck_hs *hs)
{

	return ck_hs_grow(hs, hs->map->capacity);
}

static const void **
ck_hs_map_probe(struct ck_hs *hs,
    struct ck_hs_map *map,
    unsigned long *n_probes,
    const void ***priority,
    unsigned long h,
    const void *key,
    const void **object,
    unsigned long probe_limit,
    enum ck_hs_probe_behavior behavior)
{
	const void **bucket, **cursor, *k, *compare;
	const void **pr = NULL;
	unsigned long offset, j, i, probes, opl;

#ifdef CK_HS_PP
	/* If we are storing object pointers, then we may leverage pointer packing. */
	unsigned long hv = 0;

	if (hs->mode & CK_HS_MODE_OBJECT) {
		hv = (h >> 25) & CK_HS_KEY_MASK;
		compare = CK_HS_VMA(key);
	} else {
		compare = key;
	}
#else
	compare = key;
#endif

	offset = h & map->mask;
	*object = NULL;
	i = probes = 0;

	opl = probe_limit;
	if (behavior == CK_HS_PROBE_INSERT)
		probe_limit = ck_hs_map_bound_get(map, h);

	for (;;) {
		bucket = (const void **)((uintptr_t)&map->entries[offset] & ~(CK_MD_CACHELINE - 1));

		for (j = 0; j < CK_HS_PROBE_L1; j++) {
			cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));

			if (probes++ == probe_limit) {
				if (probe_limit == opl || pr != NULL) {
					k = CK_HS_EMPTY;
					goto leave;
				}

				/*
				 * If no eligible slot has been found yet,
				 * continue probe sequence with original probe
				 * limit.
				 */
				probe_limit = opl;
			}

			k = ck_pr_load_ptr(cursor);
			if (k == CK_HS_EMPTY)
				goto leave;

			if (k == CK_HS_TOMBSTONE) {
				if (pr == NULL) {
					pr = cursor;
					*n_probes = probes;

					if (behavior == CK_HS_PROBE_TOMBSTONE) {
						k = CK_HS_EMPTY;
						goto leave;
					}
				}

				continue;
			}

#ifdef CK_HS_PP
			if (hs->mode & CK_HS_MODE_OBJECT) {
				if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv)
					continue;

				k = CK_HS_VMA(k);
			}
#endif

			if (k == compare)
				goto leave;

			if (hs->compare == NULL)
				continue;

			if (hs->compare(k, key) == true)
				goto leave;
		}

		offset = ck_hs_map_probe_next(map, offset, h, i++, probes);
	}

leave:
	if (probes > probe_limit) {
		cursor = NULL;
	} else {
		*object = k;
	}

	if (pr == NULL)
		*n_probes = probes;

	*priority = pr;
	return cursor;
}

static inline const void *
ck_hs_marshal(unsigned int mode, const void *key, unsigned long h)
{
#ifdef CK_HS_PP
	const void *insert;

	if (mode & CK_HS_MODE_OBJECT) {
		insert = (void *)((uintptr_t)CK_HS_VMA(key) |
		    ((h >> 25) << CK_MD_VMA_BITS));
	} else {
		insert = key;
	}

	return insert;
#else
	(void)mode;
	(void)h;

	return key;
#endif
}

bool
ck_hs_gc(struct ck_hs *hs, unsigned long cycles, unsigned long seed)
{
	unsigned long size = 0;
	unsigned long i;
	struct ck_hs_map *map = hs->map;
	unsigned int maximum;
	CK_HS_WORD *bounds = NULL;

	if (map->n_entries == 0) {
		ck_pr_store_uint(&map->probe_maximum, 0);
		if (map->probe_bound != NULL)
			memset(map->probe_bound, 0, sizeof(CK_HS_WORD) * map->capacity);

		return true;
	}

	if (cycles == 0) {
		maximum = 0;

		if (map->probe_bound != NULL) {
			size = sizeof(CK_HS_WORD) * map->capacity;
			bounds = hs->m->malloc(size);
			if (bounds == NULL)
				return false;

			memset(bounds, 0, size);
		}
	} else {
		maximum = map->probe_maximum;
	}

	for (i = 0; i < map->capacity; i++) {
		const void **first, *object, **slot, *entry;
		unsigned long n_probes, offset, h;

		entry = map->entries[(i + seed) & map->mask];
		if (entry == CK_HS_EMPTY || entry == CK_HS_TOMBSTONE)
			continue;

#ifdef CK_HS_PP
		if (hs->mode & CK_HS_MODE_OBJECT)
			entry = CK_HS_VMA(entry);
#endif

		h = hs->hf(entry, hs->seed);
		offset = h & map->mask;

		slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, entry, &object,
		    ck_hs_map_bound_get(map, h), CK_HS_PROBE);

		if (first != NULL) {
			const void *insert = ck_hs_marshal(hs->mode, entry, h);

			ck_pr_store_ptr(first, insert);
			ck_hs_map_signal(map, h);
			ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		}

		if (cycles == 0) {
			if (n_probes > maximum)
				maximum = n_probes;

			if (n_probes > CK_HS_WORD_MAX)
				n_probes = CK_HS_WORD_MAX;

			if (bounds != NULL && n_probes > bounds[offset])
				bounds[offset] = n_probes;
		} else if (--cycles == 0)
			break;
	}

	/*
	 * The following only applies to garbage collection involving
	 * a full scan of all entries.
	 */
	if (maximum != map->probe_maximum)
		ck_pr_store_uint(&map->probe_maximum, maximum);

	if (bounds != NULL) {
		for (i = 0; i < map->capacity; i++)
			CK_HS_STORE(&map->probe_bound[i], bounds[i]);

		hs->m->free(bounds, size, false);
	}

	return true;
}

bool
ck_hs_fas(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    void **previous)
{
	const void **slot, **first, *object, *insert;
	struct ck_hs_map *map = hs->map;
	unsigned long n_probes;

	*previous = NULL;
	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
	    ck_hs_map_bound_get(map, h), CK_HS_PROBE);

	/* Replacement semantics presume existence. */
	if (object == NULL)
		return false;

	insert = ck_hs_marshal(hs->mode, key, h);

	if (first != NULL) {
		ck_pr_store_ptr(first, insert);
		ck_hs_map_signal(map, h);
		ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
	} else {
		ck_pr_store_ptr(slot, insert);
	}

	*previous = CK_CC_DECONST_PTR(object);
	return true;
}

/*
 * An apply function takes two arguments. The first argument is a pointer to a
 * pre-existing object. The second argument is a pointer to the fifth argument
 * passed to ck_hs_apply. If a non-NULL pointer is passed to the first argument
 * and the return value of the apply function is NULL, then the pre-existing
 * value is deleted. If the return pointer is the same as the one passed to the
 * apply function then no changes are made to the hash table. If the first
 * argument is non-NULL and the return pointer is different than that passed to
 * the apply function, then the pre-existing value is replaced. For
 * replacement, it is required that the value itself is identical to the
 * previous value.
 */
bool
ck_hs_apply(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    ck_hs_apply_fn_t *fn,
    void *cl)
{
	const void **slot, **first, *object, *delta, *insert;
	unsigned long n_probes;
	struct ck_hs_map *map;

restart:
	map = hs->map;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_HS_PROBE_INSERT);
	if (slot == NULL && first == NULL) {
		if (ck_hs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}

	delta = fn(CK_CC_DECONST_PTR(object), cl);
	if (delta == NULL) {
		/*
		 * The apply function has requested deletion. If the object
		 * doesn't exist, then exit early.
		 */
		if (CK_CC_UNLIKELY(object == NULL))
			return true;

		/* Otherwise, mark slot as deleted. */
		ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		map->n_entries--;
		map->tombstones++;
		return true;
	}

	/* The apply function has not requested hash set modification so exit early. */
	if (delta == object)
		return true;

	/* A modification or insertion has been requested. */
	ck_hs_map_bound_set(map, h, n_probes);

	insert = ck_hs_marshal(hs->mode, delta, h);
	if (first != NULL) {
		/*
		 * This follows the same semantics as ck_hs_set, please refer
		 * to that function for documentation.
		 */
		ck_pr_store_ptr(first, insert);

		if (object != NULL) {
			ck_hs_map_signal(map, h);
			ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		}
	} else {
		/*
		 * If we are storing into same slot, then atomic store is
		 * sufficient for replacement.
		 */
		ck_pr_store_ptr(slot, insert);
	}

	if (object == NULL)
		ck_hs_map_postinsert(hs, map);

	return true;
}

bool
ck_hs_set(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    void **previous)
{
	const void **slot, **first, *object, *insert;
	unsigned long n_probes;
	struct ck_hs_map *map;

	*previous = NULL;

restart:
	map = hs->map;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_HS_PROBE_INSERT);
	if (slot == NULL && first == NULL) {
		if (ck_hs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}

	ck_hs_map_bound_set(map, h, n_probes);
	insert = ck_hs_marshal(hs->mode, key, h);

	if (first != NULL) {
		/* If an earlier bucket was found, then store entry there. */
		ck_pr_store_ptr(first, insert);

		/*
		 * If a duplicate key was found, then delete it after
		 * signaling concurrent probes to restart. Optionally,
		 * it is possible to install tombstone after grace
		 * period if we can guarantee earlier position of
		 * duplicate key.
		 */
		if (object != NULL) {
			ck_hs_map_signal(map, h);
			ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
		}
	} else {
		/*
		 * If we are storing into same slot, then atomic store is
		 * sufficient for replacement.
		 */
		ck_pr_store_ptr(slot, insert);
	}

	if (object == NULL)
		ck_hs_map_postinsert(hs, map);

	*previous = CK_CC_DECONST_PTR(object);
	return true;
}

CK_CC_INLINE static bool
ck_hs_put_internal(struct ck_hs *hs,
    unsigned long h,
    const void *key,
    enum ck_hs_probe_behavior behavior)
{
	const void **slot, **first, *object, *insert;
	unsigned long n_probes;
	struct ck_hs_map *map;

restart:
	map = hs->map;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
	    map->probe_limit, behavior);

	if (slot == NULL && first == NULL) {
		if (ck_hs_grow(hs, map->capacity << 1) == false)
			return false;

		goto restart;
	}

	/* Fail operation if a match was found. */
	if (object != NULL)
		return false;

	ck_hs_map_bound_set(map, h, n_probes);
	insert = ck_hs_marshal(hs->mode, key, h);

	if (first != NULL) {
		/* Insert key into first bucket in probe sequence. */
		ck_pr_store_ptr(first, insert);
	} else {
		/* An empty slot was found. */
		ck_pr_store_ptr(slot, insert);
	}

	ck_hs_map_postinsert(hs, map);
	return true;
}

bool
ck_hs_put(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{

	return ck_hs_put_internal(hs, h, key, CK_HS_PROBE_INSERT);
}

bool
ck_hs_put_unique(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{

	return ck_hs_put_internal(hs, h, key, CK_HS_PROBE_TOMBSTONE);
}

void *
ck_hs_get(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{
	const void **first, *object;
	struct ck_hs_map *map;
	unsigned long n_probes;
	unsigned int g, g_p, probe;
	unsigned int *generation;

	do {
		map = ck_pr_load_ptr(&hs->map);
		generation = &map->generation[h & CK_HS_G_MASK];
		g = ck_pr_load_uint(generation);
		probe = ck_hs_map_bound_get(map, h);
		ck_pr_fence_load();

		ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, probe, CK_HS_PROBE);

		ck_pr_fence_load();
		g_p = ck_pr_load_uint(generation);
	} while (g != g_p);

	return CK_CC_DECONST_PTR(object);
}

void *
ck_hs_remove(struct ck_hs *hs,
    unsigned long h,
    const void *key)
{
	const void **slot, **first, *object;
	struct ck_hs_map *map = hs->map;
	unsigned long n_probes;

	slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
	    ck_hs_map_bound_get(map, h), CK_HS_PROBE);
	if (object == NULL)
		return NULL;

	ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
	map->n_entries--;
	map->tombstones++;
	return CK_CC_DECONST_PTR(object);
}

bool
ck_hs_move(struct ck_hs *hs,
    struct ck_hs *source,
    ck_hs_hash_cb_t *hf,
    ck_hs_compare_cb_t *compare,
    struct ck_malloc *m)
{

	if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
		return false;

	hs->mode = source->mode;
	hs->seed = source->seed;
	hs->map = source->map;
	hs->m = m;
	hs->hf = hf;
	hs->compare = compare;
	return true;
}

bool
ck_hs_init(struct ck_hs *hs,
    unsigned int mode,
    ck_hs_hash_cb_t *hf,
    ck_hs_compare_cb_t *compare,
    struct ck_malloc *m,
    unsigned long n_entries,
    unsigned long seed)
{

	if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
		return false;

	hs->m = m;
	hs->mode = mode;
	hs->seed = seed;
	hs->hf = hf;
	hs->compare = compare;

	hs->map = ck_hs_map_create(hs, n_entries);
	return hs->map != NULL;
}
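An end-to-end sketch of the hash-set API defined above (not part of the import; the allocator wrapper and both callbacks are illustrative, and CK_HS_HASH plus the mode constants are assumed from ck_hs.h):

#include <ck_hs.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static void *
hs_malloc(size_t r)
{

	return malloc(r);
}

static void
hs_free(void *p, size_t b, bool r)
{

	(void)b;
	(void)r;
	free(p);
}

static struct ck_malloc hs_allocator = {
	.malloc = hs_malloc,
	.free = hs_free
};

static unsigned long
hs_hash(const void *object, unsigned long seed)
{
	const char *c = object;
	unsigned long h = seed;

	/* Illustrative only; any well-distributed hash is suitable. */
	while (*c != '\0')
		h = h * 131 + (unsigned char)*c++;

	return h;
}

static bool
hs_compare(const void *previous, const void *compare)
{

	return strcmp(previous, compare) == 0;
}

static ck_hs_t hs;

static bool
example(void)
{
	const char *key = "example";
	unsigned long h;

	if (ck_hs_init(&hs, CK_HS_MODE_SPMC | CK_HS_MODE_DIRECT,
	    hs_hash, hs_compare, &hs_allocator, 128, 6602834) == false)
		return false;

	h = CK_HS_HASH(&hs, hs_hash, key);	/* assumed helper from ck_hs.h */
	return ck_hs_put(&hs, h, key);
}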
1036
sys/contrib/ck/src/ck_ht.c
Normal file
File diff suppressed because it is too large
269
sys/contrib/ck/src/ck_ht_hash.h
Normal file
@ -0,0 +1,269 @@
/*
 * Copyright 2012-2015 Samy Al Bahra
 * Copyright 2011-2014 AppNexus, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_HT_HASH_H
#define CK_HT_HASH_H

/*
 * This is the Murmur hash written by Austin Appleby.
 */

#include <ck_stdint.h>
#include <ck_string.h>

//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.

//-----------------------------------------------------------------------------
// Platform-specific functions and macros

// Microsoft Visual Studio

#if defined(_MSC_VER)

#define FORCE_INLINE __forceinline

#include <stdlib.h>

#define ROTL32(x,y)	_rotl(x,y)
#define ROTL64(x,y)	_rotl64(x,y)

#define BIG_CONSTANT(x) (x)

// Other compilers

#else // defined(_MSC_VER)

#define FORCE_INLINE inline __attribute__((always_inline))

static inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}

static inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x,y)	rotl32(x,y)
#define ROTL64(x,y)	rotl64(x,y)

#define BIG_CONSTANT(x) (x##LLU)

#endif // !defined(_MSC_VER)

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here

FORCE_INLINE static uint32_t getblock ( const uint32_t * p, int i )
{
  return p[i];
}

//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

FORCE_INLINE static uint32_t fmix ( uint32_t h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;

  return h;
}

//-----------------------------------------------------------------------------

static inline void MurmurHash3_x86_32 ( const void * key, int len,
  uint32_t seed, uint32_t * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 4;
  int i;

  uint32_t h1 = seed;

  uint32_t c1 = 0xcc9e2d51;
  uint32_t c2 = 0x1b873593;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(const void *)(data + nblocks*4);

  for(i = -nblocks; i; i++)
|
||||
{
|
||||
uint32_t k1 = getblock(blocks,i);
|
||||
|
||||
k1 *= c1;
|
||||
k1 = ROTL32(k1,15);
|
||||
k1 *= c2;
|
||||
|
||||
h1 ^= k1;
|
||||
h1 = ROTL32(h1,13);
|
||||
h1 = h1*5+0xe6546b64;
|
||||
}
|
||||
|
||||
//----------
|
||||
// tail
|
||||
|
||||
const uint8_t * tail = (const uint8_t*)(data + nblocks*4);
|
||||
|
||||
uint32_t k1 = 0;
|
||||
|
||||
switch(len & 3)
|
||||
{
|
||||
case 3: k1 ^= tail[2] << 16;
|
||||
case 2: k1 ^= tail[1] << 8;
|
||||
case 1: k1 ^= tail[0];
|
||||
k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
|
||||
};
|
||||
|
||||
//----------
|
||||
// finalization
|
||||
|
||||
h1 ^= len;
|
||||
|
||||
h1 = fmix(h1);
|
||||
|
||||
*(uint32_t *)out = h1;
|
||||
}
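
The 32-bit variant returns its digest through the out pointer rather than by value. A hedged usage sketch (editor's illustration; the seed is arbitrary):

	const char *key = "example-key";
	uint32_t digest;
	MurmurHash3_x86_32(key, (int)strlen(key), 0x9747b28c, &digest);
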
static inline uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed )
{
  const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
  const int r = 47;

  uint64_t h = seed ^ (len * m);

  const uint64_t * data = (const uint64_t *)key;
  const uint64_t * end = data + (len/8);

  while(data != end)
  {
    uint64_t k;

    if (!((uintptr_t)data & 0x7))
      k = *data++;
    else {
      memcpy(&k, data, sizeof(k));
      data++;
    }

    k *= m;
    k ^= k >> r;
    k *= m;

    h ^= k;
    h *= m;
  }

  const unsigned char * data2 = (const unsigned char*)data;

  switch(len & 7)
  {
  case 7: h ^= (uint64_t)(data2[6]) << 48;
  case 6: h ^= (uint64_t)(data2[5]) << 40;
  case 5: h ^= (uint64_t)(data2[4]) << 32;
  case 4: h ^= (uint64_t)(data2[3]) << 24;
  case 3: h ^= (uint64_t)(data2[2]) << 16;
  case 2: h ^= (uint64_t)(data2[1]) << 8;
  case 1: h ^= (uint64_t)(data2[0]);
          h *= m;
  };

  h ^= h >> r;
  h *= m;
  h ^= h >> r;

  return h;
}
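
The alignment test and memcpy fallback in the main loop keep the word loads safe on strict-alignment architectures: aligned buffers take the direct load, everything else goes through memcpy, which compilers lower to an unaligned load where the ISA permits. An illustrative call on a deliberately misaligned buffer (editor's sketch):

	char buf[] = "xunaligned-key";
	uint64_t h = MurmurHash64A(buf + 1, (int)strlen(buf + 1), 0x9ae16a3b);
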
// 64-bit hash for 32-bit platforms

static inline uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed )
{
  const uint32_t m = 0x5bd1e995;
  const int r = 24;

  uint32_t h1 = (uint32_t)(seed) ^ len;
  uint32_t h2 = (uint32_t)(seed >> 32);

  const uint32_t * data = (const uint32_t *)key;

  while(len >= 8)
  {
    uint32_t k1 = *data++;
    k1 *= m; k1 ^= k1 >> r; k1 *= m;
    h1 *= m; h1 ^= k1;
    len -= 4;

    uint32_t k2 = *data++;
    k2 *= m; k2 ^= k2 >> r; k2 *= m;
    h2 *= m; h2 ^= k2;
    len -= 4;
  }

  if(len >= 4)
  {
    uint32_t k1 = *data++;
    k1 *= m; k1 ^= k1 >> r; k1 *= m;
    h1 *= m; h1 ^= k1;
    len -= 4;
  }

  switch(len)
  {
  case 3: h2 ^= ((const unsigned char*)data)[2] << 16;
  case 2: h2 ^= ((const unsigned char*)data)[1] << 8;
  case 1: h2 ^= ((const unsigned char*)data)[0];
          h2 *= m;
  };

  h1 ^= h2 >> 18; h1 *= m;
  h2 ^= h1 >> 22; h2 *= m;
  h1 ^= h2 >> 17; h1 *= m;
  h2 ^= h1 >> 19; h2 *= m;

  uint64_t h = h1;

  h = (h << 32) | h2;

  return h;
}
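
The two 64-bit variants are not interchangeable: MurmurHash64B folds two 32-bit lanes, so it yields a different digest than MurmurHash64A for the same input. A hash table must be built and probed with the same variant, e.g.:

	uint64_t a = MurmurHash64A("key", 3, 7);
	uint64_t b = MurmurHash64B("key", 3, 7);
	/* a and b will almost certainly differ; pick one variant per table. */
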

#endif /* CK_HT_HASH_H */
119
sys/contrib/ck/src/ck_internal.h
Normal file
@ -0,0 +1,119 @@
/*
 * Copyright 2011-2015 Samy Al Bahra.
 * Copyright 2011 David Joseph.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Several of these are from: http://graphics.stanford.edu/~seander/bithacks.html
 */

#define CK_INTERNAL_LOG_0	(0xAAAAAAAA)
#define CK_INTERNAL_LOG_1	(0xCCCCCCCC)
#define CK_INTERNAL_LOG_2	(0xF0F0F0F0)
#define CK_INTERNAL_LOG_3	(0xFF00FF00)
#define CK_INTERNAL_LOG_4	(0xFFFF0000)

CK_CC_INLINE static uint32_t
ck_internal_log(uint32_t v)
{
	uint32_t r = (v & CK_INTERNAL_LOG_0) != 0;

	r |= ((v & CK_INTERNAL_LOG_4) != 0) << 4;
	r |= ((v & CK_INTERNAL_LOG_3) != 0) << 3;
	r |= ((v & CK_INTERNAL_LOG_2) != 0) << 2;
	r |= ((v & CK_INTERNAL_LOG_1) != 0) << 1;
	return (r);
}
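
The masks implement the classic bithack for the base-2 logarithm of a power of two: each mask tests one bit of the exponent, so the function is only meaningful for power-of-two inputs. Illustrative values (editor's asserts, assuming <assert.h>):

	assert(ck_internal_log(1) == 0);
	assert(ck_internal_log(8) == 3);
	assert(ck_internal_log(1U << 20) == 20);
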

CK_CC_INLINE static uint32_t
ck_internal_power_2(uint32_t v)
{

	--v;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return (++v);
}
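
The or-cascade smears the highest set bit of v - 1 into every lower position, so the final increment lands on the next power of two; the initial decrement makes exact powers of two map to themselves. Illustrative values (editor's asserts; note that v = 0 wraps around to 0):

	assert(ck_internal_power_2(1000) == 1024);
	assert(ck_internal_power_2(1024) == 1024);
	assert(ck_internal_power_2(1) == 1);
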

CK_CC_INLINE static unsigned long
ck_internal_max(unsigned long x, unsigned long y)
{

	return x ^ ((x ^ y) & -(x < y));
}

CK_CC_INLINE static uint64_t
ck_internal_max_64(uint64_t x, uint64_t y)
{

	return x ^ ((x ^ y) & -(x < y));
}

CK_CC_INLINE static uint32_t
ck_internal_max_32(uint32_t x, uint32_t y)
{

	return x ^ ((x ^ y) & -(x < y));
}
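
All three variants compute a branchless maximum: (x < y) evaluates to 0 or 1, so -(x < y) is either all zeroes or all ones, and the AND/XOR pair leaves x unchanged or swaps in y. Illustrative values (editor's asserts):

	assert(ck_internal_max(3, 7) == 7);	/* mask all ones: 3 ^ (3 ^ 7) = 7 */
	assert(ck_internal_max(7, 3) == 7);	/* mask zero:     7 ^ 0       = 7 */
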

/*
 * For a power-of-two argument both paths agree: __builtin_ffs() returns
 * the 1-based index of the least significant set bit, and for a lone bit
 * the top-down fallback scan below lands on the same value.
 */
CK_CC_INLINE static unsigned long
ck_internal_bsf(unsigned long v)
{
#if defined(__GNUC__)
	return __builtin_ffs(v);
#else
	unsigned int i;
	const unsigned int s = sizeof(unsigned long) * 8 - 1;

	for (i = 0; i < s; i++) {
		if (v & (1UL << (s - i)))
			return sizeof(unsigned long) * 8 - i;
	}

	return 1;
#endif /* !__GNUC__ */
}

CK_CC_INLINE static uint64_t
ck_internal_bsf_64(uint64_t v)
{
#if defined(__GNUC__)
	/* ffsll, not ffs: the plain builtin would truncate v to int. */
	return __builtin_ffsll(v);
#else
	unsigned int i;
	const unsigned int s = sizeof(unsigned long) * 8 - 1;

	/*
	 * Note: this fallback returns a 0-based offset counted from the
	 * most significant bit, not the ffsll() convention used above.
	 */
	for (i = 0; i < s; i++) {
		if (v & (1ULL << (63U - i)))
			return i;
	}
#endif /* !__GNUC__ */

	return 1;
}
1480
sys/contrib/ck/src/ck_rhs.c
Normal file
File diff suppressed because it is too large