lib: 32/64-bit cleanups

Signed-off-by: Intel
Intel 2013-06-03 00:00:00 +00:00 committed by David Marchand
parent a2ca32f764
commit 3b46fb77eb
17 changed files with 282 additions and 1316 deletions
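The pattern running through all 17 files: object lengths and page sizes move from uint64_t to size_t, printf formats move to %zu/%zx, and the one place where cache-line rounding can now wrap gains an explicit overflow check. A minimal sketch of the truncation bug this class of change prevents on 32-bit builds (a hypothetical standalone program, not part of the commit):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* 0x1900000000 (100 GiB) is the size the memzone test below tries
         * to reserve; its low 32 bits are zero, so it does not survive a
         * trip through a 32-bit size_t. */
        uint64_t requested = 0x1900000000ULL;
        size_t len = (size_t)requested; /* silently truncates to 0 on 32-bit */
        printf("requested=%" PRIu64 ", seen by allocator=%zu\n",
               requested, len);
        return 0;
    }

Keeping lengths in size_t end to end, and validating 64-bit inputs against SIZE_MAX up front, removes this failure mode.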

View File

@@ -105,7 +105,7 @@
static rte_atomic16_t a16;
static rte_atomic32_t a32;
static rte_atomic64_t a64;
static rte_atomic32_t count;
static rte_atomic64_t count;
static rte_atomic32_t synchro;
static int
@@ -153,11 +153,11 @@ test_atomic_tas(__attribute__((unused)) void *arg)
;
if (rte_atomic16_test_and_set(&a16))
rte_atomic32_inc(&count);
rte_atomic64_inc(&count);
if (rte_atomic32_test_and_set(&a32))
rte_atomic32_inc(&count);
rte_atomic64_inc(&count);
if (rte_atomic64_test_and_set(&a64))
rte_atomic32_inc(&count);
rte_atomic64_inc(&count);
return 0;
}
@@ -175,22 +175,22 @@ test_atomic_addsub_and_return(__attribute__((unused)) void *arg)
for (i = 0; i < N; i++) {
tmp16 = rte_atomic16_add_return(&a16, 1);
rte_atomic32_add(&count, tmp16);
rte_atomic64_add(&count, tmp16);
tmp16 = rte_atomic16_sub_return(&a16, 1);
rte_atomic32_sub(&count, tmp16+1);
rte_atomic64_sub(&count, tmp16+1);
tmp32 = rte_atomic32_add_return(&a32, 1);
rte_atomic32_add(&count, tmp32);
rte_atomic64_add(&count, tmp32);
tmp32 = rte_atomic32_sub_return(&a32, 1);
rte_atomic32_sub(&count, tmp32+1);
rte_atomic64_sub(&count, tmp32+1);
tmp64 = rte_atomic64_add_return(&a64, 1);
rte_atomic32_add(&count, tmp64);
rte_atomic64_add(&count, tmp64);
tmp64 = rte_atomic64_sub_return(&a64, 1);
rte_atomic32_sub(&count, tmp64+1);
rte_atomic64_sub(&count, tmp64+1);
}
return 0;
@@ -213,13 +213,13 @@ test_atomic_inc_and_test(__attribute__((unused)) void *arg)
;
if (rte_atomic16_inc_and_test(&a16)) {
rte_atomic32_inc(&count);
rte_atomic64_inc(&count);
}
if (rte_atomic32_inc_and_test(&a32)) {
rte_atomic32_inc(&count);
rte_atomic64_inc(&count);
}
if (rte_atomic64_inc_and_test(&a64)) {
rte_atomic32_inc(&count);
rte_atomic64_inc(&count);
}
return 0;
@@ -240,13 +240,13 @@ test_atomic_dec_and_test(__attribute__((unused)) void *arg)
;
if (rte_atomic16_dec_and_test(&a16))
rte_atomic32_inc(&count);
rte_atomic64_inc(&count);
if (rte_atomic32_dec_and_test(&a32))
rte_atomic32_inc(&count);
rte_atomic64_inc(&count);
if (rte_atomic64_dec_and_test(&a64))
rte_atomic32_inc(&count);
rte_atomic64_inc(&count);
return 0;
}
@@ -257,7 +257,7 @@ test_atomic(void)
rte_atomic16_init(&a16);
rte_atomic32_init(&a32);
rte_atomic64_init(&a64);
rte_atomic32_init(&count);
rte_atomic64_init(&count);
rte_atomic32_init(&synchro);
rte_atomic16_set(&a16, 1UL << 10);
@@ -291,13 +291,13 @@ test_atomic(void)
rte_atomic64_set(&a64, 0);
rte_atomic32_set(&a32, 0);
rte_atomic16_set(&a16, 0);
rte_atomic32_set(&count, 0);
rte_atomic64_set(&count, 0);
rte_eal_mp_remote_launch(test_atomic_tas, NULL, SKIP_MASTER);
rte_atomic32_set(&synchro, 1);
rte_eal_mp_wait_lcore();
rte_atomic32_set(&synchro, 0);
if (rte_atomic32_read(&count) != NUM_ATOMIC_TYPES) {
if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
printf("Atomic test and set failed\n");
return -1;
}
@@ -307,14 +307,14 @@ test_atomic(void)
rte_atomic64_set(&a64, 0);
rte_atomic32_set(&a32, 0);
rte_atomic16_set(&a16, 0);
rte_atomic32_set(&count, 0);
rte_atomic64_set(&count, 0);
rte_eal_mp_remote_launch(test_atomic_addsub_and_return, NULL,
SKIP_MASTER);
rte_atomic32_set(&synchro, 1);
rte_eal_mp_wait_lcore();
rte_atomic32_set(&synchro, 0);
if (rte_atomic32_read(&count) != 0) {
if (rte_atomic64_read(&count) != 0) {
printf("Atomic add/sub+return failed\n");
return -1;
}
@@ -338,7 +338,7 @@ test_atomic(void)
rte_atomic32_clear(&a32);
rte_atomic16_clear(&a16);
rte_atomic32_clear(&synchro);
rte_atomic32_clear(&count);
rte_atomic64_clear(&count);
rte_atomic64_set(&a64, (int64_t)(1 - (int64_t)rte_lcore_count()));
rte_atomic32_set(&a32, (int32_t)(1 - (int32_t)rte_lcore_count()));
@@ -348,7 +348,7 @@ test_atomic(void)
rte_eal_mp_wait_lcore();
rte_atomic32_clear(&synchro);
if (rte_atomic32_read(&count) != NUM_ATOMIC_TYPES) {
if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
printf("Atomic inc and test failed %d\n", (int)count.cnt);
return -1;
}
@@ -360,7 +360,7 @@ test_atomic(void)
printf("dec and test\n");
rte_atomic32_clear(&synchro);
rte_atomic32_clear(&count);
rte_atomic64_clear(&count);
rte_atomic64_set(&a64, (int64_t)(rte_lcore_count() - 1));
rte_atomic32_set(&a32, (int32_t)(rte_lcore_count() - 1));
@@ -370,7 +370,7 @@ test_atomic(void)
rte_eal_mp_wait_lcore();
rte_atomic32_clear(&synchro);
if (rte_atomic32_read(&count) != NUM_ATOMIC_TYPES) {
if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
printf("Atomic dec and test failed\n");
return -1;
}
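For orientation, a hedged sketch of the harness these hunks live in (reusing the count/synchro globals and EAL calls visible above): each worker lcore spins until the master raises synchro, the workers bump the now-64-bit count, and the master checks the total after rte_eal_mp_wait_lcore().

    static int worker(__attribute__((unused)) void *arg)
    {
        while (rte_atomic32_read(&synchro) == 0)
            ;                             /* wait for the start signal */
        rte_atomic64_inc(&count);         /* one tick per worker lcore */
        return 0;
    }

    static void run_round(void)
    {
        rte_atomic64_set(&count, 0);
        rte_atomic32_set(&synchro, 0);
        rte_eal_mp_remote_launch(worker, NULL, SKIP_MASTER);
        rte_atomic32_set(&synchro, 1);    /* release all workers at once */
        rte_eal_mp_wait_lcore();
        /* count now equals the number of worker lcores */
    }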

View File

@@ -118,7 +118,7 @@ test_memzone_reserving_zone_size_bigger_than_the_maximum(void)
return -1;
}
mz = rte_memzone_reserve("zone_size_bigger_than_the_maximum", 0x1900000000ULL,
mz = rte_memzone_reserve("zone_size_bigger_than_the_maximum", (size_t)-1,
SOCKET_ID_ANY, 0);
if (mz != NULL) {
printf("It is impossible to reserve such big a memzone\n");
@@ -268,9 +268,9 @@ test_memzone_reserve_max(void)
const struct rte_memseg *ms;
int memseg_idx = 0;
int memzone_idx = 0;
uint64_t len = 0;
size_t len = 0;
void* last_addr;
uint64_t maxlen = 0;
size_t maxlen = 0;
/* get pointer to global configuration */
config = rte_eal_get_configuration();
@@ -285,7 +285,7 @@ test_memzone_reserve_max(void)
/* align everything */
last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, CACHE_LINE_SIZE);
len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
len &= ~((uint64_t) CACHE_LINE_MASK);
len &= ~((size_t) CACHE_LINE_MASK);
/* cycle through all memzones */
for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
@@ -298,8 +298,7 @@ test_memzone_reserve_max(void)
if ((config->mem_config->memzone[memzone_idx].addr >=
ms[memseg_idx].addr) &&
(config->mem_config->memzone[memzone_idx].addr <=
(RTE_PTR_ADD(ms[memseg_idx].addr,
(size_t)ms[memseg_idx].len)))) {
(RTE_PTR_ADD(ms[memseg_idx].addr, ms[memseg_idx].len)))) {
/* since the zones can now be aligned and occasionally skip
* some space, we should calculate the length based on
* reported length and start addresses difference. Addresses
@@ -336,7 +335,7 @@ test_memzone_reserve_max(void)
if (mz->len != maxlen) {
printf("Memzone reserve with 0 size did not return bigest block\n");
printf("Expected size = %" PRIu64 ", actual size = %" PRIu64 "\n",
printf("Expected size = %zu, actual size = %zu\n",
maxlen, mz->len);
rte_dump_physmem_layout();
rte_memzone_dump();
@@ -354,9 +353,10 @@ test_memzone_reserve_max_aligned(void)
const struct rte_memseg *ms;
int memseg_idx = 0;
int memzone_idx = 0;
uint64_t addr_offset, len = 0;
uintptr_t addr_offset;
size_t len = 0;
void* last_addr;
uint64_t maxlen = 0;
size_t maxlen = 0;
/* random alignment */
rte_srand((unsigned)rte_rdtsc());
@@ -378,7 +378,7 @@ test_memzone_reserve_max_aligned(void)
/* align everything */
last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, CACHE_LINE_SIZE);
len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
len &= ~((uint64_t) CACHE_LINE_MASK);
len &= ~((size_t) CACHE_LINE_MASK);
/* cycle through all memzones */
for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
@@ -391,8 +391,7 @@ test_memzone_reserve_max_aligned(void)
if ((config->mem_config->memzone[memzone_idx].addr >=
ms[memseg_idx].addr) &&
(config->mem_config->memzone[memzone_idx].addr <=
(RTE_PTR_ADD(ms[memseg_idx].addr,
(size_t) ms[memseg_idx].len)))) {
(RTE_PTR_ADD(ms[memseg_idx].addr, ms[memseg_idx].len)))) {
/* since the zones can now be aligned and occasionally skip
* some space, we should calculate the length based on
* reported length and start addresses difference.
@@ -433,7 +432,7 @@ test_memzone_reserve_max_aligned(void)
if (mz->len != maxlen) {
printf("Memzone reserve with 0 size and alignment %u did not return"
" bigest block\n", align);
printf("Expected size = %" PRIu64 ", actual size = %" PRIu64 "\n",
printf("Expected size = %zu, actual size = %zu\n",
maxlen, mz->len);
rte_dump_physmem_layout();
rte_memzone_dump();
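The format-string side of the cleanup is the standard C99 idiom: %zu and %zx take a size_t argument at its native width on both 32- and 64-bit builds, whereas the old PRIu64 forms required every argument to really be a uint64_t. A tiny sketch (hypothetical helper):

    #include <stdio.h>

    static void report_sizes(size_t expected, size_t actual)
    {
        /* no casts needed: the z modifier matches size_t on every target */
        printf("Expected size = %zu, actual size = %zu\n", expected, actual);
        printf("Actual size in hex = 0x%zx\n", actual);
    }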

View File

@@ -95,9 +95,9 @@ rte_dump_physmem_layout(void)
if (mcfg->memseg[i].addr == NULL)
break;
printf("Segment %u: phys:0x%"PRIx64", len:0x%"PRIx64", "
printf("Segment %u: phys:0x%"PRIx64", len:%zu, "
"virt:%p, socket_id:%"PRId32", "
"hugepage_sz:0x%"PRIx64", nchannel:%"PRIx32", "
"hugepage_sz:0x%zu, nchannel:%"PRIx32", "
"nrank:%"PRIx32"\n", i,
mcfg->memseg[i].phys_addr,
mcfg->memseg[i].len,

View File

@@ -83,7 +83,7 @@ memzone_lookup_thread_unsafe(const char *name)
* allocation cannot be done, return NULL.
*/
const struct rte_memzone *
rte_memzone_reserve(const char *name, uint64_t len, int socket_id,
rte_memzone_reserve(const char *name, size_t len, int socket_id,
unsigned flags)
{
return rte_memzone_reserve_aligned(name,
@@ -91,14 +91,15 @@ rte_memzone_reserve(const char *name, uint64_t len, int socket_id,
}
static const struct rte_memzone *
memzone_reserve_aligned_thread_unsafe(const char *name, uint64_t len,
memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
int socket_id, unsigned flags, unsigned align)
{
struct rte_mem_config *mcfg;
unsigned i = 0;
int memseg_idx = -1;
uint64_t addr_offset, requested_len;
uint64_t memseg_len = 0;
uint64_t addr_offset;
size_t requested_len;
size_t memseg_len = 0;
phys_addr_t memseg_physaddr;
void *memseg_addr;
@@ -120,9 +121,13 @@ memzone_reserve_aligned_thread_unsafe(const char *name, uint64_t len,
return NULL;
}
/* align length on cache boundary */
/* align length on cache boundary. Check for overflow before doing so */
if (len > SIZE_MAX - CACHE_LINE_MASK) {
rte_errno = EINVAL; /* requested size too big */
return NULL;
}
len += CACHE_LINE_MASK;
len &= ~((uint64_t) CACHE_LINE_MASK);
len &= ~((size_t) CACHE_LINE_MASK);
/* save original length */
requested_len = len;
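The new guard is needed because the round-up can wrap: once len is a size_t, a 32-bit build overflows as soon as len exceeds SIZE_MAX - CACHE_LINE_MASK, and the masked result would come out tiny instead of failing. The same logic as a standalone sketch (CACHE_LINE_MASK assumed to be 63, i.e. a 64-byte cache line):

    #include <stdint.h>

    #define CACHE_LINE_MASK 63 /* assumption: 64-byte cache line */

    /* Round len up to a cache-line multiple; 0 signals overflow. */
    static size_t cache_align_len(size_t len)
    {
        if (len > SIZE_MAX - CACHE_LINE_MASK)
            return 0;                     /* len + mask would wrap */
        return (len + CACHE_LINE_MASK) & ~(size_t)CACHE_LINE_MASK;
    }

For example, on a 32-bit target cache_align_len(0xffffffc1) reports overflow, where the unchecked version would silently return 0.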
@@ -198,7 +203,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, uint64_t len,
return memzone_reserve_aligned_thread_unsafe(name, len - align,
socket_id, 0, align);
RTE_LOG(ERR, EAL, "%s(%s, %" PRIu64 ", %d): "
RTE_LOG(ERR, EAL, "%s(%s, %zu, %d): "
"No appropriate segment found\n",
__func__, name, requested_len, socket_id);
rte_errno = ENOMEM;
@@ -211,14 +216,15 @@ memzone_reserve_aligned_thread_unsafe(const char *name, uint64_t len,
/* save aligned physical and virtual addresses */
memseg_physaddr = free_memseg[memseg_idx].phys_addr + addr_offset;
memseg_addr = RTE_PTR_ADD(free_memseg[memseg_idx].addr, (uintptr_t) addr_offset);
memseg_addr = RTE_PTR_ADD(free_memseg[memseg_idx].addr,
(uintptr_t) addr_offset);
/* if we are looking for the biggest memzone */
if (requested_len == 0)
requested_len = memseg_len - addr_offset;
/* set length to correct value */
len = addr_offset + requested_len;
len = (size_t)addr_offset + requested_len;
/* update our internal state */
free_memseg[memseg_idx].len -= len;
@@ -244,7 +250,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, uint64_t len,
* specified alignment). If the allocation cannot be done, return NULL.
*/
const struct rte_memzone *
rte_memzone_reserve_aligned(const char *name, uint64_t len,
rte_memzone_reserve_aligned(const char *name, size_t len,
int socket_id, unsigned flags, unsigned align)
{
struct rte_mem_config *mcfg;
@@ -316,7 +322,7 @@ rte_memzone_dump(void)
for (i=0; i<RTE_MAX_MEMZONE; i++) {
if (mcfg->memzone[i].addr == NULL)
break;
printf("Zone %o: name:<%s>, phys:0x%"PRIx64", len:0x%"PRIx64""
printf("Zone %o: name:<%s>, phys:0x%"PRIx64", len:0x%zx"
", virt:%p, socket_id:%"PRId32", flags:%"PRIx32"\n", i,
mcfg->memzone[i].name,
mcfg->memzone[i].phys_addr,

View File

@@ -45,612 +45,12 @@
#ifndef _RTE_I686_ATOMIC_H_
#define _RTE_I686_ATOMIC_H_
/**
* @file
* Atomic Operations on i686
*/
#if RTE_MAX_LCORE == 1
#define MPLOCKED /**< No need to insert MP lock prefix. */
#else
#define MPLOCKED "lock ; " /**< Insert MP lock prefix. */
#endif
/**
* General memory barrier.
*
* Guarantees that the LOAD and STORE operations generated before the
* barrier occur before the LOAD and STORE operations generated after.
*/
#define rte_mb() asm volatile(MPLOCKED "addl $0,(%%esp)" : : : "memory")
/**
* Write memory barrier.
*
* Guarantees that the STORE operations generated before the barrier
* occur before the STORE operations generated after.
*/
#define rte_wmb() asm volatile(MPLOCKED "addl $0,(%%esp)" : : : "memory")
/**
* Read memory barrier.
*
* Guarantees that the LOAD operations generated before the barrier
* occur before the LOAD operations generated after.
*/
#define rte_rmb() asm volatile(MPLOCKED "addl $0,(%%esp)" : : : "memory")
/*------------------------- 16 bit atomic operations -------------------------*/
/**
* Atomic compare and set.
*
* (atomic) equivalent to:
* if (*dst == exp)
* *dst = src (all 16-bit words)
*
* @param dst
* The destination location into which the value will be written.
* @param exp
* The expected value.
* @param src
* The new value.
* @return
* Non-zero on success; 0 on failure.
*/
static inline int
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
{
uint8_t res;
asm volatile(
MPLOCKED
"cmpxchgw %[src], %[dst];"
"sete %[res];"
: [res] "=a" (res), /* output */
[dst] "=m" (*dst)
: [src] "r" (src), /* input */
"a" (exp),
"m" (*dst)
: "memory"); /* no-clobber list */
return res;
}
/**
* The atomic counter structure.
*/
typedef struct {
volatile int16_t cnt; /**< An internal counter value. */
} rte_atomic16_t;
/**
* Static initializer for an atomic counter.
*/
#define RTE_ATOMIC16_INIT(val) { (val) }
/**
* Initialize an atomic counter.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void
rte_atomic16_init(rte_atomic16_t *v)
{
v->cnt = 0;
}
/**
* Atomically read a 16-bit value from a counter.
*
* @param v
* A pointer to the atomic counter.
* @return
* The value of the counter.
*/
static inline int16_t
rte_atomic16_read(const rte_atomic16_t *v)
{
return v->cnt;
}
/**
* Atomically set a counter to a 16-bit value.
*
* @param v
* A pointer to the atomic counter.
* @param new_value
* The new value for the counter.
*/
static inline void
rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
{
v->cnt = new_value;
}
/**
* Atomically add a 16-bit value to an atomic counter.
*
* @param v
* A pointer to the atomic counter.
* @param inc
* The value to be added to the counter.
*/
static inline void
rte_atomic16_add(rte_atomic16_t *v, int16_t inc)
{
asm volatile(
MPLOCKED
"addw %[inc], %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: [inc] "ir" (inc), /* input */
"m" (v->cnt)
);
}
/**
* Atomically subtract a 16-bit value from an atomic counter.
*
* @param v
* A pointer to the atomic counter.
* @param dec
* The value to be subtracted from the counter.
*/
static inline void
rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)
{
asm volatile(
MPLOCKED
"subw %[dec], %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: [dec] "ir" (dec), /* input */
"m" (v->cnt)
);
}
/**
* Atomically increment a counter by one.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void
rte_atomic16_inc(rte_atomic16_t *v)
{
asm volatile(
MPLOCKED
"incw %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: "m" (v->cnt) /* input */
);
}
/**
* Atomically decrement a counter by one.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void
rte_atomic16_dec(rte_atomic16_t *v)
{
asm volatile(
MPLOCKED
"decw %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: "m" (v->cnt) /* input */
);
}
/**
* Atomically add a 16-bit value to a counter and return the result.
*
* Atomically adds the 16-bit value (inc) to the atomic counter (v) and
* returns the value of v after addition.
*
* @param v
* A pointer to the atomic counter.
* @param inc
* The value to be added to the counter.
* @return
* The value of v after the addition.
*/
static inline int16_t
rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
{
int16_t prev = inc;
asm volatile(
MPLOCKED
"xaddw %[prev], %[cnt]"
: [prev] "+r" (prev), /* output */
[cnt] "=m" (v->cnt)
: "m" (v->cnt) /* input */
);
return (int16_t)(prev + inc);
}
/**
* Atomically subtract a 16-bit value from a counter and return
* the result.
*
* Atomically subtracts the 16-bit value (dec) from the atomic counter
* (v) and returns the value of v after the subtraction.
*
* @param v
* A pointer to the atomic counter.
* @param dec
* The value to be subtracted from the counter.
* @return
* The value of v after the subtraction.
*/
static inline int16_t
rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)
{
return rte_atomic16_add_return(v, (int16_t)-dec);
}
/**
* Atomically increment a 16-bit counter by one and test.
*
* Atomically increments the atomic counter (v) by one and returns true if
* the result is 0, or false in all other cases.
*
* @param v
* A pointer to the atomic counter.
* @return
* True if the result after the increment operation is 0; false otherwise.
*/
static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
uint8_t ret;
asm volatile(
MPLOCKED
"incw %[cnt] ; "
"sete %[ret]"
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
}
/**
* Atomically decrement a 16-bit counter by one and test.
*
* Atomically decrements the atomic counter (v) by one and returns true if
* the result is 0, or false in all other cases.
*
* @param v
* A pointer to the atomic counter.
* @return
* True if the result after the decrement operation is 0; false otherwise.
*/
static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
uint8_t ret;
asm volatile(MPLOCKED
"decw %[cnt] ; "
"sete %[ret]"
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
}
/**
* Atomically test and set a 16-bit atomic counter.
*
* If the counter value is already set, return 0 (failed). Otherwise, set
* the counter value to 1 and return 1 (success).
*
* @param v
* A pointer to the atomic counter.
* @return
* 0 if failed; else 1, success.
*/
static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
}
/**
* Atomically set a 16-bit counter to 0.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void rte_atomic16_clear(rte_atomic16_t *v)
{
v->cnt = 0;
}
/*------------------------- 32 bit atomic operations -------------------------*/
/**
* Atomic compare and set.
*
* (atomic) equivalent to:
* if (*dst == exp)
* *dst = src (all 32-bit words)
*
* @param dst
* The destination location into which the value will be written.
* @param exp
* The expected value.
* @param src
* The new value.
* @return
* Non-zero on success; 0 on failure.
*/
static inline int
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
uint8_t res;
asm volatile(
MPLOCKED
"cmpxchgl %[src], %[dst];"
"sete %[res];"
: [res] "=a" (res), /* output */
[dst] "=m" (*dst)
: [src] "r" (src), /* input */
"a" (exp),
"m" (*dst)
: "memory"); /* no-clobber list */
return res;
}
/**
* The atomic counter structure.
*/
typedef struct {
volatile int32_t cnt; /**< An internal counter value. */
} rte_atomic32_t;
/**
* Static initializer for an atomic counter.
*/
#define RTE_ATOMIC32_INIT(val) { (val) }
/**
* Initialize an atomic counter.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void
rte_atomic32_init(rte_atomic32_t *v)
{
v->cnt = 0;
}
/**
* Atomically read a 32-bit value from a counter.
*
* @param v
* A pointer to the atomic counter.
* @return
* The value of the counter.
*/
static inline int32_t
rte_atomic32_read(const rte_atomic32_t *v)
{
return v->cnt;
}
/**
* Atomically set a counter to a 32-bit value.
*
* @param v
* A pointer to the atomic counter.
* @param new_value
* The new value for the counter.
*/
static inline void
rte_atomic32_set(rte_atomic32_t *v, int32_t new_value)
{
v->cnt = new_value;
}
/**
* Atomically add a 32-bit value to an atomic counter.
*
* @param v
* A pointer to the atomic counter.
* @param inc
* The value to be added to the counter.
*/
static inline void
rte_atomic32_add(rte_atomic32_t *v, int32_t inc)
{
asm volatile(
MPLOCKED
"addl %[inc], %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: [inc] "ir" (inc), /* input */
"m" (v->cnt)
);
}
/**
* Atomically subtract a 32-bit value from an atomic counter.
*
* @param v
* A pointer to the atomic counter.
* @param dec
* The value to be subtracted from the counter.
*/
static inline void
rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)
{
asm volatile(
MPLOCKED
"subl %[dec], %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: [dec] "ir" (dec), /* input */
"m" (v->cnt)
);
}
/**
* Atomically increment a counter by one.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void
rte_atomic32_inc(rte_atomic32_t *v)
{
asm volatile(
MPLOCKED
"incl %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: "m" (v->cnt) /* input */
);
}
/**
* Atomically decrement a counter by one.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void
rte_atomic32_dec(rte_atomic32_t *v)
{
asm volatile(
MPLOCKED
"decl %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: "m" (v->cnt) /* input */
);
}
/**
* Atomically add a 32-bit value to a counter and return the result.
*
* Atomically adds the 32-bit value (inc) to the atomic counter (v) and
* returns the value of v after addition.
*
* @param v
* A pointer to the atomic counter.
* @param inc
* The value to be added to the counter.
* @return
* The value of v after the addition.
*/
static inline int32_t
rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc)
{
int32_t prev = inc;
asm volatile(
MPLOCKED
"xaddl %[prev], %[cnt]"
: [prev] "+r" (prev), /* output */
[cnt] "=m" (v->cnt)
: "m" (v->cnt) /* input */
);
return (int32_t)(prev + inc);
}
/**
* Atomically subtract a 32-bit value from a counter and return
* the result.
*
* Atomically subtracts the 32-bit value (dec) from the atomic counter
* (v) and returns the value of v after the subtraction.
*
* @param v
* A pointer to the atomic counter.
* @param dec
* The value to be subtracted from the counter.
* @return
* The value of v after the subtraction.
*/
static inline int32_t
rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)
{
return rte_atomic32_add_return(v, -dec);
}
/**
* Atomically increment a 32-bit counter by one and test.
*
* Atomically increments the atomic counter (v) by one and returns true if
* the result is 0, or false in all other cases.
*
* @param v
* A pointer to the atomic counter.
* @return
* True if the result after the increment operation is 0; false otherwise.
*/
static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
uint8_t ret;
asm volatile(
MPLOCKED
"incl %[cnt] ; "
"sete %[ret]"
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
}
/**
* Atomically decrement a 32-bit counter by one and test.
*
* Atomically decrements the atomic counter (v) by one and returns true if
* the result is 0, or false in all other cases.
*
* @param v
* A pointer to the atomic counter.
* @return
* True if the result after the decrement operation is 0; false otherwise.
*/
static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
uint8_t ret;
asm volatile(MPLOCKED
"decl %[cnt] ; "
"sete %[ret]"
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
}
/**
* Atomically test and set a 32-bit atomic counter.
*
* If the counter value is already set, return 0 (failed). Otherwise, set
* the counter value to 1 and return 1 (success).
*
* @param v
* A pointer to the atomic counter.
* @return
* 0 if failed; else 1, success.
*/
static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
}
/**
* Atomically set a 32-bit counter to 0.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void rte_atomic32_clear(rte_atomic32_t *v)
{
v->cnt = 0;
}
/*------------------------- 64 bit atomic operations -------------------------*/
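The deletion stops at the 64-bit banner: the 16- and 32-bit operations were duplicated between the i686 and x86_64 headers and move into the common rte_atomic.h, while the 64-bit implementations, which genuinely differ between the two architectures, stay in the arch headers. As a usage note, the cmpset primitive above is the building block for arbitrary lock-free read-modify-write; a hedged sketch of the usual retry loop (hypothetical helper, not part of the header):

    /* Add inc to v unless that would push it past cap; returns 1 on success. */
    static inline int
    atomic32_add_capped(rte_atomic32_t *v, int32_t inc, int32_t cap)
    {
        int32_t old, nv;

        do {
            old = rte_atomic32_read(v);
            if (old > cap - inc)
                return 0;                 /* would exceed the cap: give up */
            nv = old + inc;
        } while (rte_atomic32_cmpset((volatile uint32_t *)&v->cnt,
                                     (uint32_t)old, (uint32_t)nv) == 0);
        return 1;                         /* our old->nv transition won */
    }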

View File

@@ -51,10 +51,12 @@ extern "C" {
#endif
#include <stdint.h>
#include "arch/rte_atomic.h"
#ifdef __DOXYGEN__
#if RTE_MAX_LCORE == 1
#define MPLOCKED /**< No need to insert MP lock prefix. */
#else
#define MPLOCKED "lock ; " /**< Insert MP lock prefix. */
#endif
/**
* General memory barrier.
@@ -80,6 +82,11 @@ extern "C" {
*/
#define rte_rmb() asm volatile("lfence;" : : : "memory")
/**
* @file
* Atomic Operations on x86_64
*/
/*------------------------- 16 bit atomic operations -------------------------*/
/**
@@ -99,7 +106,22 @@ extern "C" {
* Non-zero on success; 0 on failure.
*/
static inline int
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src);
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
{
uint8_t res;
asm volatile(
MPLOCKED
"cmpxchgw %[src], %[dst];"
"sete %[res];"
: [res] "=a" (res), /* output */
[dst] "=m" (*dst)
: [src] "r" (src), /* input */
"a" (exp),
"m" (*dst)
: "memory"); /* no-clobber list */
return res;
}
/**
* The atomic counter structure.
@@ -134,7 +156,10 @@ rte_atomic16_init(rte_atomic16_t *v)
* The value of the counter.
*/
static inline int16_t
rte_atomic16_read(const rte_atomic16_t *v);
rte_atomic16_read(const rte_atomic16_t *v)
{
return v->cnt;
}
/**
* Atomically set a counter to a 16-bit value.
@@ -145,7 +170,10 @@ rte_atomic16_read(const rte_atomic16_t *v);
* The new value for the counter.
*/
static inline void
rte_atomic16_set(rte_atomic16_t *v, int16_t new_value);
rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
{
v->cnt = new_value;
}
/**
* Atomically add a 16-bit value to an atomic counter.
@@ -156,7 +184,10 @@ rte_atomic16_set(rte_atomic16_t *v, int16_t new_value);
* The value to be added to the counter.
*/
static inline void
rte_atomic16_add(rte_atomic16_t *v, int16_t inc);
rte_atomic16_add(rte_atomic16_t *v, int16_t inc)
{
__sync_fetch_and_add(&v->cnt, inc);
}
/**
* Atomically subtract a 16-bit value from an atomic counter.
@@ -167,7 +198,10 @@ rte_atomic16_add(rte_atomic16_t *v, int16_t inc);
* The value to be subtracted from the counter.
*/
static inline void
rte_atomic16_sub(rte_atomic16_t *v, int16_t dec);
rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)
{
__sync_fetch_and_sub(&v->cnt, dec);
}
/**
* Atomically increment a counter by one.
@@ -176,7 +210,15 @@ rte_atomic16_sub(rte_atomic16_t *v, int16_t dec);
* A pointer to the atomic counter.
*/
static inline void
rte_atomic16_inc(rte_atomic16_t *v);
rte_atomic16_inc(rte_atomic16_t *v)
{
asm volatile(
MPLOCKED
"incw %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: "m" (v->cnt) /* input */
);
}
/**
* Atomically decrement a counter by one.
@@ -185,7 +227,15 @@ rte_atomic16_inc(rte_atomic16_t *v);
* A pointer to the atomic counter.
*/
static inline void
rte_atomic16_dec(rte_atomic16_t *v);
rte_atomic16_dec(rte_atomic16_t *v)
{
asm volatile(
MPLOCKED
"decw %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: "m" (v->cnt) /* input */
);
}
/**
* Atomically add a 16-bit value to a counter and return the result.
@@ -201,7 +251,10 @@ rte_atomic16_dec(rte_atomic16_t *v);
* The value of v after the addition.
*/
static inline int16_t
rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc);
rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
{
return __sync_add_and_fetch(&v->cnt, inc);
}
/**
* Atomically subtract a 16-bit value from a counter and return
@@ -218,7 +271,10 @@ rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc);
* The value of v after the subtraction.
*/
static inline int16_t
rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec);
rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)
{
return __sync_sub_and_fetch(&v->cnt, dec);
}
/**
* Atomically increment a 16-bit counter by one and test.
@@ -231,8 +287,19 @@ rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec);
* @return
* True if the result after the increment operation is 0; false otherwise.
*/
static inline int
rte_atomic16_inc_and_test(rte_atomic16_t *v);
static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
uint8_t ret;
asm volatile(
MPLOCKED
"incw %[cnt] ; "
"sete %[ret]"
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
}
/**
* Atomically decrement a 16-bit counter by one and test.
@@ -245,8 +312,18 @@ rte_atomic16_inc_and_test(rte_atomic16_t *v);
* @return
* True if the result after the decrement operation is 0; false otherwise.
*/
static inline int
rte_atomic16_dec_and_test(rte_atomic16_t *v);
static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
uint8_t ret;
asm volatile(MPLOCKED
"decw %[cnt] ; "
"sete %[ret]"
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
}
/**
* Atomically test and set a 16-bit atomic counter.
@@ -259,8 +336,10 @@ rte_atomic16_dec_and_test(rte_atomic16_t *v);
* @return
* 0 if failed; else 1, success.
*/
static inline int
rte_atomic16_test_and_set(rte_atomic16_t *v);
static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
}
/**
* Atomically set a 16-bit counter to 0.
@@ -268,8 +347,10 @@ rte_atomic16_test_and_set(rte_atomic16_t *v);
* @param v
* A pointer to the atomic counter.
*/
static inline void
rte_atomic16_clear(rte_atomic16_t *v);
static inline void rte_atomic16_clear(rte_atomic16_t *v)
{
v->cnt = 0;
}
/*------------------------- 32 bit atomic operations -------------------------*/
@@ -290,7 +371,22 @@ rte_atomic16_clear(rte_atomic16_t *v);
* Non-zero on success; 0 on failure.
*/
static inline int
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src);
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
uint8_t res;
asm volatile(
MPLOCKED
"cmpxchgl %[src], %[dst];"
"sete %[res];"
: [res] "=a" (res), /* output */
[dst] "=m" (*dst)
: [src] "r" (src), /* input */
"a" (exp),
"m" (*dst)
: "memory"); /* no-clobber list */
return res;
}
/**
* The atomic counter structure.
@@ -311,7 +407,10 @@ typedef struct {
* A pointer to the atomic counter.
*/
static inline void
rte_atomic32_init(rte_atomic32_t *v);
rte_atomic32_init(rte_atomic32_t *v)
{
v->cnt = 0;
}
/**
* Atomically read a 32-bit value from a counter.
@@ -322,7 +421,10 @@ rte_atomic32_init(rte_atomic32_t *v);
* The value of the counter.
*/
static inline int32_t
rte_atomic32_read(const rte_atomic32_t *v);
rte_atomic32_read(const rte_atomic32_t *v)
{
return v->cnt;
}
/**
* Atomically set a counter to a 32-bit value.
@@ -333,7 +435,10 @@ rte_atomic32_read(const rte_atomic32_t *v);
* The new value for the counter.
*/
static inline void
rte_atomic32_set(rte_atomic32_t *v, int32_t new_value);
rte_atomic32_set(rte_atomic32_t *v, int32_t new_value)
{
v->cnt = new_value;
}
/**
* Atomically add a 32-bit value to an atomic counter.
@@ -344,7 +449,10 @@ rte_atomic32_set(rte_atomic32_t *v, int32_t new_value);
* The value to be added to the counter.
*/
static inline void
rte_atomic32_add(rte_atomic32_t *v, int32_t inc);
rte_atomic32_add(rte_atomic32_t *v, int32_t inc)
{
__sync_fetch_and_add(&v->cnt, inc);
}
/**
* Atomically subtract a 32-bit value from an atomic counter.
@@ -355,7 +463,10 @@ rte_atomic32_add(rte_atomic32_t *v, int32_t inc);
* The value to be subtracted from the counter.
*/
static inline void
rte_atomic32_sub(rte_atomic32_t *v, int32_t dec);
rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)
{
__sync_fetch_and_sub(&v->cnt, dec);
}
/**
* Atomically increment a counter by one.
@@ -364,7 +475,15 @@ rte_atomic32_sub(rte_atomic32_t *v, int32_t dec);
* A pointer to the atomic counter.
*/
static inline void
rte_atomic32_inc(rte_atomic32_t *v);
rte_atomic32_inc(rte_atomic32_t *v)
{
asm volatile(
MPLOCKED
"incl %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: "m" (v->cnt) /* input */
);
}
/**
* Atomically decrement a counter by one.
@@ -373,7 +492,15 @@ rte_atomic32_inc(rte_atomic32_t *v);
* A pointer to the atomic counter.
*/
static inline void
rte_atomic32_dec(rte_atomic32_t *v);
rte_atomic32_dec(rte_atomic32_t *v)
{
asm volatile(
MPLOCKED
"decl %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: "m" (v->cnt) /* input */
);
}
/**
* Atomically add a 32-bit value to a counter and return the result.
@@ -389,7 +516,10 @@ rte_atomic32_dec(rte_atomic32_t *v);
* The value of v after the addition.
*/
static inline int32_t
rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc);
rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc)
{
return __sync_add_and_fetch(&v->cnt, inc);
}
/**
* Atomically subtract a 32-bit value from a counter and return
@@ -406,7 +536,10 @@ rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc);
* The value of v after the subtraction.
*/
static inline int32_t
rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec);
rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)
{
return __sync_sub_and_fetch(&v->cnt, dec);
}
/**
* Atomically increment a 32-bit counter by one and test.
@@ -419,8 +552,19 @@ rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec);
* @return
* True if the result after the increment operation is 0; false otherwise.
*/
static inline int
rte_atomic32_inc_and_test(rte_atomic32_t *v);
static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
uint8_t ret;
asm volatile(
MPLOCKED
"incl %[cnt] ; "
"sete %[ret]"
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
}
/**
* Atomically decrement a 32-bit counter by one and test.
@@ -433,8 +577,18 @@ rte_atomic32_inc_and_test(rte_atomic32_t *v);
* @return
* True if the result after the decrement operation is 0; false otherwise.
*/
static inline int
rte_atomic32_dec_and_test(rte_atomic32_t *v);
static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
uint8_t ret;
asm volatile(MPLOCKED
"decl %[cnt] ; "
"sete %[ret]"
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
}
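dec_and_test is the classic reference-counting primitive: exactly one caller observes the transition to zero, so exactly one caller runs the teardown. A hedged sketch (hypothetical object type, assumes <stdlib.h>):

    struct obj {
        rte_atomic32_t refcnt;            /* set to 1 by the creator */
        /* ... payload ... */
    };

    static void obj_put(struct obj *o)
    {
        /* only the caller that drops the count to 0 sees a non-zero
         * return, so the free below cannot happen twice */
        if (rte_atomic32_dec_and_test(&o->refcnt))
            free(o);
    }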
/**
* Atomically test and set a 32-bit atomic counter.
@@ -447,8 +601,10 @@ rte_atomic32_dec_and_test(rte_atomic32_t *v);
* @return
* 0 if failed; else 1, success.
*/
static inline int
rte_atomic32_test_and_set(rte_atomic32_t *v);
static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
}
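test_and_set gives a one-shot flag: the first caller gets 1 back, everyone after gets 0. A small sketch of a run-once guard built on it (do_init() is a hypothetical user function; note that latecomers fall through before the winner has finished, so real code would also wait on a "done" flag):

    static rte_atomic32_t init_flag = RTE_ATOMIC32_INIT(0);

    static void init_once(void)
    {
        if (rte_atomic32_test_and_set(&init_flag))
            do_init();                    /* runs exactly once */
    }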
/**
* Atomically set a 32-bit counter to 0.
@@ -456,8 +612,16 @@ rte_atomic32_test_and_set(rte_atomic32_t *v);
* @param v
* A pointer to the atomic counter.
*/
static inline void
rte_atomic32_clear(rte_atomic32_t *v);
static inline void rte_atomic32_clear(rte_atomic32_t *v)
{
v->cnt = 0;
}
/* any other functions are in arch-specific files */
#include "arch/rte_atomic.h"
#ifdef __DOXYGEN__
/*------------------------- 64 bit atomic operations -------------------------*/

View File

@@ -42,6 +42,7 @@
*/
#include <stdint.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
@@ -77,8 +78,8 @@ struct rte_memseg {
void *addr; /**< Start virtual address. */
uint64_t addr_64; /**< Makes sure addr is always 64 bits */
};
uint64_t len; /**< Length of the segment. */
uint64_t hugepage_sz; /**< The pagesize of underlying memory */
size_t len; /**< Length of the segment. */
size_t hugepage_sz; /**< The pagesize of underlying memory */
int32_t socket_id; /**< NUMA socket ID. */
uint32_t nchannel; /**< Number of channels. */
uint32_t nrank; /**< Number of ranks. */

View File

@@ -76,9 +76,9 @@ struct rte_memzone {
void *addr; /**< Start virtual address. */
uint64_t addr_64; /**< Makes sure addr is always 64-bits */
};
uint64_t len; /**< Length of the memzone. */
size_t len; /**< Length of the memzone. */
uint64_t hugepage_sz; /**< The page size of underlying memory */
size_t hugepage_sz; /**< The page size of underlying memory */
int32_t socket_id; /**< NUMA socket ID. */
@@ -124,7 +124,7 @@ struct rte_memzone {
* - EINVAL - invalid parameters
*/
const struct rte_memzone *rte_memzone_reserve(const char *name,
uint64_t len, int socket_id,
size_t len, int socket_id,
unsigned flags);
/**
@@ -171,7 +171,7 @@ const struct rte_memzone *rte_memzone_reserve(const char *name,
* - EINVAL - invalid parameters
*/
const struct rte_memzone *rte_memzone_reserve_aligned(const char *name,
uint64_t len, int socket_id, unsigned flags,
size_t len, int socket_id, unsigned flags,
unsigned align);
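With len a size_t, callers on 32-bit targets can no longer silently request more than the address space can express. A minimal usage sketch (zone name hypothetical):

    #include <rte_memzone.h>

    static const struct rte_memzone *
    reserve_2mb_zone(void)
    {
        /* 2 MB, any NUMA socket, no flags; len is a native size_t */
        return rte_memzone_reserve("example_zone", (size_t)2 * 1024 * 1024,
                                   SOCKET_ID_ANY, 0);
    }

A NULL return leaves the reason in rte_errno (ENOMEM, EINVAL, and so on), as listed above.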
/**

View File

@@ -45,612 +45,6 @@
#ifndef _RTE_X86_64_ATOMIC_H_
#define _RTE_X86_64_ATOMIC_H_
/**
* @file
* Atomic Operations on x86_64
*/
#if RTE_MAX_LCORE == 1
#define MPLOCKED /**< No need to insert MP lock prefix. */
#else
#define MPLOCKED "lock ; " /**< Insert MP lock prefix. */
#endif
/**
* General memory barrier.
*
* Guarantees that the LOAD and STORE operations generated before the
* barrier occur before the LOAD and STORE operations generated after.
*/
#define rte_mb() asm volatile("mfence;" : : : "memory")
/**
* Write memory barrier.
*
* Guarantees that the STORE operations generated before the barrier
* occur before the STORE operations generated after.
*/
#define rte_wmb() asm volatile("sfence;" : : : "memory")
/**
* Read memory barrier.
*
* Guarantees that the LOAD operations generated before the barrier
* occur before the LOAD operations generated after.
*/
#define rte_rmb() asm volatile("lfence;" : : : "memory")
/*------------------------- 16 bit atomic operations -------------------------*/
/**
* Atomic compare and set.
*
* (atomic) equivalent to:
* if (*dst == exp)
* *dst = src (all 16-bit words)
*
* @param dst
* The destination location into which the value will be written.
* @param exp
* The expected value.
* @param src
* The new value.
* @return
* Non-zero on success; 0 on failure.
*/
static inline int
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
{
uint8_t res;
asm volatile(
MPLOCKED
"cmpxchgw %[src], %[dst];"
"sete %[res];"
: [res] "=a" (res), /* output */
[dst] "=m" (*dst)
: [src] "r" (src), /* input */
"a" (exp),
"m" (*dst)
: "memory"); /* no-clobber list */
return res;
}
/**
* The atomic counter structure.
*/
typedef struct {
volatile int16_t cnt; /**< An internal counter value. */
} rte_atomic16_t;
/**
* Static initializer for an atomic counter.
*/
#define RTE_ATOMIC16_INIT(val) { (val) }
/**
* Initialize an atomic counter.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void
rte_atomic16_init(rte_atomic16_t *v)
{
v->cnt = 0;
}
/**
* Atomically read a 16-bit value from a counter.
*
* @param v
* A pointer to the atomic counter.
* @return
* The value of the counter.
*/
static inline int16_t
rte_atomic16_read(const rte_atomic16_t *v)
{
return v->cnt;
}
/**
* Atomically set a counter to a 16-bit value.
*
* @param v
* A pointer to the atomic counter.
* @param new_value
* The new value for the counter.
*/
static inline void
rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
{
v->cnt = new_value;
}
/**
* Atomically add a 16-bit value to an atomic counter.
*
* @param v
* A pointer to the atomic counter.
* @param inc
* The value to be added to the counter.
*/
static inline void
rte_atomic16_add(rte_atomic16_t *v, int16_t inc)
{
asm volatile(
MPLOCKED
"addw %[inc], %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: [inc] "ir" (inc), /* input */
"m" (v->cnt)
);
}
/**
* Atomically subtract a 16-bit value from an atomic counter.
*
* @param v
* A pointer to the atomic counter.
* @param dec
* The value to be subtracted from the counter.
*/
static inline void
rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)
{
asm volatile(
MPLOCKED
"subw %[dec], %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: [dec] "ir" (dec), /* input */
"m" (v->cnt)
);
}
/**
* Atomically increment a counter by one.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void
rte_atomic16_inc(rte_atomic16_t *v)
{
asm volatile(
MPLOCKED
"incw %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: "m" (v->cnt) /* input */
);
}
/**
* Atomically decrement a counter by one.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void
rte_atomic16_dec(rte_atomic16_t *v)
{
asm volatile(
MPLOCKED
"decw %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: "m" (v->cnt) /* input */
);
}
/**
* Atomically add a 16-bit value to a counter and return the result.
*
* Atomically adds the 16-bit value (inc) to the atomic counter (v) and
* returns the value of v after addition.
*
* @param v
* A pointer to the atomic counter.
* @param inc
* The value to be added to the counter.
* @return
* The value of v after the addition.
*/
static inline int16_t
rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
{
int16_t prev = inc;
asm volatile(
MPLOCKED
"xaddw %[prev], %[cnt]"
: [prev] "+r" (prev), /* output */
[cnt] "=m" (v->cnt)
: "m" (v->cnt) /* input */
);
return (int16_t)(prev + inc);
}
/**
* Atomically subtract a 16-bit value from a counter and return
* the result.
*
* Atomically subtracts the 16-bit value (dec) from the atomic counter
* (v) and returns the value of v after the subtraction.
*
* @param v
* A pointer to the atomic counter.
* @param dec
* The value to be subtracted from the counter.
* @return
* The value of v after the subtraction.
*/
static inline int16_t
rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)
{
return rte_atomic16_add_return(v, (int16_t)-dec);
}
/**
* Atomically increment a 16-bit counter by one and test.
*
* Atomically increments the atomic counter (v) by one and returns true if
* the result is 0, or false in all other cases.
*
* @param v
* A pointer to the atomic counter.
* @return
* True if the result after the increment operation is 0; false otherwise.
*/
static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
uint8_t ret;
asm volatile(
MPLOCKED
"incw %[cnt] ; "
"sete %[ret]"
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
}
/**
* Atomically decrement a 16-bit counter by one and test.
*
* Atomically decrements the atomic counter (v) by one and returns true if
* the result is 0, or false in all other cases.
*
* @param v
* A pointer to the atomic counter.
* @return
* True if the result after the decrement operation is 0; false otherwise.
*/
static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
uint8_t ret;
asm volatile(MPLOCKED
"decw %[cnt] ; "
"sete %[ret]"
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
}
/**
* Atomically test and set a 16-bit atomic counter.
*
* If the counter value is already set, return 0 (failed). Otherwise, set
* the counter value to 1 and return 1 (success).
*
* @param v
* A pointer to the atomic counter.
* @return
* 0 if failed; else 1, success.
*/
static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
}
/**
* Atomically set a 16-bit counter to 0.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void rte_atomic16_clear(rte_atomic16_t *v)
{
v->cnt = 0;
}
/*------------------------- 32 bit atomic operations -------------------------*/
/**
* Atomic compare and set.
*
* (atomic) equivalent to:
* if (*dst == exp)
* *dst = src (all 32-bit words)
*
* @param dst
* The destination location into which the value will be written.
* @param exp
* The expected value.
* @param src
* The new value.
* @return
* Non-zero on success; 0 on failure.
*/
static inline int
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
uint8_t res;
asm volatile(
MPLOCKED
"cmpxchgl %[src], %[dst];"
"sete %[res];"
: [res] "=a" (res), /* output */
[dst] "=m" (*dst)
: [src] "r" (src), /* input */
"a" (exp),
"m" (*dst)
: "memory"); /* no-clobber list */
return res;
}
/**
* The atomic counter structure.
*/
typedef struct {
volatile int32_t cnt; /**< An internal counter value. */
} rte_atomic32_t;
/**
* Static initializer for an atomic counter.
*/
#define RTE_ATOMIC32_INIT(val) { (val) }
/**
* Initialize an atomic counter.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void
rte_atomic32_init(rte_atomic32_t *v)
{
v->cnt = 0;
}
/**
* Atomically read a 32-bit value from a counter.
*
* @param v
* A pointer to the atomic counter.
* @return
* The value of the counter.
*/
static inline int32_t
rte_atomic32_read(const rte_atomic32_t *v)
{
return v->cnt;
}
/**
* Atomically set a counter to a 32-bit value.
*
* @param v
* A pointer to the atomic counter.
* @param new_value
* The new value for the counter.
*/
static inline void
rte_atomic32_set(rte_atomic32_t *v, int32_t new_value)
{
v->cnt = new_value;
}
/**
* Atomically add a 32-bit value to an atomic counter.
*
* @param v
* A pointer to the atomic counter.
* @param inc
* The value to be added to the counter.
*/
static inline void
rte_atomic32_add(rte_atomic32_t *v, int32_t inc)
{
asm volatile(
MPLOCKED
"addl %[inc], %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: [inc] "ir" (inc), /* input */
"m" (v->cnt)
);
}
/**
* Atomically subtract a 32-bit value from an atomic counter.
*
* @param v
* A pointer to the atomic counter.
* @param dec
* The value to be subtracted from the counter.
*/
static inline void
rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)
{
asm volatile(
MPLOCKED
"subl %[dec], %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: [dec] "ir" (dec), /* input */
"m" (v->cnt)
);
}
/**
* Atomically increment a counter by one.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void
rte_atomic32_inc(rte_atomic32_t *v)
{
asm volatile(
MPLOCKED
"incl %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: "m" (v->cnt) /* input */
);
}
/**
* Atomically decrement a counter by one.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void
rte_atomic32_dec(rte_atomic32_t *v)
{
asm volatile(
MPLOCKED
"decl %[cnt]"
: [cnt] "=m" (v->cnt) /* output */
: "m" (v->cnt) /* input */
);
}
/**
* Atomically add a 32-bit value to a counter and return the result.
*
* Atomically adds the 32-bit value (inc) to the atomic counter (v) and
* returns the value of v after addition.
*
* @param v
* A pointer to the atomic counter.
* @param inc
* The value to be added to the counter.
* @return
* The value of v after the addition.
*/
static inline int32_t
rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc)
{
int32_t prev = inc;
asm volatile(
MPLOCKED
"xaddl %[prev], %[cnt]"
: [prev] "+r" (prev), /* output */
[cnt] "=m" (v->cnt)
: "m" (v->cnt) /* input */
);
return (int32_t)(prev + inc);
}
/**
* Atomically subtract a 32-bit value from a counter and return
* the result.
*
* Atomically subtracts the 32-bit value (dec) from the atomic counter
* (v) and returns the value of v after the subtraction.
*
* @param v
* A pointer to the atomic counter.
* @param dec
* The value to be subtracted from the counter.
* @return
* The value of v after the subtraction.
*/
static inline int32_t
rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)
{
return rte_atomic32_add_return(v, -dec);
}
/**
* Atomically increment a 32-bit counter by one and test.
*
* Atomically increments the atomic counter (v) by one and returns true if
* the result is 0, or false in all other cases.
*
* @param v
* A pointer to the atomic counter.
* @return
* True if the result after the increment operation is 0; false otherwise.
*/
static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
uint8_t ret;
asm volatile(
MPLOCKED
"incl %[cnt] ; "
"sete %[ret]"
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
}
/**
* Atomically decrement a 32-bit counter by one and test.
*
* Atomically decrements the atomic counter (v) by one and returns true if
* the result is 0, or false in all other cases.
*
* @param v
* A pointer to the atomic counter.
* @return
* True if the result after the decrement operation is 0; false otherwise.
*/
static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
uint8_t ret;
asm volatile(MPLOCKED
"decl %[cnt] ; "
"sete %[ret]"
: [cnt] "+m" (v->cnt), /* output */
[ret] "=qm" (ret)
);
return (ret != 0);
}
/**
* Atomically test and set a 32-bit atomic counter.
*
* If the counter value is already set, return 0 (failed). Otherwise, set
* the counter value to 1 and return 1 (success).
*
* @param v
* A pointer to the atomic counter.
* @return
* 0 if failed; else 1, success.
*/
static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
}
/**
* Atomically set a 32-bit counter to 0.
*
* @param v
* A pointer to the atomic counter.
*/
static inline void rte_atomic32_clear(rte_atomic32_t *v)
{
v->cnt = 0;
}
/*------------------------- 64 bit atomic operations -------------------------*/

View File

@@ -416,7 +416,7 @@ eal_parse_socket_mem(char *socket_mem)
return 0;
}
static inline uint64_t
static inline size_t
eal_get_hugepage_mem_size(void)
{
uint64_t size = 0;
@@ -431,7 +431,7 @@ eal_get_hugepage_mem_size(void)
}
}
return (size);
return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
}
static enum rte_proc_type_t

View File

@@ -190,13 +190,13 @@ increase_open_file_limit(void)
* which is a multiple of hugepage size.
*/
static void *
get_virtual_area(uint64_t *size, uint64_t hugepage_sz)
get_virtual_area(size_t *size, size_t hugepage_sz)
{
void *addr;
int fd;
long aligned_addr;
RTE_LOG(INFO, EAL, "Ask a virtual area of 0x%"PRIx64" bytes\n", *size);
RTE_LOG(INFO, EAL, "Ask a virtual area of 0x%zu bytes\n", *size);
fd = open("/dev/zero", O_RDONLY);
if (fd < 0){
@@ -224,7 +224,7 @@ get_virtual_area(uint64_t *size, uint64_t hugepage_sz)
aligned_addr &= (~(hugepage_sz - 1));
addr = (void *)(aligned_addr);
RTE_LOG(INFO, EAL, "Virtual area found at %p (size = 0x%"PRIx64")\n",
RTE_LOG(INFO, EAL, "Virtual area found at %p (size = 0x%zx)\n",
addr, *size);
return addr;
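The masking above is the standard align-down trick and relies on hugepage_sz being a power of two; clearing the low bits rounds the address down to the previous hugepage boundary. In isolation (hypothetical helper, power-of-two alignment assumed):

    #include <stdint.h>

    /* Round addr down to a multiple of align (align must be a power of 2). */
    static uintptr_t align_down(uintptr_t addr, uintptr_t align)
    {
        return addr & ~(align - 1);       /* clears the low log2(align) bits */
    }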
@@ -245,10 +245,10 @@ map_all_hugepages(struct hugepage *hugepg_tbl,
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
uint64_t vma_len = 0;
size_t vma_len = 0;
for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;
size_t hugepage_sz = hpi->hugepage_sz;
if (orig) {
hugepg_tbl[i].file_id = i;

View File

@@ -35,6 +35,8 @@
#ifndef RTE_LINUXAPP_HUGEPAGES_H_
#define RTE_LINUXAPP_HUGEPAGES_H_
#include <stddef.h>
#define MAX_HUGEPAGE_PATH PATH_MAX
/**
@@ -45,7 +47,7 @@ struct hugepage {
void *orig_va; /**< virtual addr of first mmap() */
void *final_va; /**< virtual addr of 2nd mmap() */
uint64_t physaddr; /**< physical addr */
uint64_t size; /**< the page size */
size_t size; /**< the page size */
int socket_id; /**< NUMA socket ID */
int file_id; /**< the '%d' in HUGEFILE_FMT */
int memseg_id; /**< the memory segment to which page belongs */

View File

@@ -49,7 +49,7 @@
* mount points of hugepages
*/
struct hugepage_info {
uint64_t hugepage_sz; /**< size of a huge page */
size_t hugepage_sz; /**< size of a huge page */
const char *hugedir; /**< dir where hugetlbfs is mounted */
uint32_t num_pages[RTE_MAX_NUMA_NODES];
/**< number of hugepages of that size on each socket */
@@ -60,7 +60,7 @@ struct hugepage_info {
* internal configuration
*/
struct internal_config {
volatile uint64_t memory; /**< amount of asked memory */
volatile size_t memory; /**< amount of asked memory */
volatile unsigned force_nchannel; /**< force number of channels */
volatile unsigned force_nrank; /**< force number of ranks */
volatile unsigned no_hugetlbfs; /**< true to disable hugetlbfs */

View File

@@ -133,7 +133,7 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
struct rte_mempool *mp = NULL;
struct rte_ring *r;
const struct rte_memzone *mz;
uint64_t mempool_size, total_elt_size;
size_t mempool_size, total_elt_size;
int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
int rg_flags = 0;
uint32_t header_size, trailer_size;

View File

@@ -1095,7 +1095,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
if ((mz = rte_memzone_lookup(z_name)) != 0)
return (mz);
return rte_memzone_reserve(z_name, (uint64_t) ring_size, socket_id, 0);
return rte_memzone_reserve(z_name, ring_size, socket_id, 0);
}
static void

View File

@@ -1076,7 +1076,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
if (mz)
return mz;
return rte_memzone_reserve_aligned(z_name, (uint64_t)ring_size,
return rte_memzone_reserve_aligned(z_name, ring_size,
socket_id, 0, IGB_ALIGN);
}

View File

@@ -1744,7 +1744,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
if (mz)
return mz;
return rte_memzone_reserve_aligned(z_name, (uint64_t) ring_size,
return rte_memzone_reserve_aligned(z_name, ring_size,
socket_id, 0, IXGBE_ALIGN);
}