numam-dpdk/drivers/mempool/stack/rte_mempool_stack.c
Gage Eads e75bc77f98 mempool/stack: add lock-free stack mempool handler
This commit adds support for a lock-free (linked-list based) stack mempool
handler.

In mempool_perf_autotest the lock-based stack outperforms the
lock-free handler for certain lcore/alloc count/free count
combinations*, however:
- For applications with preemptible pthreads, a standard (lock-based)
  stack's worst-case performance (i.e. one thread being preempted while
  holding the spinlock) is much worse than the lock-free stack's.
- Using per-thread mempool caches will largely mitigate the performance
  difference.

*Test setup: x86_64 build with default config, dual-socket Xeon E5-2699 v4,
running on isolcpus cores with a tickless scheduler. The lock-based stack's
rate_persec was 0.6x-3.5x the lock-free stack's.

Signed-off-by: Gage Eads <gage.eads@intel.com>
Reviewed-by: Olivier Matz <olivier.matz@6wind.com>
2019-04-04 22:06:16 +02:00

98 lines
1.8 KiB
C

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2016-2019 Intel Corporation
*/
#include <stdio.h>
#include <rte_mempool.h>
#include <rte_stack.h>
/*
 * Common allocation path for both stack mempool variants.
 *
 * Derives the stack name from the mempool name using the standard
 * memzone format, creates an rte_stack large enough to hold every
 * object in the pool, and records it in mp->pool_data.
 *
 * Returns 0 on success, -ENAMETOOLONG if the derived name does not
 * fit, or -rte_errno if stack creation fails.
 */
static int
__stack_alloc(struct rte_mempool *mp, uint32_t flags)
{
	char name[RTE_STACK_NAMESIZE];
	struct rte_stack *s;
	int n;

	n = snprintf(name, sizeof(name), RTE_MEMPOOL_MZ_FORMAT, mp->name);
	if (n < 0 || n >= (int)sizeof(name)) {
		rte_errno = ENAMETOOLONG;
		return -rte_errno;
	}

	s = rte_stack_create(name, mp->size, mp->socket_id, flags);
	if (s == NULL)
		return -rte_errno;

	mp->pool_data = s;

	return 0;
}
/* Allocate the backing store for the lock-based "stack" handler. */
static int
stack_alloc(struct rte_mempool *mp)
{
	/* No extra flags: plain spinlock-protected stack. */
	return __stack_alloc(mp, 0);
}
/* Allocate the backing store for the lock-free "lf_stack" handler. */
static int
lf_stack_alloc(struct rte_mempool *mp)
{
	/* RTE_STACK_F_LF selects the lock-free stack implementation. */
	return __stack_alloc(mp, RTE_STACK_F_LF);
}
/*
 * Return n objects to the pool by pushing them onto the backing stack.
 * rte_stack_push() is all-or-nothing; 0 pushed means the burst was
 * rejected, which we report as -ENOBUFS per the mempool ops contract.
 */
static int
stack_enqueue(struct rte_mempool *mp, void * const *obj_table,
	      unsigned int n)
{
	struct rte_stack *s = mp->pool_data;

	if (rte_stack_push(s, obj_table, n) == 0)
		return -ENOBUFS;

	return 0;
}
/*
 * Take n objects from the pool by popping them off the backing stack.
 * rte_stack_pop() is all-or-nothing; 0 popped means not enough objects
 * were available, reported as -ENOBUFS per the mempool ops contract.
 */
static int
stack_dequeue(struct rte_mempool *mp, void **obj_table,
	      unsigned int n)
{
	struct rte_stack *s = mp->pool_data;

	if (rte_stack_pop(s, obj_table, n) == 0)
		return -ENOBUFS;

	return 0;
}
/* Report the number of objects currently held in the backing stack. */
static unsigned
stack_get_count(const struct rte_mempool *mp)
{
	return rte_stack_count(mp->pool_data);
}
/* Release the backing stack created by __stack_alloc(). */
static void
stack_free(struct rte_mempool *mp)
{
	rte_stack_free(mp->pool_data);
}
/* Ops table for the spinlock-based stack handler ("stack"). */
static struct rte_mempool_ops ops_stack = {
.name = "stack",
.alloc = stack_alloc,
.free = stack_free,
.enqueue = stack_enqueue,
.dequeue = stack_dequeue,
.get_count = stack_get_count
};
/*
 * Ops table for the lock-free stack handler ("lf_stack"). Shares the
 * enqueue/dequeue/free/get_count callbacks with the lock-based handler;
 * only the alloc callback differs (it passes RTE_STACK_F_LF).
 */
static struct rte_mempool_ops ops_lf_stack = {
.name = "lf_stack",
.alloc = lf_stack_alloc,
.free = stack_free,
.enqueue = stack_enqueue,
.dequeue = stack_dequeue,
.get_count = stack_get_count
};
/* Register both handlers so they are selectable by name at runtime. */
MEMPOOL_REGISTER_OPS(ops_stack);
MEMPOOL_REGISTER_OPS(ops_lf_stack);