stack: introduce stack library

The rte_stack library provides an API for configuration and use of a
bounded stack of pointers. Push and pop operations are MT-safe, allowing
concurrent access, and the interface supports pushing and popping multiple
pointers at a time.

The library's interface is modeled after another DPDK data structure,
rte_ring, and its lock-based implementation is derived from the stack
mempool handler. An upcoming commit will migrate the stack mempool handler
to rte_stack.

Signed-off-by: Gage Eads <gage.eads@intel.com>
Reviewed-by: Olivier Matz <olivier.matz@6wind.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Author: Gage Eads <gage.eads@intel.com>
Date: 2019-04-03 18:20:13 -05:00
Committed by: Thomas Monjalon
parent 721ac9f9e0
commit 05d3b5283c
18 changed files with 667 additions and 1 deletion

@@ -315,6 +315,12 @@ F: doc/guides/prog_guide/ring_lib.rst
F: app/test/test_ring*
F: app/test/test_func_reentrancy.c
Stack - EXPERIMENTAL
M: Gage Eads <gage.eads@intel.com>
M: Olivier Matz <olivier.matz@6wind.com>
F: lib/librte_stack/
F: doc/guides/prog_guide/stack_lib.rst
Packet buffer
M: Olivier Matz <olivier.matz@6wind.com>
F: lib/librte_mbuf/

@@ -732,6 +732,11 @@ CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV=y
#
CONFIG_RTE_LIBRTE_RING=y
#
# Compile librte_stack
#
CONFIG_RTE_LIBRTE_STACK=y
#
# Compile librte_mempool
#

@@ -125,6 +125,7 @@ The public API headers are grouped by topics:
[mbuf] (@ref rte_mbuf.h),
[mbuf pool ops] (@ref rte_mbuf_pool_ops.h),
[ring] (@ref rte_ring.h),
[stack] (@ref rte_stack.h),
[tailq] (@ref rte_tailq.h),
[bitmap] (@ref rte_bitmap.h)

@@ -55,6 +55,7 @@ INPUT = @TOPDIR@/doc/api/doxy-api-index.md \
@TOPDIR@/lib/librte_ring \
@TOPDIR@/lib/librte_sched \
@TOPDIR@/lib/librte_security \
@TOPDIR@/lib/librte_stack \
@TOPDIR@/lib/librte_table \
@TOPDIR@/lib/librte_telemetry \
@TOPDIR@/lib/librte_timer \

@@ -13,6 +13,7 @@ Programmer's Guide
env_abstraction_layer
service_cores
ring_lib
stack_lib
mempool_lib
mbuf_lib
poll_mode_drv

@@ -0,0 +1,28 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2019 Intel Corporation.
Stack Library
=============
DPDK's stack library provides an API for configuration and use of a bounded
stack of pointers.
The stack library provides the following basic operations:
* Create a uniquely named stack of a user-specified size on a
  user-specified socket.
* Push and pop a burst of one or more stack objects (pointers). These
  functions are MT-safe.
* Free a previously created stack.
* Look up a pointer to a stack by its name.
* Query a stack's current depth and number of free entries.
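A minimal usage sketch is shown below (illustrative only: the stack name and
size are placeholders, error handling is abbreviated, and an initialized EAL
is assumed):

.. code-block:: c

    #include <rte_stack.h>

    static void
    stack_example(void)
    {
        int val = 42;             /* an object to track by pointer */
        void *obj = &val;
        void *popped;
        struct rte_stack *s;

        /* Create a stack that can hold up to 1024 pointers. */
        s = rte_stack_create("example", 1024, SOCKET_ID_ANY, 0);
        if (s == NULL)
            rte_panic("cannot create stack: %d\n", rte_errno);

        /* Push one pointer, then pop it back. Both calls are
         * all-or-nothing and return the number of objects transferred.
         */
        if (rte_stack_push(s, &obj, 1) == 1)
            rte_stack_pop(s, &popped, 1);

        rte_stack_free(s);
    }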
Implementation
~~~~~~~~~~~~~~
The stack consists of a contiguous array of pointers, a current index, and a
spinlock. Accesses to the stack are made multi-thread safe by the spinlock.
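In simplified form (a sketch of the approach rather than the exact library
code; assumes ``<string.h>`` and ``<rte_stack.h>`` are included), a push takes
the lock, verifies that the burst fits, copies the pointers, and advances the
length:

.. code-block:: c

    static unsigned int
    std_push_sketch(struct rte_stack *s, void * const *obj_table,
                    unsigned int n)
    {
        struct rte_stack_std *stack = &s->stack_std;
        unsigned int ret = 0;

        rte_spinlock_lock(&stack->lock);
        if (stack->len + n <= s->capacity) {
            /* All-or-nothing: copy the whole burst or none of it. */
            memcpy(&stack->objs[stack->len], obj_table,
                   n * sizeof(void *));
            stack->len += n;
            ret = n;
        }
        rte_spinlock_unlock(&stack->lock);
        return ret;
    }

Pop is symmetric: it copies the top *n* pointers out in LIFO order and
decrements the length, again under the lock.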

@@ -54,6 +54,12 @@ New Features
Also, make sure to start the actual text at the margin.
=========================================================
* **Added Stack API.**
Added a new stack API for configuration and use of a bounded stack of
pointers. The API provides MT-safe push and pop operations that can handle
one or more pointers per call.
* **Updated KNI module and PMD.**
Updated the KNI kernel module to set the max_mtu according to the given
@@ -264,6 +270,7 @@ The libraries prepended with a plus sign were incremented in this version.
librte_ring.so.2
librte_sched.so.2
librte_security.so.2
+ librte_stack.so.1
librte_table.so.3
librte_timer.so.1
librte_vhost.so.4

@@ -10,6 +10,8 @@ DIRS-$(CONFIG_RTE_LIBRTE_PCI) += librte_pci
DEPDIRS-librte_pci := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring
DEPDIRS-librte_ring := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_STACK) += librte_stack
DEPDIRS-librte_stack := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool
DEPDIRS-librte_mempool := librte_eal librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf

lib/librte_stack/Makefile (new file, 25 lines)

@@ -0,0 +1,25 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2019 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
# library name
LIB = librte_stack.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
CFLAGS += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lrte_eal
EXPORT_MAP := rte_stack_version.map
LIBABIVER := 1
# all sources are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_STACK) := rte_stack.c \
rte_stack_std.c
# install includes
SYMLINK-$(CONFIG_RTE_LIBRTE_STACK)-include := rte_stack.h \
rte_stack_std.h
include $(RTE_SDK)/mk/rte.lib.mk

@@ -0,0 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2019 Intel Corporation
allow_experimental_apis = true
version = 1
sources = files('rte_stack.c', 'rte_stack_std.c')
headers = files('rte_stack.h', 'rte_stack_std.h')

@@ -0,0 +1,182 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019 Intel Corporation
*/
#include <string.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_rwlock.h>
#include <rte_tailq.h>
#include "rte_stack.h"
#include "rte_stack_pvt.h"
int stack_logtype;
TAILQ_HEAD(rte_stack_list, rte_tailq_entry);
static struct rte_tailq_elem rte_stack_tailq = {
.name = RTE_TAILQ_STACK_NAME,
};
EAL_REGISTER_TAILQ(rte_stack_tailq)
static void
rte_stack_init(struct rte_stack *s)
{
memset(s, 0, sizeof(*s));
rte_stack_std_init(s);
}
static ssize_t
rte_stack_get_memsize(unsigned int count)
{
return rte_stack_std_get_memsize(count);
}
struct rte_stack *
rte_stack_create(const char *name, unsigned int count, int socket_id,
uint32_t flags)
{
char mz_name[RTE_MEMZONE_NAMESIZE];
struct rte_stack_list *stack_list;
const struct rte_memzone *mz;
struct rte_tailq_entry *te;
struct rte_stack *s;
unsigned int sz;
int ret;
RTE_SET_USED(flags);
sz = rte_stack_get_memsize(count);
ret = snprintf(mz_name, sizeof(mz_name), "%s%s",
RTE_STACK_MZ_PREFIX, name);
if (ret < 0 || ret >= (int)sizeof(mz_name)) {
rte_errno = ENAMETOOLONG;
return NULL;
}
te = rte_zmalloc("STACK_TAILQ_ENTRY", sizeof(*te), 0);
if (te == NULL) {
STACK_LOG_ERR("Cannot reserve memory for tailq\n");
rte_errno = ENOMEM;
return NULL;
}
rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
mz = rte_memzone_reserve_aligned(mz_name, sz, socket_id,
0, __alignof__(*s));
if (mz == NULL) {
STACK_LOG_ERR("Cannot reserve stack memzone!\n");
rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
rte_free(te);
return NULL;
}
s = mz->addr;
rte_stack_init(s);
/* Store the name for later lookups */
ret = snprintf(s->name, sizeof(s->name), "%s", name);
if (ret < 0 || ret >= (int)sizeof(s->name)) {
rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
rte_errno = ENAMETOOLONG;
rte_free(te);
rte_memzone_free(mz);
return NULL;
}
s->memzone = mz;
s->capacity = count;
s->flags = flags;
te->data = s;
stack_list = RTE_TAILQ_CAST(rte_stack_tailq.head, rte_stack_list);
TAILQ_INSERT_TAIL(stack_list, te, next);
rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
return s;
}
void
rte_stack_free(struct rte_stack *s)
{
struct rte_stack_list *stack_list;
struct rte_tailq_entry *te;
if (s == NULL)
return;
stack_list = RTE_TAILQ_CAST(rte_stack_tailq.head, rte_stack_list);
rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
/* find the tailq entry */
TAILQ_FOREACH(te, stack_list, next) {
if (te->data == s)
break;
}
if (te == NULL) {
rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
return;
}
TAILQ_REMOVE(stack_list, te, next);
rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
rte_free(te);
rte_memzone_free(s->memzone);
}
struct rte_stack *
rte_stack_lookup(const char *name)
{
struct rte_stack_list *stack_list;
struct rte_tailq_entry *te;
struct rte_stack *r = NULL;
if (name == NULL) {
rte_errno = EINVAL;
return NULL;
}
stack_list = RTE_TAILQ_CAST(rte_stack_tailq.head, rte_stack_list);
rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
TAILQ_FOREACH(te, stack_list, next) {
r = (struct rte_stack *) te->data;
if (strncmp(name, r->name, RTE_STACK_NAMESIZE) == 0)
break;
}
rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
if (te == NULL) {
rte_errno = ENOENT;
return NULL;
}
return r;
}
RTE_INIT(librte_stack_init_log)
{
stack_logtype = rte_log_register("lib.stack");
if (stack_logtype >= 0)
rte_log_set_level(stack_logtype, RTE_LOG_NOTICE);
}

@@ -0,0 +1,209 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019 Intel Corporation
*/
/**
* @file rte_stack.h
* @b EXPERIMENTAL: this API may change without prior notice
*
* RTE Stack
*
* librte_stack provides an API for configuration and use of a bounded stack of
* pointers. Push and pop operations are MT-safe, allowing concurrent access,
* and the interface supports pushing and popping multiple pointers at a time.
*/
#ifndef _RTE_STACK_H_
#define _RTE_STACK_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <rte_compat.h>
#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_memzone.h>
#include <rte_spinlock.h>
#define RTE_TAILQ_STACK_NAME "RTE_STACK"
#define RTE_STACK_MZ_PREFIX "STK_"
/** The maximum length of a stack name. */
#define RTE_STACK_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
sizeof(RTE_STACK_MZ_PREFIX) + 1)
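/* For example (illustrative arithmetic, assuming the DPDK default
 * RTE_MEMZONE_NAMESIZE of 32): 32 - sizeof("STK_") + 1 = 28 bytes,
 * i.e. up to 27 name characters plus the terminating NUL.
 */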
/* Structure containing the LIFO, its current length, and a lock for mutual
* exclusion.
*/
struct rte_stack_std {
rte_spinlock_t lock; /**< LIFO lock */
uint32_t len; /**< LIFO len */
void *objs[]; /**< LIFO pointer table */
};
/* The RTE stack structure contains the LIFO structure itself, plus metadata
* such as its name and memzone pointer.
*/
struct rte_stack {
/** Name of the stack. */
char name[RTE_STACK_NAMESIZE] __rte_cache_aligned;
/** Memzone containing the rte_stack structure. */
const struct rte_memzone *memzone;
uint32_t capacity; /**< Usable size of the stack. */
uint32_t flags; /**< Flags supplied at creation. */
struct rte_stack_std stack_std; /**< LIFO structure. */
} __rte_cache_aligned;
#include "rte_stack_std.h"
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
* Push several objects on the stack (MT-safe).
*
* @param s
* A pointer to the stack structure.
* @param obj_table
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to push on the stack from the obj_table.
* @return
* Actual number of objects pushed (either 0 or *n*).
*/
static __rte_always_inline unsigned int __rte_experimental
rte_stack_push(struct rte_stack *s, void * const *obj_table, unsigned int n)
{
RTE_ASSERT(s != NULL);
RTE_ASSERT(obj_table != NULL);
return __rte_stack_std_push(s, obj_table, n);
}
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
* Pop several objects from the stack (MT-safe).
*
* @param s
* A pointer to the stack structure.
* @param obj_table
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to pull from the stack.
* @return
* Actual number of objects popped (either 0 or *n*).
*/
static __rte_always_inline unsigned int __rte_experimental
rte_stack_pop(struct rte_stack *s, void **obj_table, unsigned int n)
{
RTE_ASSERT(s != NULL);
RTE_ASSERT(obj_table != NULL);
return __rte_stack_std_pop(s, obj_table, n);
}
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
* Return the number of used entries in a stack.
*
* @param s
* A pointer to the stack structure.
* @return
* The number of used entries in the stack.
*/
static __rte_always_inline unsigned int __rte_experimental
rte_stack_count(struct rte_stack *s)
{
RTE_ASSERT(s != NULL);
return __rte_stack_std_count(s);
}
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
* Return the number of free entries in a stack.
*
* @param s
* A pointer to the stack structure.
* @return
* The number of free entries in the stack.
*/
static __rte_always_inline unsigned int __rte_experimental
rte_stack_free_count(struct rte_stack *s)
{
RTE_ASSERT(s != NULL);
return s->capacity - rte_stack_count(s);
}
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
* Create a new stack named *name* in memory.
*
* This function uses ``rte_memzone_reserve_aligned()`` to allocate memory
* for a stack of size *count*. The behavior of the stack is controlled by
* the *flags*.
*
* @param name
* The name of the stack.
* @param count
* The size of the stack.
* @param socket_id
* The *socket_id* argument is the socket identifier in case of
* NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
* constraint for the reserved zone.
* @param flags
* Reserved for future use.
* @return
* On success, a pointer to the newly allocated stack. NULL on error with
* rte_errno set appropriately. Possible errno values include:
* - ENOSPC - the maximum number of memzones has already been allocated
* - EEXIST - a stack with the same name already exists
* - ENOMEM - insufficient memory to create the stack
* - ENAMETOOLONG - name size exceeds RTE_STACK_NAMESIZE
*/
struct rte_stack *__rte_experimental
rte_stack_create(const char *name, unsigned int count, int socket_id,
uint32_t flags);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
* Free all memory used by the stack.
*
* @param s
* Stack to free
*/
void __rte_experimental
rte_stack_free(struct rte_stack *s);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
* Lookup a stack by its name.
*
* @param name
* The name of the stack.
* @return
* The pointer to the stack matching the name, or NULL if not found,
* with rte_errno set appropriately. Possible rte_errno values include:
* - ENOENT - Stack with name *name* not found.
* - EINVAL - *name* pointer is NULL.
*/
struct rte_stack * __rte_experimental
rte_stack_lookup(const char *name);
#ifdef __cplusplus
}
#endif
#endif /* _RTE_STACK_H_ */

@@ -0,0 +1,34 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019 Intel Corporation
*/
#ifndef _RTE_STACK_PVT_H_
#define _RTE_STACK_PVT_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <rte_log.h>
extern int stack_logtype;
#define STACK_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ##level, stack_logtype, "%s(): "fmt "\n", \
__func__, ##args)
#define STACK_LOG_ERR(fmt, args...) \
STACK_LOG(ERR, fmt, ## args)
#define STACK_LOG_WARN(fmt, args...) \
STACK_LOG(WARNING, fmt, ## args)
#define STACK_LOG_INFO(fmt, args...) \
STACK_LOG(INFO, fmt, ## args)
#ifdef __cplusplus
}
#endif
#endif /* _RTE_STACK_PVT_H_ */

@@ -0,0 +1,26 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019 Intel Corporation
*/
#include "rte_stack.h"
void
rte_stack_std_init(struct rte_stack *s)
{
rte_spinlock_init(&s->stack_std.lock);
}
ssize_t
rte_stack_std_get_memsize(unsigned int count)
{
ssize_t sz = sizeof(struct rte_stack);
sz += RTE_CACHE_LINE_ROUNDUP(count * sizeof(void *));
/* Add padding to avoid false sharing conflicts caused by
* next-line hardware prefetchers.
*/
sz += 2 * RTE_CACHE_LINE_SIZE;
return sz;
}
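/* Worked example (illustrative, assuming 8-byte pointers and a 64-byte
 * cache line): for count = 1024, the pointer table needs
 * RTE_CACHE_LINE_ROUNDUP(1024 * 8) = 8192 bytes, giving a total of
 * sizeof(struct rte_stack) + 8192 + 128 bytes of prefetch padding.
 */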

@@ -0,0 +1,121 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2019 Intel Corporation
*/
#ifndef _RTE_STACK_STD_H_
#define _RTE_STACK_STD_H_
#include <rte_branch_prediction.h>
/**
* @internal Push several objects on the stack (MT-safe).
*
* @param s
* A pointer to the stack structure.
* @param obj_table
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to push on the stack from the obj_table.
* @return
* Actual number of objects pushed (either 0 or *n*).
*/
static __rte_always_inline unsigned int __rte_experimental
__rte_stack_std_push(struct rte_stack *s, void * const *obj_table,
unsigned int n)
{
struct rte_stack_std *stack = &s->stack_std;
unsigned int index;
void **cache_objs;
rte_spinlock_lock(&stack->lock);
cache_objs = &stack->objs[stack->len];
/* Is there sufficient space in the stack? */
if ((stack->len + n) > s->capacity) {
rte_spinlock_unlock(&stack->lock);
return 0;
}
/* Add the elements to the top of the stack */
for (index = 0; index < n; ++index, obj_table++)
cache_objs[index] = *obj_table;
stack->len += n;
rte_spinlock_unlock(&stack->lock);
return n;
}
/**
* @internal Pop several objects from the stack (MT-safe).
*
* @param s
* A pointer to the stack structure.
* @param obj_table
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to pull from the stack.
* @return
* Actual number of objects popped (either 0 or *n*).
*/
static __rte_always_inline unsigned int __rte_experimental
__rte_stack_std_pop(struct rte_stack *s, void **obj_table, unsigned int n)
{
struct rte_stack_std *stack = &s->stack_std;
unsigned int index, len;
void **cache_objs;
rte_spinlock_lock(&stack->lock);
if (unlikely(n > stack->len)) {
rte_spinlock_unlock(&stack->lock);
return 0;
}
cache_objs = stack->objs;
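	/* Copy from the top of the stack downward, so objects come out in
	 * LIFO order.
	 */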
for (index = 0, len = stack->len - 1; index < n;
++index, len--, obj_table++)
*obj_table = cache_objs[len];
stack->len -= n;
rte_spinlock_unlock(&stack->lock);
return n;
}
/**
* @internal Return the number of used entries in a stack.
*
* @param s
* A pointer to the stack structure.
* @return
* The number of used entries in the stack.
*/
static __rte_always_inline unsigned int __rte_experimental
__rte_stack_std_count(struct rte_stack *s)
{
return (unsigned int)s->stack_std.len;
}
/**
* @internal Initialize a standard stack.
*
* @param s
* A pointer to the stack structure.
*/
void
rte_stack_std_init(struct rte_stack *s);
/**
* @internal Return the memory required for a standard stack.
*
* @param count
* The size of the stack.
* @return
* The bytes to allocate for a standard stack.
*/
ssize_t
rte_stack_std_get_memsize(unsigned int count);
#endif /* _RTE_STACK_STD_H_ */

@@ -0,0 +1,9 @@
EXPERIMENTAL {
global:
rte_stack_create;
rte_stack_free;
rte_stack_lookup;
local: *;
};

@@ -22,7 +22,7 @@ libraries = [
'gro', 'gso', 'ip_frag', 'jobstats',
'kni', 'latencystats', 'lpm', 'member',
'power', 'pdump', 'rawdev',
- 'reorder', 'sched', 'security', 'vhost',
+ 'reorder', 'sched', 'security', 'stack', 'vhost',
#ipsec lib depends on crypto and security
'ipsec',
# add pkt framework libs which use other libs from above

@@ -89,6 +89,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += -lrte_eventdev
_LDLIBS-$(CONFIG_RTE_LIBRTE_RAWDEV) += -lrte_rawdev
_LDLIBS-$(CONFIG_RTE_LIBRTE_TIMER) += -lrte_timer
_LDLIBS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += -lrte_mempool
_LDLIBS-$(CONFIG_RTE_LIBRTE_STACK) += -lrte_stack
_LDLIBS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += -lrte_mempool_ring
_LDLIBS-$(CONFIG_RTE_LIBRTE_RING) += -lrte_ring
_LDLIBS-$(CONFIG_RTE_LIBRTE_PCI) += -lrte_pci