/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2010-2017 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */
|
2012-09-04 12:54:00 +00:00
|
|
|
|
|
|
|
#ifndef _RTE_RING_H_
|
|
|
|
#define _RTE_RING_H_
|
|
|
|
|
|
|
|
/**
 * @file
 * RTE Ring
 *
 * The Ring Manager is a fixed-size queue, implemented as a table of
 * pointers. Head and tail pointers are modified atomically, allowing
 * concurrent access to it. It has the following features:
 *
 * - FIFO (First In First Out)
 * - Maximum size is fixed; the pointers are stored in a table.
 * - Lockless implementation.
 * - Multi- or single-consumer dequeue.
 * - Multi- or single-producer enqueue.
 * - Bulk dequeue.
 * - Bulk enqueue.
 *
 * Note: the ring implementation is not preemptible. Refer to Programmer's
 * guide/Environment Abstraction Layer/Multiple pthread/Known Issues/rte_ring
 * for more information.
 */
|
|
|
|
|
|
|
|
#ifdef __cplusplus
|
|
|
|
extern "C" {
|
|
|
|
#endif
|
|
|
|
|
2014-05-02 23:42:56 +00:00
|
|
|
#include <stdio.h>
|
2012-09-04 12:54:00 +00:00
|
|
|
#include <stdint.h>
|
|
|
|
#include <sys/queue.h>
|
|
|
|
#include <errno.h>
|
|
|
|
#include <rte_common.h>
|
2017-12-21 13:00:04 +00:00
|
|
|
#include <rte_config.h>
|
2012-09-04 12:54:00 +00:00
|
|
|
#include <rte_memory.h>
|
|
|
|
#include <rte_lcore.h>
|
|
|
|
#include <rte_atomic.h>
|
|
|
|
#include <rte_branch_prediction.h>
|
2016-07-20 17:16:38 +00:00
|
|
|
#include <rte_memzone.h>
|
2017-06-05 08:58:43 +00:00
|
|
|
#include <rte_pause.h>
|
2012-09-04 12:54:00 +00:00
|
|
|
|
2015-03-04 21:50:08 +00:00
|
|
|
#define RTE_TAILQ_RING_NAME "RTE_RING"
|
|
|
|
|
2012-12-19 23:00:00 +00:00
|
|
|
enum rte_ring_queue_behavior {
|
|
|
|
RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
|
2016-11-29 22:23:39 +00:00
|
|
|
RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
|
2012-12-19 23:00:00 +00:00
|
|
|
};
|
2012-09-04 12:54:00 +00:00
|
|
|
|
2014-02-18 09:51:16 +00:00
|
|
|
#define RTE_RING_MZ_PREFIX "RG_"
|
2019-04-04 12:34:54 +00:00
|
|
|
/** The maximum length of a ring name. */
|
2016-07-20 17:16:38 +00:00
|
|
|
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
|
|
|
|
sizeof(RTE_RING_MZ_PREFIX) + 1)
|
2012-09-04 12:54:00 +00:00
|
|
|
|
2017-03-29 15:21:18 +00:00
|
|
|
/* Structure to hold a pair of head/tail values and other metadata.
 * One instance is embedded in struct rte_ring for the producer side and
 * one for the consumer side.
 */
struct rte_ring_headtail {
	volatile uint32_t head;  /**< Prod/consumer head. */
	volatile uint32_t tail;  /**< Prod/consumer tail. */
	uint32_t single;         /**< True if single prod/cons */
};
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/**
 * An RTE ring structure.
 *
 * The producer and the consumer have a head and a tail index. The particularity
 * of these index is that they are not between 0 and size(ring). These indexes
 * are between 0 and 2^32, and we mask their value when we access the ring[]
 * field. Thanks to this assumption, we can do subtractions between 2 index
 * values in a modulo-32bit base: that's why the overflow of the indexes is not
 * a problem.
 */
struct rte_ring {
	/*
	 * Note: this field kept the RTE_MEMZONE_NAMESIZE size due to ABI
	 * compatibility requirements, it could be changed to RTE_RING_NAMESIZE
	 * next time the ABI changes
	 */
	char name[RTE_MEMZONE_NAMESIZE] __rte_cache_aligned; /**< Name of the ring. */
	int flags;               /**< Flags supplied at creation. */
	const struct rte_memzone *memzone;
			/**< Memzone, if any, containing the rte_ring */
	uint32_t size;           /**< Size of ring. */
	uint32_t mask;           /**< Mask (size-1) of ring. */
	uint32_t capacity;       /**< Usable size of ring */

	/* The cache-aligned pads below keep the prod and cons metadata on
	 * their own cache lines, separate from each other and from the
	 * read-mostly fields above. */
	char pad0 __rte_cache_aligned; /**< empty cache line */

	/** Ring producer status. */
	struct rte_ring_headtail prod __rte_cache_aligned;
	char pad1 __rte_cache_aligned; /**< empty cache line */

	/** Ring consumer status. */
	struct rte_ring_headtail cons __rte_cache_aligned;
	char pad2 __rte_cache_aligned; /**< empty cache line */
};
|
2012-09-04 12:54:00 +00:00
|
|
|
|
|
|
|
#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
|
|
|
|
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
|
2017-06-30 15:06:17 +00:00
|
|
|
/**
|
|
|
|
* Ring is to hold exactly requested number of entries.
|
|
|
|
* Without this flag set, the ring size requested must be a power of 2, and the
|
|
|
|
* usable space will be that size - 1. With the flag, the requested size will
|
|
|
|
* be rounded up to the next power of two, but the usable space will be exactly
|
|
|
|
* that requested. Worst case, if a power-of-2 size is requested, half the
|
|
|
|
* ring space will be wasted.
|
|
|
|
*/
|
|
|
|
#define RING_F_EXACT_SZ 0x0004
|
2017-09-20 11:32:15 +00:00
|
|
|
#define RTE_RING_SZ_MASK (0x7fffffffU) /**< Ring size mask */
|
2012-09-04 12:54:00 +00:00
|
|
|
|
2017-03-29 15:21:28 +00:00
|
|
|
/* @internal defines for passing to the enqueue dequeue worker functions */
|
|
|
|
#define __IS_SP 1
|
|
|
|
#define __IS_MP 0
|
|
|
|
#define __IS_SC 1
|
|
|
|
#define __IS_MC 0
|
|
|
|
|
2014-05-09 10:14:52 +00:00
|
|
|
/**
|
|
|
|
* Calculate the memory size needed for a ring
|
|
|
|
*
|
|
|
|
* This function returns the number of bytes needed for a ring, given
|
|
|
|
* the number of elements in it. This value is the sum of the size of
|
|
|
|
* the structure rte_ring and the size of the memory needed by the
|
|
|
|
* objects pointers. The value is aligned to a cache line size.
|
|
|
|
*
|
|
|
|
* @param count
|
|
|
|
* The number of elements in the ring (must be a power of 2).
|
|
|
|
* @return
|
|
|
|
* - The memory size needed for the ring on success.
|
|
|
|
* - -EINVAL if count is not a power of 2.
|
|
|
|
*/
|
|
|
|
ssize_t rte_ring_get_memsize(unsigned count);
|
|
|
|
|
2014-05-09 10:14:53 +00:00
|
|
|
/**
|
|
|
|
* Initialize a ring structure.
|
|
|
|
*
|
|
|
|
* Initialize a ring structure in memory pointed by "r". The size of the
|
|
|
|
* memory area must be large enough to store the ring structure and the
|
|
|
|
* object table. It is advised to use rte_ring_get_memsize() to get the
|
|
|
|
* appropriate size.
|
|
|
|
*
|
|
|
|
* The ring size is set to *count*, which must be a power of two. Water
|
|
|
|
* marking is disabled by default. The real usable ring size is
|
|
|
|
* *count-1* instead of *count* to differentiate a free ring from an
|
|
|
|
* empty ring.
|
|
|
|
*
|
|
|
|
* The ring is not added in RTE_TAILQ_RING global list. Indeed, the
|
|
|
|
* memory given by the caller may not be shareable among dpdk
|
|
|
|
* processes.
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* The pointer to the ring structure followed by the objects table.
|
|
|
|
* @param name
|
|
|
|
* The name of the ring.
|
|
|
|
* @param count
|
|
|
|
* The number of elements in the ring (must be a power of 2).
|
|
|
|
* @param flags
|
|
|
|
* An OR of the following:
|
|
|
|
* - RING_F_SP_ENQ: If this flag is set, the default behavior when
|
|
|
|
* using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
|
|
|
|
* is "single-producer". Otherwise, it is "multi-producers".
|
|
|
|
* - RING_F_SC_DEQ: If this flag is set, the default behavior when
|
|
|
|
* using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
|
|
|
|
* is "single-consumer". Otherwise, it is "multi-consumers".
|
|
|
|
* @return
|
|
|
|
* 0 on success, or a negative value on error.
|
|
|
|
*/
|
|
|
|
int rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
|
|
|
|
unsigned flags);
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/**
|
|
|
|
* Create a new ring named *name* in memory.
|
|
|
|
*
|
2014-05-09 10:14:53 +00:00
|
|
|
* This function uses ``memzone_reserve()`` to allocate memory. Then it
|
|
|
|
* calls rte_ring_init() to initialize an empty ring.
|
|
|
|
*
|
|
|
|
* The new ring size is set to *count*, which must be a power of
|
|
|
|
* two. Water marking is disabled by default. The real usable ring size
|
|
|
|
* is *count-1* instead of *count* to differentiate a free ring from an
|
|
|
|
* empty ring.
|
|
|
|
*
|
|
|
|
* The ring is added in RTE_TAILQ_RING list.
|
2012-09-04 12:54:00 +00:00
|
|
|
*
|
|
|
|
* @param name
|
|
|
|
* The name of the ring.
|
|
|
|
* @param count
|
|
|
|
* The size of the ring (must be a power of 2).
|
|
|
|
* @param socket_id
|
|
|
|
* The *socket_id* argument is the socket identifier in case of
|
|
|
|
* NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
|
|
|
|
* constraint for the reserved zone.
|
|
|
|
* @param flags
|
|
|
|
* An OR of the following:
|
|
|
|
* - RING_F_SP_ENQ: If this flag is set, the default behavior when
|
|
|
|
* using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
|
|
|
|
* is "single-producer". Otherwise, it is "multi-producers".
|
|
|
|
* - RING_F_SC_DEQ: If this flag is set, the default behavior when
|
|
|
|
* using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
|
|
|
|
* is "single-consumer". Otherwise, it is "multi-consumers".
|
|
|
|
* @return
|
|
|
|
* On success, the pointer to the new allocated ring. NULL on error with
|
|
|
|
* rte_errno set appropriately. Possible errno values include:
|
|
|
|
* - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
|
|
|
|
* - E_RTE_SECONDARY - function was called from a secondary process instance
|
|
|
|
* - EINVAL - count provided is not a power of 2
|
|
|
|
* - ENOSPC - the maximum number of memzones has already been allocated
|
|
|
|
* - EEXIST - a memzone with the same name already exists
|
|
|
|
* - ENOMEM - no appropriate memory area found in which to create memzone
|
|
|
|
*/
|
|
|
|
struct rte_ring *rte_ring_create(const char *name, unsigned count,
|
|
|
|
int socket_id, unsigned flags);
|
2015-10-02 15:53:44 +00:00
|
|
|
/**
|
|
|
|
* De-allocate all memory used by the ring.
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* Ring to free
|
|
|
|
*/
|
|
|
|
void rte_ring_free(struct rte_ring *r);
|
2012-09-04 12:54:00 +00:00
|
|
|
|
|
|
|
/**
|
2016-09-02 11:01:51 +00:00
|
|
|
* Dump the status of the ring to a file.
|
2012-09-04 12:54:00 +00:00
|
|
|
*
|
2014-05-02 23:42:56 +00:00
|
|
|
* @param f
|
|
|
|
* A pointer to a file for output
|
2012-09-04 12:54:00 +00:00
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
*/
|
2014-05-02 23:42:56 +00:00
|
|
|
void rte_ring_dump(FILE *f, const struct rte_ring *r);
|
2012-09-04 12:54:00 +00:00
|
|
|
|
2014-06-03 23:42:50 +00:00
|
|
|
/* The actual enqueue of pointers on the ring.
 * Placed here since identical code needed in both
 * single and multi producer enqueue functions.
 * Copies n entries from obj_table into the ring storage starting at
 * (prod_head & mask): the fast path copies 4 entries per iteration with
 * a switch for the remainder; the slow path handles index wrap-around
 * with two plain loops. */
#define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
	unsigned int i; \
	const uint32_t size = (r)->size; \
	uint32_t idx = prod_head & (r)->mask; \
	obj_type *ring = (obj_type *)ring_start; \
	if (likely(idx + n < size)) { \
		for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
			ring[idx] = obj_table[i]; \
			ring[idx+1] = obj_table[i+1]; \
			ring[idx+2] = obj_table[i+2]; \
			ring[idx+3] = obj_table[i+3]; \
		} \
		switch (n & 0x3) { \
		case 3: \
			ring[idx++] = obj_table[i++]; /* fallthrough */ \
		case 2: \
			ring[idx++] = obj_table[i++]; /* fallthrough */ \
		case 1: \
			ring[idx++] = obj_table[i++]; \
		} \
	} else { \
		for (i = 0; idx < size; i++, idx++)\
			ring[idx] = obj_table[i]; \
		for (idx = 0; i < n; i++, idx++) \
			ring[idx] = obj_table[i]; \
	} \
} while (0)
|
2013-06-03 00:00:00 +00:00
|
|
|
|
2014-06-03 23:42:50 +00:00
|
|
|
/* The actual copy of pointers on the ring to obj_table.
 * Placed here since identical code needed in both
 * single and multi consumer dequeue functions.
 * Mirror image of ENQUEUE_PTRS: copies n entries out of the ring storage
 * starting at (cons_head & mask), 4-at-a-time on the fast path, with a
 * wrap-around slow path. */
#define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
	unsigned int i; \
	uint32_t idx = cons_head & (r)->mask; \
	const uint32_t size = (r)->size; \
	obj_type *ring = (obj_type *)ring_start; \
	if (likely(idx + n < size)) { \
		for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
			obj_table[i] = ring[idx]; \
			obj_table[i+1] = ring[idx+1]; \
			obj_table[i+2] = ring[idx+2]; \
			obj_table[i+3] = ring[idx+3]; \
		} \
		switch (n & 0x3) { \
		case 3: \
			obj_table[i++] = ring[idx++]; /* fallthrough */ \
		case 2: \
			obj_table[i++] = ring[idx++]; /* fallthrough */ \
		case 1: \
			obj_table[i++] = ring[idx++]; \
		} \
	} else { \
		for (i = 0; idx < size; i++, idx++) \
			obj_table[i] = ring[idx]; \
		for (idx = 0; i < n; i++, idx++) \
			obj_table[i] = ring[idx]; \
	} \
} while (0)
|
|
|
|
|
2018-01-22 04:41:28 +00:00
|
|
|
/* Loads may be reordered with respect to one another on weakly ordered
 * memory models (PowerPC/Arm).
 * There are two choices for the user:
 * 1. use an rmb() memory barrier
 * 2. use one-direction load_acquire/store_release barriers, selected by
 *    CONFIG_RTE_USE_C11_MEM_MODEL=y
 * Which performs better depends on performance test results.
 * By default, the common functions live in rte_ring_generic.h.
 */
|
2018-10-08 09:11:43 +00:00
|
|
|
#ifdef RTE_USE_C11_MEM_MODEL
|
2018-01-22 04:41:28 +00:00
|
|
|
#include "rte_ring_c11_mem.h"
|
|
|
|
#else
|
2018-01-22 04:41:27 +00:00
|
|
|
#include "rte_ring_generic.h"
|
2018-01-22 04:41:28 +00:00
|
|
|
#endif
|
2012-09-04 12:54:00 +00:00
|
|
|
|
|
|
|
/**
 * @internal Enqueue several objects on the ring
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items from a ring
 *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
 * @param is_sp
 *   Indicates whether to use single producer or multi-producer head update
 * @param free_space
 *   returns the amount of space after the enqueue operation has finished
 * @return
 *   Actual number of objects enqueued.
 *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
 */
static __rte_always_inline unsigned int
__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
		 unsigned int n, enum rte_ring_queue_behavior behavior,
		 unsigned int is_sp, unsigned int *free_space)
{
	uint32_t prod_head, prod_next;
	uint32_t free_entries;

	/* Reserve space by advancing prod.head; n may come back reduced
	 * (possibly to 0) depending on behavior and available room. */
	n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
			&prod_head, &prod_next, &free_entries);
	if (n == 0)
		goto end;

	/* Object storage starts immediately after the ring header (&r[1]). */
	ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);

	/* Publish prod.tail so consumers can observe the new entries. */
	update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
end:
	if (free_space != NULL)
		*free_space = free_entries - n;
	return n;
}
|
|
|
|
|
2012-12-19 23:00:00 +00:00
|
|
|
/**
 * @internal Dequeue several objects from the ring
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to pull from the ring.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from a ring
 *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
 * @param is_sc
 *   Indicates whether to use single consumer or multi-consumer head update
 * @param available
 *   returns the number of remaining ring entries after the dequeue has finished
 * @return
 *   - Actual number of objects dequeued.
 *     If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
 */
static __rte_always_inline unsigned int
__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
		 unsigned int n, enum rte_ring_queue_behavior behavior,
		 unsigned int is_sc, unsigned int *available)
{
	uint32_t cons_head, cons_next;
	uint32_t entries;

	/* Reserve entries by advancing cons.head; n may come back reduced
	 * (possibly to 0) depending on behavior and entries present. */
	n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
			&cons_head, &cons_next, &entries);
	if (n == 0)
		goto end;

	/* Object storage starts immediately after the ring header (&r[1]). */
	DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);

	/* Publish cons.tail so producers can reuse the freed slots. */
	update_tail(&r->cons, cons_head, cons_next, is_sc, 0);

end:
	if (available != NULL)
		*available = entries - n;
	return n;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Enqueue several objects on the ring (multi-producers safe).
|
|
|
|
*
|
|
|
|
* This function uses a "compare and set" instruction to move the
|
|
|
|
* producer index atomically.
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj_table
|
|
|
|
* A pointer to a table of void * pointers (objects).
|
|
|
|
* @param n
|
|
|
|
* The number of objects to add in the ring from the obj_table.
|
2017-03-29 15:21:24 +00:00
|
|
|
* @param free_space
|
|
|
|
* if non-NULL, returns the amount of space in the ring after the
|
|
|
|
* enqueue operation has finished.
|
2012-12-19 23:00:00 +00:00
|
|
|
* @return
|
2017-03-29 15:21:23 +00:00
|
|
|
* The number of objects enqueued, either 0 or n
|
2012-12-19 23:00:00 +00:00
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline unsigned int
|
2012-12-19 23:00:00 +00:00
|
|
|
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
|
2017-03-29 15:21:24 +00:00
|
|
|
unsigned int n, unsigned int *free_space)
|
2012-12-19 23:00:00 +00:00
|
|
|
{
|
2017-03-29 15:21:28 +00:00
|
|
|
return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
|
|
|
|
__IS_MP, free_space);
|
2012-12-19 23:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Enqueue several objects on a ring (NOT multi-producers safe).
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj_table
|
|
|
|
* A pointer to a table of void * pointers (objects).
|
|
|
|
* @param n
|
|
|
|
* The number of objects to add in the ring from the obj_table.
|
2017-03-29 15:21:24 +00:00
|
|
|
* @param free_space
|
|
|
|
* if non-NULL, returns the amount of space in the ring after the
|
|
|
|
* enqueue operation has finished.
|
2012-12-19 23:00:00 +00:00
|
|
|
* @return
|
2017-03-29 15:21:23 +00:00
|
|
|
* The number of objects enqueued, either 0 or n
|
2012-12-19 23:00:00 +00:00
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline unsigned int
|
2012-12-19 23:00:00 +00:00
|
|
|
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
|
2017-03-29 15:21:24 +00:00
|
|
|
unsigned int n, unsigned int *free_space)
|
2012-12-19 23:00:00 +00:00
|
|
|
{
|
2017-03-29 15:21:28 +00:00
|
|
|
return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
|
|
|
|
__IS_SP, free_space);
|
2012-12-19 23:00:00 +00:00
|
|
|
}
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/**
|
|
|
|
* Enqueue several objects on a ring.
|
|
|
|
*
|
|
|
|
* This function calls the multi-producer or the single-producer
|
|
|
|
* version depending on the default behavior that was specified at
|
|
|
|
* ring creation time (see flags).
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj_table
|
|
|
|
* A pointer to a table of void * pointers (objects).
|
|
|
|
* @param n
|
|
|
|
* The number of objects to add in the ring from the obj_table.
|
2017-03-29 15:21:24 +00:00
|
|
|
* @param free_space
|
|
|
|
* if non-NULL, returns the amount of space in the ring after the
|
|
|
|
* enqueue operation has finished.
|
2012-09-04 12:54:00 +00:00
|
|
|
* @return
|
2017-03-29 15:21:23 +00:00
|
|
|
* The number of objects enqueued, either 0 or n
|
2012-09-04 12:54:00 +00:00
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline unsigned int
|
2012-09-04 12:54:00 +00:00
|
|
|
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
|
2017-03-29 15:21:24 +00:00
|
|
|
unsigned int n, unsigned int *free_space)
|
2012-09-04 12:54:00 +00:00
|
|
|
{
|
2017-03-29 15:21:28 +00:00
|
|
|
return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
|
|
|
|
r->prod.single, free_space);
|
2012-09-04 12:54:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Enqueue one object on a ring (multi-producers safe).
|
|
|
|
*
|
|
|
|
* This function uses a "compare and set" instruction to move the
|
|
|
|
* producer index atomically.
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj
|
|
|
|
* A pointer to the object to be added.
|
|
|
|
* @return
|
|
|
|
* - 0: Success; objects enqueued.
|
|
|
|
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
|
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline int
|
2012-09-04 12:54:00 +00:00
|
|
|
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
|
|
|
|
{
|
2017-03-29 15:21:24 +00:00
|
|
|
return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
|
2012-09-04 12:54:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Enqueue one object on a ring (NOT multi-producers safe).
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj
|
|
|
|
* A pointer to the object to be added.
|
|
|
|
* @return
|
|
|
|
* - 0: Success; objects enqueued.
|
|
|
|
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
|
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline int
|
2012-09-04 12:54:00 +00:00
|
|
|
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
|
|
|
|
{
|
2017-03-29 15:21:24 +00:00
|
|
|
return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
|
2012-09-04 12:54:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Enqueue one object on a ring.
|
|
|
|
*
|
|
|
|
* This function calls the multi-producer or the single-producer
|
|
|
|
* version, depending on the default behaviour that was specified at
|
|
|
|
* ring creation time (see flags).
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj
|
|
|
|
* A pointer to the object to be added.
|
|
|
|
* @return
|
|
|
|
* - 0: Success; objects enqueued.
|
|
|
|
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
|
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline int
|
2012-09-04 12:54:00 +00:00
|
|
|
rte_ring_enqueue(struct rte_ring *r, void *obj)
|
|
|
|
{
|
2017-03-29 15:21:24 +00:00
|
|
|
return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
|
2012-09-04 12:54:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Dequeue several objects from a ring (multi-consumers safe).
|
|
|
|
*
|
|
|
|
* This function uses a "compare and set" instruction to move the
|
|
|
|
* consumer index atomically.
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj_table
|
|
|
|
* A pointer to a table of void * pointers (objects) that will be filled.
|
|
|
|
* @param n
|
2012-12-19 23:00:00 +00:00
|
|
|
* The number of objects to dequeue from the ring to the obj_table.
|
2017-03-29 15:21:25 +00:00
|
|
|
* @param available
|
|
|
|
* If non-NULL, returns the number of remaining ring entries after the
|
|
|
|
* dequeue has finished.
|
2012-09-04 12:54:00 +00:00
|
|
|
* @return
|
2017-03-29 15:21:23 +00:00
|
|
|
* The number of objects dequeued, either 0 or n
|
2012-09-04 12:54:00 +00:00
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline unsigned int
|
2017-03-29 15:21:25 +00:00
|
|
|
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
|
|
|
|
unsigned int n, unsigned int *available)
|
2012-09-04 12:54:00 +00:00
|
|
|
{
|
2017-03-29 15:21:28 +00:00
|
|
|
return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
|
|
|
|
__IS_MC, available);
|
2012-09-04 12:54:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Dequeue several objects from a ring (NOT multi-consumers safe).
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj_table
|
|
|
|
* A pointer to a table of void * pointers (objects) that will be filled.
|
|
|
|
* @param n
|
|
|
|
* The number of objects to dequeue from the ring to the obj_table,
|
|
|
|
* must be strictly positive.
|
2017-03-29 15:21:25 +00:00
|
|
|
* @param available
|
|
|
|
* If non-NULL, returns the number of remaining ring entries after the
|
|
|
|
* dequeue has finished.
|
2012-09-04 12:54:00 +00:00
|
|
|
* @return
|
2017-03-29 15:21:23 +00:00
|
|
|
* The number of objects dequeued, either 0 or n
|
2012-09-04 12:54:00 +00:00
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline unsigned int
|
2017-03-29 15:21:25 +00:00
|
|
|
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
|
|
|
|
unsigned int n, unsigned int *available)
|
2012-09-04 12:54:00 +00:00
|
|
|
{
|
2017-03-29 15:21:28 +00:00
|
|
|
return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
|
|
|
|
__IS_SC, available);
|
2012-09-04 12:54:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Dequeue several objects from a ring.
|
|
|
|
*
|
|
|
|
* This function calls the multi-consumers or the single-consumer
|
|
|
|
* version, depending on the default behaviour that was specified at
|
|
|
|
* ring creation time (see flags).
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj_table
|
|
|
|
* A pointer to a table of void * pointers (objects) that will be filled.
|
|
|
|
* @param n
|
|
|
|
* The number of objects to dequeue from the ring to the obj_table.
|
2017-03-29 15:21:25 +00:00
|
|
|
* @param available
|
|
|
|
* If non-NULL, returns the number of remaining ring entries after the
|
|
|
|
* dequeue has finished.
|
2012-09-04 12:54:00 +00:00
|
|
|
* @return
|
2017-03-29 15:21:23 +00:00
|
|
|
* The number of objects dequeued, either 0 or n
|
2012-09-04 12:54:00 +00:00
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline unsigned int
|
2017-03-29 15:21:25 +00:00
|
|
|
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
|
|
|
|
unsigned int *available)
|
2012-09-04 12:54:00 +00:00
|
|
|
{
|
2017-03-29 15:21:28 +00:00
|
|
|
return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
|
|
|
|
r->cons.single, available);
|
2012-09-04 12:54:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Dequeue one object from a ring (multi-consumers safe).
|
|
|
|
*
|
|
|
|
* This function uses a "compare and set" instruction to move the
|
|
|
|
* consumer index atomically.
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj_p
|
|
|
|
* A pointer to a void * pointer (object) that will be filled.
|
|
|
|
* @return
|
|
|
|
* - 0: Success; objects dequeued.
|
|
|
|
* - -ENOENT: Not enough entries in the ring to dequeue; no object is
|
|
|
|
* dequeued.
|
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline int
|
2012-09-04 12:54:00 +00:00
|
|
|
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
|
|
|
|
{
|
2017-06-02 06:29:51 +00:00
|
|
|
return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
|
2012-09-04 12:54:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Dequeue one object from a ring (NOT multi-consumers safe).
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj_p
|
|
|
|
* A pointer to a void * pointer (object) that will be filled.
|
|
|
|
* @return
|
|
|
|
* - 0: Success; objects dequeued.
|
|
|
|
* - -ENOENT: Not enough entries in the ring to dequeue, no object is
|
|
|
|
* dequeued.
|
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline int
|
2012-09-04 12:54:00 +00:00
|
|
|
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
|
|
|
|
{
|
2017-06-02 06:29:51 +00:00
|
|
|
return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
|
2012-09-04 12:54:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Dequeue one object from a ring.
|
|
|
|
*
|
|
|
|
* This function calls the multi-consumers or the single-consumer
|
|
|
|
* version depending on the default behaviour that was specified at
|
|
|
|
* ring creation time (see flags).
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj_p
|
|
|
|
* A pointer to a void * pointer (object) that will be filled.
|
|
|
|
* @return
|
|
|
|
* - 0: Success, objects dequeued.
|
|
|
|
* - -ENOENT: Not enough entries in the ring to dequeue, no object is
|
|
|
|
* dequeued.
|
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline int
|
2012-09-04 12:54:00 +00:00
|
|
|
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
|
|
|
|
{
|
2017-04-13 09:42:56 +00:00
|
|
|
return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
|
2012-09-04 12:54:00 +00:00
|
|
|
}
|
|
|
|
|
2019-07-16 19:23:55 +00:00
|
|
|
/**
 * Flush a ring.
 *
 * This function flushes all the elements in a ring.
 *
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * @warning
 * Make sure the ring is not in use while calling this function.
 *
 * @param r
 *   A pointer to the ring structure.
 */
__rte_experimental
void
rte_ring_reset(struct rte_ring *r);
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/**
|
2017-06-30 15:06:17 +00:00
|
|
|
* Return the number of entries in a ring.
|
2012-09-04 12:54:00 +00:00
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @return
|
2017-06-30 15:06:17 +00:00
|
|
|
* The number of entries in the ring.
|
2012-09-04 12:54:00 +00:00
|
|
|
*/
|
2017-06-30 15:06:17 +00:00
|
|
|
static inline unsigned
|
|
|
|
rte_ring_count(const struct rte_ring *r)
|
2012-09-04 12:54:00 +00:00
|
|
|
{
|
|
|
|
uint32_t prod_tail = r->prod.tail;
|
|
|
|
uint32_t cons_tail = r->cons.tail;
|
2017-06-30 15:06:17 +00:00
|
|
|
uint32_t count = (prod_tail - cons_tail) & r->mask;
|
|
|
|
return (count > r->capacity) ? r->capacity : count;
|
2012-09-04 12:54:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2017-06-30 15:06:17 +00:00
|
|
|
* Return the number of free entries in a ring.
|
2012-09-04 12:54:00 +00:00
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @return
|
2017-06-30 15:06:17 +00:00
|
|
|
* The number of free entries in the ring.
|
2012-09-04 12:54:00 +00:00
|
|
|
*/
|
2017-06-30 15:06:17 +00:00
|
|
|
static inline unsigned
|
|
|
|
rte_ring_free_count(const struct rte_ring *r)
|
2012-09-04 12:54:00 +00:00
|
|
|
{
|
2017-06-30 15:06:17 +00:00
|
|
|
return r->capacity - rte_ring_count(r);
|
2012-09-04 12:54:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Test if a ring is full.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   - 1: The ring is full.
 *   - 0: The ring is not full.
 */
static inline int
rte_ring_full(const struct rte_ring *r)
{
	unsigned int free_entries = rte_ring_free_count(r);

	return free_entries == 0;
}
|
|
|
|
|
|
|
|
/**
 * Test if a ring is empty.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   - 1: The ring is empty.
 *   - 0: The ring is not empty.
 */
static inline int
rte_ring_empty(const struct rte_ring *r)
{
	unsigned int entries = rte_ring_count(r);

	return entries == 0;
}
|
|
|
|
|
2017-02-23 16:42:00 +00:00
|
|
|
/**
 * Return the size of the ring.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   The size of the data store used by the ring.
 *   NOTE: this is not the same as the usable space in the ring. To query that
 *   use ``rte_ring_get_capacity()``.
 */
static inline unsigned int
rte_ring_get_size(const struct rte_ring *r)
{
	return r->size;
}
|
|
|
|
|
2017-06-30 15:06:17 +00:00
|
|
|
/**
 * Return the number of elements which can be stored in the ring.
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   The usable size of the ring, i.e. the maximum number of objects it
 *   can hold (may be smaller than ``rte_ring_get_size()``).
 */
static inline unsigned int
rte_ring_get_capacity(const struct rte_ring *r)
{
	return r->capacity;
}
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
/**
 * Dump the status of all rings to a file.
 *
 * @param f
 *   A pointer to a file for output
 */
void rte_ring_list_dump(FILE *f);
|
2012-09-04 12:54:00 +00:00
|
|
|
|
|
|
|
/**
 * Search a ring from its name.
 *
 * @param name
 *   The name of the ring.
 * @return
 *   The pointer to the ring matching the name, or NULL if not found,
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *    - ENOENT - required entry not available to return.
 */
struct rte_ring *rte_ring_lookup(const char *name);
|
|
|
|
|
2012-12-19 23:00:00 +00:00
|
|
|
/**
|
|
|
|
* Enqueue several objects on the ring (multi-producers safe).
|
|
|
|
*
|
|
|
|
* This function uses a "compare and set" instruction to move the
|
|
|
|
* producer index atomically.
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj_table
|
|
|
|
* A pointer to a table of void * pointers (objects).
|
|
|
|
* @param n
|
|
|
|
* The number of objects to add in the ring from the obj_table.
|
2017-03-29 15:21:24 +00:00
|
|
|
* @param free_space
|
|
|
|
* if non-NULL, returns the amount of space in the ring after the
|
|
|
|
* enqueue operation has finished.
|
2012-12-19 23:00:00 +00:00
|
|
|
* @return
|
|
|
|
* - n: Actual number of objects enqueued.
|
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline unsigned
|
2012-12-19 23:00:00 +00:00
|
|
|
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
|
2017-03-29 15:21:24 +00:00
|
|
|
unsigned int n, unsigned int *free_space)
|
2012-12-19 23:00:00 +00:00
|
|
|
{
|
2017-03-29 15:21:28 +00:00
|
|
|
return __rte_ring_do_enqueue(r, obj_table, n,
|
|
|
|
RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
|
2012-12-19 23:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Enqueue several objects on a ring (NOT multi-producers safe).
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj_table
|
|
|
|
* A pointer to a table of void * pointers (objects).
|
|
|
|
* @param n
|
|
|
|
* The number of objects to add in the ring from the obj_table.
|
2017-03-29 15:21:24 +00:00
|
|
|
* @param free_space
|
|
|
|
* if non-NULL, returns the amount of space in the ring after the
|
|
|
|
* enqueue operation has finished.
|
2012-12-19 23:00:00 +00:00
|
|
|
* @return
|
|
|
|
* - n: Actual number of objects enqueued.
|
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline unsigned
|
2012-12-19 23:00:00 +00:00
|
|
|
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
|
2017-03-29 15:21:24 +00:00
|
|
|
unsigned int n, unsigned int *free_space)
|
2012-12-19 23:00:00 +00:00
|
|
|
{
|
2017-03-29 15:21:28 +00:00
|
|
|
return __rte_ring_do_enqueue(r, obj_table, n,
|
|
|
|
RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
|
2012-12-19 23:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Enqueue several objects on a ring.
|
|
|
|
*
|
|
|
|
* This function calls the multi-producer or the single-producer
|
|
|
|
* version depending on the default behavior that was specified at
|
|
|
|
* ring creation time (see flags).
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj_table
|
|
|
|
* A pointer to a table of void * pointers (objects).
|
|
|
|
* @param n
|
|
|
|
* The number of objects to add in the ring from the obj_table.
|
2017-03-29 15:21:24 +00:00
|
|
|
* @param free_space
|
|
|
|
* if non-NULL, returns the amount of space in the ring after the
|
|
|
|
* enqueue operation has finished.
|
2012-12-19 23:00:00 +00:00
|
|
|
* @return
|
|
|
|
* - n: Actual number of objects enqueued.
|
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline unsigned
|
2012-12-19 23:00:00 +00:00
|
|
|
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
|
2017-03-29 15:21:24 +00:00
|
|
|
unsigned int n, unsigned int *free_space)
|
2012-12-19 23:00:00 +00:00
|
|
|
{
|
2017-03-29 15:21:28 +00:00
|
|
|
return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
|
|
|
|
r->prod.single, free_space);
|
2012-12-19 23:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Dequeue several objects from a ring (multi-consumers safe). When the request
|
|
|
|
* objects are more than the available objects, only dequeue the actual number
|
|
|
|
* of objects
|
|
|
|
*
|
|
|
|
* This function uses a "compare and set" instruction to move the
|
|
|
|
* consumer index atomically.
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj_table
|
|
|
|
* A pointer to a table of void * pointers (objects) that will be filled.
|
|
|
|
* @param n
|
|
|
|
* The number of objects to dequeue from the ring to the obj_table.
|
2017-03-29 15:21:25 +00:00
|
|
|
* @param available
|
|
|
|
* If non-NULL, returns the number of remaining ring entries after the
|
|
|
|
* dequeue has finished.
|
2012-12-19 23:00:00 +00:00
|
|
|
* @return
|
|
|
|
* - n: Actual number of objects dequeued, 0 if ring is empty
|
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline unsigned
|
2017-03-29 15:21:25 +00:00
|
|
|
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
|
|
|
|
unsigned int n, unsigned int *available)
|
2012-12-19 23:00:00 +00:00
|
|
|
{
|
2017-03-29 15:21:28 +00:00
|
|
|
return __rte_ring_do_dequeue(r, obj_table, n,
|
|
|
|
RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
|
2012-12-19 23:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Dequeue several objects from a ring (NOT multi-consumers safe).When the
|
|
|
|
* request objects are more than the available objects, only dequeue the
|
|
|
|
* actual number of objects
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj_table
|
|
|
|
* A pointer to a table of void * pointers (objects) that will be filled.
|
|
|
|
* @param n
|
|
|
|
* The number of objects to dequeue from the ring to the obj_table.
|
2017-03-29 15:21:25 +00:00
|
|
|
* @param available
|
|
|
|
* If non-NULL, returns the number of remaining ring entries after the
|
|
|
|
* dequeue has finished.
|
2012-12-19 23:00:00 +00:00
|
|
|
* @return
|
|
|
|
* - n: Actual number of objects dequeued, 0 if ring is empty
|
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline unsigned
|
2017-03-29 15:21:25 +00:00
|
|
|
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
|
|
|
|
unsigned int n, unsigned int *available)
|
2012-12-19 23:00:00 +00:00
|
|
|
{
|
2017-03-29 15:21:28 +00:00
|
|
|
return __rte_ring_do_dequeue(r, obj_table, n,
|
|
|
|
RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
|
2012-12-19 23:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Dequeue multiple objects from a ring up to a maximum number.
|
|
|
|
*
|
|
|
|
* This function calls the multi-consumers or the single-consumer
|
|
|
|
* version, depending on the default behaviour that was specified at
|
|
|
|
* ring creation time (see flags).
|
|
|
|
*
|
|
|
|
* @param r
|
|
|
|
* A pointer to the ring structure.
|
|
|
|
* @param obj_table
|
|
|
|
* A pointer to a table of void * pointers (objects) that will be filled.
|
|
|
|
* @param n
|
|
|
|
* The number of objects to dequeue from the ring to the obj_table.
|
2017-03-29 15:21:25 +00:00
|
|
|
* @param available
|
|
|
|
* If non-NULL, returns the number of remaining ring entries after the
|
|
|
|
* dequeue has finished.
|
2012-12-19 23:00:00 +00:00
|
|
|
* @return
|
2014-12-15 13:41:46 +00:00
|
|
|
* - Number of objects dequeued
|
2012-12-19 23:00:00 +00:00
|
|
|
*/
|
2017-05-13 09:27:25 +00:00
|
|
|
static __rte_always_inline unsigned
|
2017-03-29 15:21:25 +00:00
|
|
|
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
|
|
|
|
unsigned int n, unsigned int *available)
|
2012-12-19 23:00:00 +00:00
|
|
|
{
|
2017-03-29 15:21:28 +00:00
|
|
|
return __rte_ring_do_dequeue(r, obj_table, n,
|
|
|
|
RTE_RING_QUEUE_VARIABLE,
|
|
|
|
r->cons.single, available);
|
2012-12-19 23:00:00 +00:00
|
|
|
}
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
#ifdef __cplusplus
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#endif /* _RTE_RING_H_ */
|