/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2010-2015 Intel Corporation
 * Copyright (c) 2007,2008 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_atomic.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>

#include "rte_ring.h"
#include "rte_ring_elem.h"

TAILQ_HEAD(rte_ring_list, rte_tailq_entry);

static struct rte_tailq_elem rte_ring_tailq = {
	.name = RTE_TAILQ_RING_NAME,
};
EAL_REGISTER_TAILQ(rte_ring_tailq)

/* mask of all valid flag values to ring_create() */
#define RING_F_MASK (RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ | \
		     RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ | \
		     RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ)

/* true if x is a power of 2 */
#define POWEROF2(x) ((((x)-1) & (x)) == 0)

/* by default set head/tail distance as 1/8 of ring capacity */
#define HTD_MAX_DEF	8

/* return the size of memory occupied by a ring */
ssize_t
rte_ring_get_memsize_elem(unsigned int esize, unsigned int count)
{
	ssize_t sz;

	/* Check if element size is a multiple of 4B */
	if (esize % 4 != 0) {
		RTE_LOG(ERR, RING, "element size is not a multiple of 4\n");

		return -EINVAL;
	}

	/* count must be a power of 2 */
	if ((!POWEROF2(count)) || (count > RTE_RING_SZ_MASK)) {
		RTE_LOG(ERR, RING,
			"Requested number of elements is invalid, must be power of 2, and not exceed %u\n",
			RTE_RING_SZ_MASK);

		return -EINVAL;
	}

	sz = sizeof(struct rte_ring) + count * esize;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
	return sz;
}

/* return the size of memory occupied by a ring */
ssize_t
rte_ring_get_memsize(unsigned int count)
{
	return rte_ring_get_memsize_elem(sizeof(void *), count);
}
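
/*
 * Illustrative sketch, not part of the library code: a caller that wants
 * to place a ring in memory it manages itself could size it with
 * rte_ring_get_memsize() and then initialise it in place with
 * rte_ring_init(). The name "app_ring" and the count of 1024 are example
 * values only:
 *
 *	ssize_t sz = rte_ring_get_memsize(1024);
 *	struct rte_ring *r = NULL;
 *
 *	if (sz > 0)
 *		r = rte_malloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *	if (r != NULL && rte_ring_init(r, "app_ring", 1024, 0) != 0)
 *		r = NULL;
 */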

/*
 * internal helper function to reset prod/cons head-tail values.
 */
static void
reset_headtail(void *p)
{
	struct rte_ring_headtail *ht;
	struct rte_ring_hts_headtail *ht_hts;
	struct rte_ring_rts_headtail *ht_rts;

	ht = p;
	ht_hts = p;
	ht_rts = p;

	switch (ht->sync_type) {
	case RTE_RING_SYNC_MT:
	case RTE_RING_SYNC_ST:
		ht->head = 0;
		ht->tail = 0;
		break;
	case RTE_RING_SYNC_MT_RTS:
		ht_rts->head.raw = 0;
		ht_rts->tail.raw = 0;
		break;
	case RTE_RING_SYNC_MT_HTS:
		ht_hts->ht.raw = 0;
		break;
	default:
		/* unknown sync mode */
		RTE_ASSERT(0);
	}
}

void
rte_ring_reset(struct rte_ring *r)
{
	reset_headtail(&r->prod);
	reset_headtail(&r->cons);
}
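
/*
 * Illustrative note: rte_ring_reset() rewrites the head/tail indexes
 * directly, with no synchronisation against concurrent enqueue/dequeue,
 * so the caller is assumed to have quiesced all users of the ring first.
 * A minimal sketch, assuming a ring "r" that no other lcore is touching:
 *
 *	rte_ring_reset(r);
 *	RTE_ASSERT(rte_ring_count(r) == 0);
 */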

/*
 * helper function, calculates sync_type values for prod and cons
 * based on input flags. Returns zero on success or a negative
 * errno value otherwise.
 */
static int
get_sync_type(uint32_t flags, enum rte_ring_sync_type *prod_st,
	enum rte_ring_sync_type *cons_st)
{
	static const uint32_t prod_st_flags =
		(RING_F_SP_ENQ | RING_F_MP_RTS_ENQ | RING_F_MP_HTS_ENQ);
	static const uint32_t cons_st_flags =
		(RING_F_SC_DEQ | RING_F_MC_RTS_DEQ | RING_F_MC_HTS_DEQ);

	switch (flags & prod_st_flags) {
	case 0:
		*prod_st = RTE_RING_SYNC_MT;
		break;
	case RING_F_SP_ENQ:
		*prod_st = RTE_RING_SYNC_ST;
		break;
	case RING_F_MP_RTS_ENQ:
		*prod_st = RTE_RING_SYNC_MT_RTS;
		break;
	case RING_F_MP_HTS_ENQ:
		*prod_st = RTE_RING_SYNC_MT_HTS;
		break;
	default:
		return -EINVAL;
	}

	switch (flags & cons_st_flags) {
	case 0:
		*cons_st = RTE_RING_SYNC_MT;
		break;
	case RING_F_SC_DEQ:
		*cons_st = RTE_RING_SYNC_ST;
		break;
	case RING_F_MC_RTS_DEQ:
		*cons_st = RTE_RING_SYNC_MT_RTS;
		break;
	case RING_F_MC_HTS_DEQ:
		*cons_st = RTE_RING_SYNC_MT_HTS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
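
/*
 * Worked example of the mapping above (illustrative only): creating a ring
 * with (RING_F_MP_RTS_ENQ | RING_F_MC_HTS_DEQ) yields
 * prod.sync_type == RTE_RING_SYNC_MT_RTS and
 * cons.sync_type == RTE_RING_SYNC_MT_HTS, while flags == 0 keeps the
 * default multi-producer/multi-consumer (RTE_RING_SYNC_MT) behaviour.
 * Combining two producer flags, e.g. RING_F_SP_ENQ | RING_F_MP_RTS_ENQ,
 * matches no case label and is rejected with -EINVAL.
 */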

int
rte_ring_init(struct rte_ring *r, const char *name, unsigned int count,
	unsigned int flags)
{
	int ret;

	/* compilation-time checks */
	RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
			  RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
			  RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
			  RTE_CACHE_LINE_MASK) != 0);

	RTE_BUILD_BUG_ON(offsetof(struct rte_ring_headtail, sync_type) !=
		offsetof(struct rte_ring_hts_headtail, sync_type));
	RTE_BUILD_BUG_ON(offsetof(struct rte_ring_headtail, tail) !=
		offsetof(struct rte_ring_hts_headtail, ht.pos.tail));

	RTE_BUILD_BUG_ON(offsetof(struct rte_ring_headtail, sync_type) !=
		offsetof(struct rte_ring_rts_headtail, sync_type));
	RTE_BUILD_BUG_ON(offsetof(struct rte_ring_headtail, tail) !=
		offsetof(struct rte_ring_rts_headtail, tail.val.pos));

	/* future proof flags, only allow supported values */
	if (flags & ~RING_F_MASK) {
		RTE_LOG(ERR, RING,
			"Unsupported flags requested %#x\n", flags);
		return -EINVAL;
	}

	/* init the ring structure */
	memset(r, 0, sizeof(*r));
	ret = strlcpy(r->name, name, sizeof(r->name));
	if (ret < 0 || ret >= (int)sizeof(r->name))
		return -ENAMETOOLONG;
	r->flags = flags;
	ret = get_sync_type(flags, &r->prod.sync_type, &r->cons.sync_type);
	if (ret != 0)
		return ret;

	if (flags & RING_F_EXACT_SZ) {
		r->size = rte_align32pow2(count + 1);
		r->mask = r->size - 1;
		r->capacity = count;
	} else {
		if ((!POWEROF2(count)) || (count > RTE_RING_SZ_MASK)) {
			RTE_LOG(ERR, RING,
				"Requested size is invalid, must be power of 2, and not exceed the size limit %u\n",
				RTE_RING_SZ_MASK);
			return -EINVAL;
		}
		r->size = count;
		r->mask = count - 1;
		r->capacity = r->mask;
	}

	/* set default values for head-tail distance */
	if (flags & RING_F_MP_RTS_ENQ)
		rte_ring_set_prod_htd_max(r, r->capacity / HTD_MAX_DEF);
	if (flags & RING_F_MC_RTS_DEQ)
		rte_ring_set_cons_htd_max(r, r->capacity / HTD_MAX_DEF);

	return 0;
}
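
/*
 * Worked example of the sizing logic above (illustrative only): with
 * RING_F_EXACT_SZ and count = 1000, size becomes rte_align32pow2(1001)
 * = 1024, mask = 1023 and capacity = 1000. Without RING_F_EXACT_SZ the
 * count itself must be a power of two; count = 1024 gives size = 1024,
 * mask = 1023 and capacity = 1023, i.e. one slot stays unused so that a
 * full ring can be distinguished from an empty one.
 */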

/* create the ring for a given element size */
struct rte_ring *
rte_ring_create_elem(const char *name, unsigned int esize, unsigned int count,
		int socket_id, unsigned int flags)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	struct rte_ring *r;
	struct rte_tailq_entry *te;
	const struct rte_memzone *mz;
	ssize_t ring_size;
	int mz_flags = 0;
	struct rte_ring_list *ring_list = NULL;
	const unsigned int requested_count = count;
	int ret;

	ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);

	/* for an exact size ring, round up from count to a power of two */
	if (flags & RING_F_EXACT_SZ)
		count = rte_align32pow2(count + 1);

	ring_size = rte_ring_get_memsize_elem(esize, count);
	if (ring_size < 0) {
		/* ring_size holds a negative errno value here */
		rte_errno = -ring_size;
		return NULL;
	}

	ret = snprintf(mz_name, sizeof(mz_name), "%s%s",
		RTE_RING_MZ_PREFIX, name);
	if (ret < 0 || ret >= (int)sizeof(mz_name)) {
		rte_errno = ENAMETOOLONG;
		return NULL;
	}

	te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, RING, "Cannot reserve memory for tailq\n");
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_mcfg_tailq_write_lock();

	/* reserve a memory zone for this ring. If we can't get rte_config or
	 * we are secondary process, the memzone_reserve function will set
	 * rte_errno for us appropriately - hence no check in this function
	 */
	mz = rte_memzone_reserve_aligned(mz_name, ring_size, socket_id,
					 mz_flags, __alignof__(*r));
	if (mz != NULL) {
		r = mz->addr;
		/* no need to check return value here, we already checked the
		 * arguments above
		 */
		rte_ring_init(r, name, requested_count, flags);

		te->data = (void *) r;
		r->memzone = mz;

		TAILQ_INSERT_TAIL(ring_list, te, next);
	} else {
		r = NULL;
		RTE_LOG(ERR, RING, "Cannot reserve memory\n");
		rte_free(te);
	}
	rte_mcfg_tailq_write_unlock();

	return r;
}

/* create the ring */
struct rte_ring *
rte_ring_create(const char *name, unsigned int count, int socket_id,
		unsigned int flags)
{
	return rte_ring_create_elem(name, sizeof(void *), count, socket_id,
		flags);
}
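
/*
 * Illustrative usage sketch; "ex_ring", do_work() and some_object are
 * example names, not part of this library. A typical caller creates a
 * ring of void * objects, moves pointers through it and releases it with
 * rte_ring_free() when done:
 *
 *	struct rte_ring *r = rte_ring_create("ex_ring", 1024,
 *		rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	void *obj = &some_object, *out = NULL;
 *
 *	if (r != NULL && rte_ring_enqueue(r, obj) == 0 &&
 *			rte_ring_dequeue(r, &out) == 0)
 *		do_work(out);
 *	rte_ring_free(r);
 *
 * On failure rte_ring_create() returns NULL and sets rte_errno, as
 * documented for rte_ring_create_elem() above.
 */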

/* free the ring */
void
rte_ring_free(struct rte_ring *r)
{
	struct rte_ring_list *ring_list = NULL;
	struct rte_tailq_entry *te;

	if (r == NULL)
		return;

	/*
	 * Ring was not created with rte_ring_create,
	 * therefore, there is no memzone to free.
	 */
	if (r->memzone == NULL) {
		RTE_LOG(ERR, RING,
			"Cannot free ring, not created with rte_ring_create()\n");
		return;
	}

	if (rte_memzone_free(r->memzone) != 0) {
		RTE_LOG(ERR, RING, "Cannot free memory\n");
		return;
	}

	ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);
	rte_mcfg_tailq_write_lock();

	/* find out tailq entry */
	TAILQ_FOREACH(te, ring_list, next) {
		if (te->data == (void *) r)
			break;
	}

	if (te == NULL) {
		rte_mcfg_tailq_write_unlock();
		return;
	}

	TAILQ_REMOVE(ring_list, te, next);

	rte_mcfg_tailq_write_unlock();

	rte_free(te);
}

/* dump the status of the ring on the console */
void
rte_ring_dump(FILE *f, const struct rte_ring *r)
{
	fprintf(f, "ring <%s>@%p\n", r->name, r);
	fprintf(f, "  flags=%x\n", r->flags);
	fprintf(f, "  size=%"PRIu32"\n", r->size);
	fprintf(f, "  capacity=%"PRIu32"\n", r->capacity);
	fprintf(f, "  ct=%"PRIu32"\n", r->cons.tail);
	fprintf(f, "  ch=%"PRIu32"\n", r->cons.head);
	fprintf(f, "  pt=%"PRIu32"\n", r->prod.tail);
	fprintf(f, "  ph=%"PRIu32"\n", r->prod.head);
	fprintf(f, "  used=%u\n", rte_ring_count(r));
	fprintf(f, "  avail=%u\n", rte_ring_free_count(r));
}

/* dump the status of all rings on the console */
void
rte_ring_list_dump(FILE *f)
{
	const struct rte_tailq_entry *te;
	struct rte_ring_list *ring_list;

	ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);

	rte_mcfg_tailq_read_lock();

	TAILQ_FOREACH(te, ring_list, next) {
		rte_ring_dump(f, (struct rte_ring *) te->data);
	}

	rte_mcfg_tailq_read_unlock();
}

/* search a ring from its name */
struct rte_ring *
rte_ring_lookup(const char *name)
{
	struct rte_tailq_entry *te;
	struct rte_ring *r = NULL;
	struct rte_ring_list *ring_list;

	ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);

	rte_mcfg_tailq_read_lock();

	TAILQ_FOREACH(te, ring_list, next) {
		r = (struct rte_ring *) te->data;
		if (strncmp(name, r->name, RTE_RING_NAMESIZE) == 0)
			break;
	}

	rte_mcfg_tailq_read_unlock();

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return r;
}
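
/*
 * Illustrative sketch: a secondary process (or any other part of the
 * application) can attach to an existing ring purely by name; "ex_ring"
 * is an example name only:
 *
 *	struct rte_ring *r = rte_ring_lookup("ex_ring");
 *
 *	if (r == NULL)
 *		printf("no such ring, rte_errno=%d\n", rte_errno);
 */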