mempool: add namespace to flags

Fix the mempool flags namespace by adding an RTE_ prefix to the names.
The old flags remain usable, to be deprecated in the future. The flag
MEMPOOL_F_NON_IO, added in this release, is simply renamed to carry the
RTE_ prefix.

Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
parent 925a83a5bf
commit c47d7b90a1
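The rename is mechanical: each public MEMPOOL_F_* flag gains an RTE_ prefix, and the old macros remain as backward-compatibility synonyms until their eventual deprecation. Below is a minimal sketch of new-style usage; the pool name, element count, and sizes are illustrative only, not taken from this commit.

    #include <rte_mempool.h>

    /* Create a single-producer/single-consumer pool using the new
     * RTE_-prefixed flag names. The old MEMPOOL_F_* spellings still
     * compile, since they are kept as synonyms of the new macros. */
    static struct rte_mempool *
    make_spsc_pool(void)
    {
            return rte_mempool_create("example_pool", /* illustrative name */
                                      1024,           /* number of elements */
                                      256,            /* element size */
                                      32,             /* per-lcore cache size */
                                      0,              /* private data size */
                                      NULL, NULL,     /* pool ctor + arg */
                                      NULL, NULL,     /* object ctor + arg */
                                      SOCKET_ID_ANY,
                                      RTE_MEMPOOL_F_SP_PUT |
                                      RTE_MEMPOOL_F_SC_GET);
    }

Passing both the SP and SC flags makes rte_mempool_create() select the "ring_sp_sc" ops, exactly as the lib/mempool hunk below shows.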
@@ -1299,13 +1299,13 @@ show_mempool(char *name)
 			"\t -- Not used for IO (%c)\n",
 			ptr->name,
 			ptr->socket_id,
-			(flags & MEMPOOL_F_NO_SPREAD) ? 'y' : 'n',
-			(flags & MEMPOOL_F_NO_CACHE_ALIGN) ? 'y' : 'n',
-			(flags & MEMPOOL_F_SP_PUT) ? 'y' : 'n',
-			(flags & MEMPOOL_F_SC_GET) ? 'y' : 'n',
-			(flags & MEMPOOL_F_POOL_CREATED) ? 'y' : 'n',
-			(flags & MEMPOOL_F_NO_IOVA_CONTIG) ? 'y' : 'n',
-			(flags & MEMPOOL_F_NON_IO) ? 'y' : 'n');
+			(flags & RTE_MEMPOOL_F_NO_SPREAD) ? 'y' : 'n',
+			(flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) ? 'y' : 'n',
+			(flags & RTE_MEMPOOL_F_SP_PUT) ? 'y' : 'n',
+			(flags & RTE_MEMPOOL_F_SC_GET) ? 'y' : 'n',
+			(flags & RTE_MEMPOOL_F_POOL_CREATED) ? 'y' : 'n',
+			(flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG) ? 'y' : 'n',
+			(flags & RTE_MEMPOOL_F_NON_IO) ? 'y' : 'n');
 	printf(" - Size %u Cache %u element %u\n"
 	       " - header %u trailer %u\n"
 	       " - private data size %u\n",
@@ -1396,7 +1396,7 @@ launch_args_parse(int argc, char** argv)
 						 "noisy-lkup-num-reads-writes must be >= 0\n");
 			}
 			if (!strcmp(lgopts[opt_idx].name, "no-iova-contig"))
-				mempool_flags = MEMPOOL_F_NO_IOVA_CONTIG;
+				mempool_flags = RTE_MEMPOOL_F_NO_IOVA_CONTIG;
 
 			if (!strcmp(lgopts[opt_idx].name, "rx-mq-mode")) {
 				char *end = NULL;
@@ -1440,7 +1440,7 @@ launch_args_parse(int argc, char** argv)
 	rx_mode.offloads = rx_offloads;
 	tx_mode.offloads = tx_offloads;
 
-	if (mempool_flags & MEMPOOL_F_NO_IOVA_CONTIG &&
+	if (mempool_flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG &&
 	    mp_alloc_type != MP_ALLOC_ANON) {
 		TESTPMD_LOG(WARNING, "cannot use no-iova-contig without "
 			    "mp-alloc=anon. mempool no-iova-contig is "
@@ -215,7 +215,7 @@ static int test_mempool_creation_with_unknown_flag(void)
 		MEMPOOL_ELT_SIZE, 0, 0,
 		NULL, NULL,
 		NULL, NULL,
-		SOCKET_ID_ANY, MEMPOOL_F_NO_IOVA_CONTIG << 1);
+		SOCKET_ID_ANY, RTE_MEMPOOL_F_NO_IOVA_CONTIG << 1);
 
 	if (mp_cov != NULL) {
 		rte_mempool_free(mp_cov);
@@ -338,8 +338,8 @@ test_mempool_sp_sc(void)
 			my_mp_init, NULL,
 			my_obj_init, NULL,
 			SOCKET_ID_ANY,
-			MEMPOOL_F_NO_CACHE_ALIGN | MEMPOOL_F_SP_PUT |
-			MEMPOOL_F_SC_GET);
+			RTE_MEMPOOL_F_NO_CACHE_ALIGN | RTE_MEMPOOL_F_SP_PUT |
+			RTE_MEMPOOL_F_SC_GET);
 		if (mp_spsc == NULL)
 			RET_ERR();
 	}
@@ -745,14 +745,14 @@ test_mempool_flag_non_io_set_when_no_iova_contig_set(void)
 
 	mp = rte_mempool_create_empty("empty", MEMPOOL_SIZE,
 				      MEMPOOL_ELT_SIZE, 0, 0,
-				      SOCKET_ID_ANY, MEMPOOL_F_NO_IOVA_CONTIG);
+				      SOCKET_ID_ANY, RTE_MEMPOOL_F_NO_IOVA_CONTIG);
 	RTE_TEST_ASSERT_NOT_NULL(mp, "Cannot create mempool: %s",
 				 rte_strerror(rte_errno));
 	rte_mempool_set_ops_byname(mp, rte_mbuf_best_mempool_ops(), NULL);
 	ret = rte_mempool_populate_default(mp);
 	RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
 			rte_strerror(-ret));
-	RTE_TEST_ASSERT(mp->flags & MEMPOOL_F_NON_IO,
+	RTE_TEST_ASSERT(mp->flags & RTE_MEMPOOL_F_NON_IO,
 			"NON_IO flag is not set when NO_IOVA_CONTIG is set");
 	ret = TEST_SUCCESS;
 exit:
@@ -789,20 +789,20 @@ test_mempool_flag_non_io_unset_when_populated_with_valid_iova(void)
 					RTE_BAD_IOVA, block_size, NULL, NULL);
 	RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
 			rte_strerror(-ret));
-	RTE_TEST_ASSERT(mp->flags & MEMPOOL_F_NON_IO,
+	RTE_TEST_ASSERT(mp->flags & RTE_MEMPOOL_F_NON_IO,
 			"NON_IO flag is not set when mempool is populated with only RTE_BAD_IOVA");
 
 	ret = rte_mempool_populate_iova(mp, virt, iova, block_size, NULL, NULL);
 	RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
 			rte_strerror(-ret));
-	RTE_TEST_ASSERT(!(mp->flags & MEMPOOL_F_NON_IO),
+	RTE_TEST_ASSERT(!(mp->flags & RTE_MEMPOOL_F_NON_IO),
 			"NON_IO flag is not unset when mempool is populated with valid IOVA");
 
 	ret = rte_mempool_populate_iova(mp, RTE_PTR_ADD(virt, 2 * block_size),
 					RTE_BAD_IOVA, block_size, NULL, NULL);
 	RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
 			rte_strerror(-ret));
-	RTE_TEST_ASSERT(!(mp->flags & MEMPOOL_F_NON_IO),
+	RTE_TEST_ASSERT(!(mp->flags & RTE_MEMPOOL_F_NON_IO),
 			"NON_IO flag is set even when some objects have valid IOVA");
 	ret = TEST_SUCCESS;
 
@@ -826,7 +826,7 @@ test_mempool_flag_non_io_unset_by_default(void)
 	ret = rte_mempool_populate_default(mp);
 	RTE_TEST_ASSERT_EQUAL(ret, (int)mp->size, "Failed to populate mempool: %s",
 			      rte_strerror(-ret));
-	RTE_TEST_ASSERT(!(mp->flags & MEMPOOL_F_NON_IO),
+	RTE_TEST_ASSERT(!(mp->flags & RTE_MEMPOOL_F_NON_IO),
 			"NON_IO flag is set by default");
 	ret = TEST_SUCCESS;
 exit:
@@ -1004,7 +1004,7 @@ Driver options
 - ``mr_mempool_reg_en`` parameter [int]
 
   A nonzero value enables implicit registration of DMA memory of all mempools
-  except those having ``MEMPOOL_F_NON_IO``. This flag is set automatically
+  except those having ``RTE_MEMPOOL_F_NON_IO``. This flag is set automatically
   for mempools populated with non-contiguous objects or those without IOVA.
   The effect is that when a packet from a mempool is transmitted,
   its memory is already registered for DMA in the PMD and no registration
@@ -227,9 +227,12 @@ API Changes
   removed. Its usages have been replaced by a new function
   ``rte_kvargs_get_with_value()``.
 
-* mempool: Added ``MEMPOOL_F_NON_IO`` flag to give a hint to DPDK components
+* mempool: Added ``RTE_MEMPOOL_F_NON_IO`` flag to give a hint to DPDK components
   that objects from this pool will not be used for device IO (e.g. DMA).
 
+* mempool: The mempool flags ``MEMPOOL_F_*`` will be deprecated in the future.
+  Newly added flags with ``RTE_MEMPOOL_F_`` prefix should be used instead.
+
 * net: Renamed ``s_addr`` and ``d_addr`` fields of ``rte_ether_hdr`` structure
   to ``src_addr`` and ``dst_addr``, respectively.
@@ -1564,7 +1564,7 @@ int
 mlx5_mr_mempool_register(struct mlx5_mr_share_cache *share_cache, void *pd,
 			 struct rte_mempool *mp, struct mlx5_mp_id *mp_id)
 {
-	if (mp->flags & MEMPOOL_F_NON_IO)
+	if (mp->flags & RTE_MEMPOOL_F_NON_IO)
 		return 0;
 	switch (rte_eal_process_type()) {
 	case RTE_PROC_PRIMARY:
@@ -1635,7 +1635,7 @@ int
 mlx5_mr_mempool_unregister(struct mlx5_mr_share_cache *share_cache,
 			   struct rte_mempool *mp, struct mlx5_mp_id *mp_id)
 {
-	if (mp->flags & MEMPOOL_F_NON_IO)
+	if (mp->flags & RTE_MEMPOOL_F_NON_IO)
 		return 0;
 	switch (rte_eal_process_type()) {
 	case RTE_PROC_PRIMARY:
@@ -19,7 +19,7 @@ cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
 		cache_sz /= rte_lcore_count();
 	/* Create chunk pool. */
 	if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
-		mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
+		mp_flags = RTE_MEMPOOL_F_SP_PUT | RTE_MEMPOOL_F_SC_GET;
 		plt_tim_dbg("Using single producer mode");
 		tim_ring->prod_type_sp = true;
 	}
@@ -310,7 +310,7 @@ timvf_ring_create(struct rte_event_timer_adapter *adptr)
 	}
 
 	if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
-		mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
+		mp_flags = RTE_MEMPOOL_F_SP_PUT | RTE_MEMPOOL_F_SC_GET;
 		timvf_log_info("Using single producer mode");
 	}
 
@@ -81,7 +81,7 @@ tim_chnk_pool_create(struct otx2_tim_ring *tim_ring,
 		cache_sz /= rte_lcore_count();
 	/* Create chunk pool. */
 	if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
-		mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
+		mp_flags = RTE_MEMPOOL_F_SP_PUT | RTE_MEMPOOL_F_SC_GET;
 		otx2_tim_dbg("Using single producer mode");
 		tim_ring->prod_type_sp = true;
 	}
@@ -426,7 +426,7 @@ bucket_init_per_lcore(unsigned int lcore_id, void *arg)
 		goto error;
 
 	rg_flags = RING_F_SC_DEQ;
-	if (mp->flags & MEMPOOL_F_SP_PUT)
+	if (mp->flags & RTE_MEMPOOL_F_SP_PUT)
 		rg_flags |= RING_F_SP_ENQ;
 	bd->adoption_buffer_rings[lcore_id] = rte_ring_create(rg_name,
 		rte_align32pow2(mp->size + 1), mp->socket_id, rg_flags);
@@ -472,7 +472,7 @@ bucket_alloc(struct rte_mempool *mp)
 		goto no_mem_for_data;
 	}
 	bd->pool = mp;
-	if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+	if (mp->flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN)
 		bucket_header_size = sizeof(struct bucket_header);
 	else
 		bucket_header_size = RTE_CACHE_LINE_SIZE;
@@ -494,9 +494,9 @@ bucket_alloc(struct rte_mempool *mp)
 		goto no_mem_for_stacks;
 	}
 
-	if (mp->flags & MEMPOOL_F_SP_PUT)
+	if (mp->flags & RTE_MEMPOOL_F_SP_PUT)
 		rg_flags |= RING_F_SP_ENQ;
-	if (mp->flags & MEMPOOL_F_SC_GET)
+	if (mp->flags & RTE_MEMPOOL_F_SC_GET)
 		rg_flags |= RING_F_SC_DEQ;
 	rc = snprintf(rg_name, sizeof(rg_name),
 		      RTE_MEMPOOL_MZ_FORMAT ".0", mp->name);
@@ -110,9 +110,9 @@ common_ring_alloc(struct rte_mempool *mp)
 {
 	uint32_t rg_flags = 0;
 
-	if (mp->flags & MEMPOOL_F_SP_PUT)
+	if (mp->flags & RTE_MEMPOOL_F_SP_PUT)
 		rg_flags |= RING_F_SP_ENQ;
-	if (mp->flags & MEMPOOL_F_SC_GET)
+	if (mp->flags & RTE_MEMPOOL_F_SC_GET)
 		rg_flags |= RING_F_SC_DEQ;
 
 	return ring_alloc(mp, rg_flags);
@@ -127,7 +127,7 @@ mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
 					     mr_ctrl, mp, addr);
 	/*
 	 * Lookup can only fail on invalid input, e.g. "addr"
-	 * is not from "mp" or "mp" has MEMPOOL_F_NON_IO set.
+	 * is not from "mp" or "mp" has RTE_MEMPOOL_F_NON_IO set.
 	 */
 	if (lkey != UINT32_MAX)
 		return lkey;
@@ -1124,7 +1124,7 @@ nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
 
 	txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
 						 0, 0, dev->node,
-						 MEMPOOL_F_NO_SPREAD);
+						 RTE_MEMPOOL_F_NO_SPREAD);
 	txq->nb_sqb_bufs = nb_sqb_bufs;
 	txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb);
 	txq->nb_sqb_bufs_adj = nb_sqb_bufs -
@@ -1150,7 +1150,7 @@ nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
 		goto fail;
 	}
 
-	tmp = rte_mempool_calc_obj_size(blk_sz, MEMPOOL_F_NO_SPREAD, &sz);
+	tmp = rte_mempool_calc_obj_size(blk_sz, RTE_MEMPOOL_F_NO_SPREAD, &sz);
 	if (dev->sqb_size != sz.elt_size) {
 		otx2_err("sqe pool block size is not expected %d != %d",
 			 dev->sqb_size, tmp);
@@ -1302,7 +1302,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	}
 
 	/* Mempool memory must be physically contiguous */
-	if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) {
+	if (mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG) {
 		PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
 		return -EINVAL;
 	}
@@ -228,7 +228,7 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 	sz = (sz != NULL) ? sz : &lsz;
 
 	sz->header_size = sizeof(struct rte_mempool_objhdr);
-	if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
+	if ((flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) == 0)
 		sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
 			RTE_MEMPOOL_ALIGN);
 
@@ -242,7 +242,7 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 	sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t));
 
 	/* expand trailer to next cache line */
-	if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
+	if ((flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
 		sz->total_size = sz->header_size + sz->elt_size +
 			sz->trailer_size;
 		sz->trailer_size += ((RTE_MEMPOOL_ALIGN -
@@ -254,7 +254,7 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 	 * increase trailer to add padding between objects in order to
 	 * spread them across memory channels/ranks
 	 */
-	if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
+	if ((flags & RTE_MEMPOOL_F_NO_SPREAD) == 0) {
 		unsigned new_size;
 		new_size = arch_mem_object_align
 			    (sz->header_size + sz->elt_size + sz->trailer_size);
@@ -306,11 +306,11 @@ mempool_ops_alloc_once(struct rte_mempool *mp)
 	int ret;
 
 	/* create the internal ring if not already done */
-	if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
+	if ((mp->flags & RTE_MEMPOOL_F_POOL_CREATED) == 0) {
 		ret = rte_mempool_ops_alloc(mp);
 		if (ret != 0)
 			return ret;
-		mp->flags |= MEMPOOL_F_POOL_CREATED;
+		mp->flags |= RTE_MEMPOOL_F_POOL_CREATED;
 	}
 	return 0;
 }
@@ -348,7 +348,7 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	memhdr->free_cb = free_cb;
 	memhdr->opaque = opaque;
 
-	if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+	if (mp->flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN)
 		off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
 	else
 		off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_MEMPOOL_ALIGN) - vaddr;
@@ -374,7 +374,7 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 
 	/* At least some objects in the pool can now be used for IO. */
 	if (iova != RTE_BAD_IOVA)
-		mp->flags &= ~MEMPOOL_F_NON_IO;
+		mp->flags &= ~RTE_MEMPOOL_F_NON_IO;
 
 	/* Report the mempool as ready only when fully populated. */
 	if (mp->populated_size >= mp->size)
@@ -413,7 +413,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 	size_t off, phys_len;
 	int ret, cnt = 0;
 
-	if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG)
+	if (mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG)
 		return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA,
 						 len, free_cb, opaque);
 
@@ -470,7 +470,7 @@ rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)
 	if (ret < 0)
 		return -EINVAL;
 	alloc_in_ext_mem = (ret == 1);
-	need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+	need_iova_contig_obj = !(mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG);
 
 	if (!need_iova_contig_obj)
 		*pg_sz = 0;
@@ -547,7 +547,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	 * reserve space in smaller chunks.
 	 */
 
-	need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+	need_iova_contig_obj = !(mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG);
 	ret = rte_mempool_get_page_size(mp, &pg_sz);
 	if (ret < 0)
 		return ret;
@@ -798,12 +798,12 @@ rte_mempool_cache_free(struct rte_mempool_cache *cache)
 	rte_free(cache);
 }
 
-#define MEMPOOL_KNOWN_FLAGS (MEMPOOL_F_NO_SPREAD \
-	| MEMPOOL_F_NO_CACHE_ALIGN \
-	| MEMPOOL_F_SP_PUT \
-	| MEMPOOL_F_SC_GET \
-	| MEMPOOL_F_POOL_CREATED \
-	| MEMPOOL_F_NO_IOVA_CONTIG \
+#define MEMPOOL_KNOWN_FLAGS (RTE_MEMPOOL_F_NO_SPREAD \
+	| RTE_MEMPOOL_F_NO_CACHE_ALIGN \
+	| RTE_MEMPOOL_F_SP_PUT \
+	| RTE_MEMPOOL_F_SC_GET \
+	| RTE_MEMPOOL_F_POOL_CREATED \
+	| RTE_MEMPOOL_F_NO_IOVA_CONTIG \
 	)
 /* create an empty mempool */
 struct rte_mempool *
@@ -859,11 +859,11 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 	 * No objects in the pool can be used for IO until it's populated
 	 * with at least some objects with valid IOVA.
 	 */
-	flags |= MEMPOOL_F_NON_IO;
+	flags |= RTE_MEMPOOL_F_NON_IO;
 
 	/* "no cache align" imply "no spread" */
-	if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
-		flags |= MEMPOOL_F_NO_SPREAD;
+	if (flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN)
+		flags |= RTE_MEMPOOL_F_NO_SPREAD;
 
 	/* calculate mempool object sizes. */
 	if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
@@ -975,11 +975,11 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 	 * Since we have 4 combinations of the SP/SC/MP/MC examine the flags to
 	 * set the correct index into the table of ops structs.
 	 */
-	if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
+	if ((flags & RTE_MEMPOOL_F_SP_PUT) && (flags & RTE_MEMPOOL_F_SC_GET))
 		ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
-	else if (flags & MEMPOOL_F_SP_PUT)
+	else if (flags & RTE_MEMPOOL_F_SP_PUT)
 		ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
-	else if (flags & MEMPOOL_F_SC_GET)
+	else if (flags & RTE_MEMPOOL_F_SC_GET)
 		ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
 	else
 		ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
@@ -251,19 +251,44 @@ struct rte_mempool {
 } __rte_cache_aligned;
 
 /** Spreading among memory channels not required. */
-#define MEMPOOL_F_NO_SPREAD 0x0001
+#define RTE_MEMPOOL_F_NO_SPREAD 0x0001
+/**
+ * Backward compatibility synonym for RTE_MEMPOOL_F_NO_SPREAD.
+ * To be deprecated.
+ */
+#define MEMPOOL_F_NO_SPREAD RTE_MEMPOOL_F_NO_SPREAD
 /** Do not align objects on cache lines. */
-#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002
+#define RTE_MEMPOOL_F_NO_CACHE_ALIGN 0x0002
+/**
+ * Backward compatibility synonym for RTE_MEMPOOL_F_NO_CACHE_ALIGN.
+ * To be deprecated.
+ */
+#define MEMPOOL_F_NO_CACHE_ALIGN RTE_MEMPOOL_F_NO_CACHE_ALIGN
 /** Default put is "single-producer". */
-#define MEMPOOL_F_SP_PUT 0x0004
+#define RTE_MEMPOOL_F_SP_PUT 0x0004
+/**
+ * Backward compatibility synonym for RTE_MEMPOOL_F_SP_PUT.
+ * To be deprecated.
+ */
+#define MEMPOOL_F_SP_PUT RTE_MEMPOOL_F_SP_PUT
 /** Default get is "single-consumer". */
-#define MEMPOOL_F_SC_GET 0x0008
+#define RTE_MEMPOOL_F_SC_GET 0x0008
+/**
+ * Backward compatibility synonym for RTE_MEMPOOL_F_SC_GET.
+ * To be deprecated.
+ */
+#define MEMPOOL_F_SC_GET RTE_MEMPOOL_F_SC_GET
 /** Internal: pool is created. */
-#define MEMPOOL_F_POOL_CREATED 0x0010
+#define RTE_MEMPOOL_F_POOL_CREATED 0x0010
 /** Don't need IOVA contiguous objects. */
-#define MEMPOOL_F_NO_IOVA_CONTIG 0x0020
+#define RTE_MEMPOOL_F_NO_IOVA_CONTIG 0x0020
+/**
+ * Backward compatibility synonym for RTE_MEMPOOL_F_NO_IOVA_CONTIG.
+ * To be deprecated.
+ */
+#define MEMPOOL_F_NO_IOVA_CONTIG RTE_MEMPOOL_F_NO_IOVA_CONTIG
 /** Internal: no object from the pool can be used for device IO (DMA). */
-#define MEMPOOL_F_NON_IO 0x0040
+#define RTE_MEMPOOL_F_NON_IO 0x0040
 
 /**
  * @internal When debug is enabled, store some statistics.
@@ -426,9 +451,9 @@ typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
  * Calculate memory size required to store given number of objects.
 *
 * If mempool objects are not required to be IOVA-contiguous
- * (the flag MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines
+ * (the flag RTE_MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines
 * virtually contiguous chunk size. Otherwise, if mempool objects must
- * be IOVA-contiguous (the flag MEMPOOL_F_NO_IOVA_CONTIG is clear),
+ * be IOVA-contiguous (the flag RTE_MEMPOOL_F_NO_IOVA_CONTIG is clear),
 * min_chunk_size defines IOVA-contiguous chunk size.
 *
 * @param[in] mp
@@ -976,22 +1001,22 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
 *   constraint for the reserved zone.
 * @param flags
 *   The *flags* arguments is an OR of following flags:
- *   - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread
+ *   - RTE_MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread
 *     between channels in RAM: the pool allocator will add padding
 *     between objects depending on the hardware configuration. See
 *     Memory alignment constraints for details. If this flag is set,
 *     the allocator will just align them to a cache line.
- *   - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
+ *   - RTE_MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
 *     cache-aligned. This flag removes this constraint, and no
 *     padding will be present between objects. This flag implies
- *     MEMPOOL_F_NO_SPREAD.
- *   - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
+ *     RTE_MEMPOOL_F_NO_SPREAD.
+ *   - RTE_MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
 *     when using rte_mempool_put() or rte_mempool_put_bulk() is
 *     "single-producer". Otherwise, it is "multi-producers".
- *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
+ *   - RTE_MEMPOOL_F_SC_GET: If this flag is set, the default behavior
 *     when using rte_mempool_get() or rte_mempool_get_bulk() is
 *     "single-consumer". Otherwise, it is "multi-consumers".
- *   - MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't
+ *   - RTE_MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't
 *     necessarily be contiguous in IO memory.
 * @return
 *   The pointer to the new allocated mempool, on success. NULL on error
@@ -1678,7 +1703,7 @@ rte_mempool_empty(const struct rte_mempool *mp)
 *   A pointer (virtual address) to the element of the pool.
 * @return
 *   The IO address of the elt element.
- *   If the mempool was created with MEMPOOL_F_NO_IOVA_CONTIG, the
+ *   If the mempool was created with RTE_MEMPOOL_F_NO_IOVA_CONTIG, the
 *   returned value is RTE_BAD_IOVA.
 */
 static inline rte_iova_t
@@ -168,7 +168,7 @@ rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
 	unsigned i;
 
 	/* too late, the mempool is already populated. */
-	if (mp->flags & MEMPOOL_F_POOL_CREATED)
+	if (mp->flags & RTE_MEMPOOL_F_POOL_CREATED)
 		return -EEXIST;
 
 	for (i = 0; i < rte_mempool_ops_table.num_ops; i++) {
@@ -371,7 +371,8 @@ pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
 		rte_errno = EINVAL;
 		return -1;
 	}
-	if (mp->flags & MEMPOOL_F_SP_PUT || mp->flags & MEMPOOL_F_SC_GET) {
+	if (mp->flags & RTE_MEMPOOL_F_SP_PUT ||
+	    mp->flags & RTE_MEMPOOL_F_SC_GET) {
 		PDUMP_LOG(ERR,
 			  "mempool with SP or SC set not valid for pdump,"
 			  "must have MP and MC set\n");
@@ -321,8 +321,8 @@ vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
 	vq->iotlb_pool = rte_mempool_create(pool_name,
 			IOTLB_CACHE_SIZE, sizeof(struct vhost_iotlb_entry), 0,
 			0, 0, NULL, NULL, NULL, socket,
-			MEMPOOL_F_NO_CACHE_ALIGN |
-			MEMPOOL_F_SP_PUT);
+			RTE_MEMPOOL_F_NO_CACHE_ALIGN |
+			RTE_MEMPOOL_F_SP_PUT);
 	if (!vq->iotlb_pool) {
 		VHOST_LOG_CONFIG(ERR,
 				"Failed to create IOTLB cache pool (%s)\n",
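Applications that must keep compiling against DPDK releases from before this rename can invert the compatibility direction with a small shim of their own. This is a hypothetical application-side helper, not part of the commit:

    /* Hypothetical application-side shim: when the build targets an older
     * DPDK where only the unprefixed macros exist, define the new names in
     * terms of the old ones. RTE_MEMPOOL_F_NON_IO is left out because it is
     * internal and first appeared in this same release. */
    #ifndef RTE_MEMPOOL_F_NO_SPREAD
    #define RTE_MEMPOOL_F_NO_SPREAD      MEMPOOL_F_NO_SPREAD
    #define RTE_MEMPOOL_F_NO_CACHE_ALIGN MEMPOOL_F_NO_CACHE_ALIGN
    #define RTE_MEMPOOL_F_SP_PUT         MEMPOOL_F_SP_PUT
    #define RTE_MEMPOOL_F_SC_GET         MEMPOOL_F_SC_GET
    #define RTE_MEMPOOL_F_NO_IOVA_CONTIG MEMPOOL_F_NO_IOVA_CONTIG
    #endif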