mempool: deprecate xmem functions

Move the rte_mempool_xmem_size() code into an internal helper function,
since it is needed in two places: the deprecated rte_mempool_xmem_size()
and the non-deprecated rte_mempool_op_calc_mem_size_default().

Suggested-by: Olivier Matz <olivier.matz@6wind.com>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
Author: Andrew Rybchenko, 2018-04-16 14:24:36 +01:00 (committed by Thomas Monjalon)
Parent: ce1f2c61ed
Commit: fd943c764a
8 changed files with 70 additions and 43 deletions
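In short, the body of rte_mempool_xmem_size() moves into a new internal helper,
rte_mempool_calc_mem_size_helper(), and both the deprecated wrapper and the default
calc_mem_size callback call it. A condensed sketch of the resulting call structure,
assembled from the hunks below (a simplification, not verbatim library source):

#include <stdint.h>
#include <stddef.h>

/* New internal helper: holds the body formerly in rte_mempool_xmem_size(). */
size_t
rte_mempool_calc_mem_size_helper(uint32_t elt_num, size_t total_elt_sz,
                                 uint32_t pg_shift);

/* Deprecated wrapper: keeps its old signature, ignores 'flags', delegates. */
size_t
rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
                      unsigned int flags)
{
        (void)flags;    /* marked __rte_unused in the real definition */
        return rte_mempool_calc_mem_size_helper(elt_num, total_elt_sz,
                                                pg_shift);
}

/* rte_mempool_op_calc_mem_size_default() calls the same helper; see the
 * rte_mempool_ops_default.c hunk near the end of this commit. */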


@@ -48,13 +48,6 @@ Deprecation Notices
- ``rte_eal_mbuf_default_mempool_ops``
* mempool: several API and ABI changes are planned in v18.05.
The following functions, introduced for Xen, which is not supported
anymore since v17.11, are hard to use, not used anywhere else in DPDK.
Therefore they will be deprecated in v18.05 and removed in v18.08:
- ``rte_mempool_xmem_create``
- ``rte_mempool_xmem_size``
- ``rte_mempool_xmem_usage``
The following changes are planned:


@@ -156,6 +156,13 @@ API Changes
Now the new driver callbacks ``calc_mem_size`` and ``populate`` may be
used to achieve it without specific knowledge in the generic code.
* mempool: xmem functions have been deprecated:
- ``rte_mempool_xmem_create``
- ``rte_mempool_xmem_size``
- ``rte_mempool_xmem_usage``
- ``rte_mempool_populate_iova_tab``
* mbuf: The control mbuf API has been removed in v18.05. The impacted
functions and macros are:


@@ -7,6 +7,9 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_mempool.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
# Allow deprecated symbol to use deprecated rte_mempool_populate_iova_tab()
# from earlier deprecated rte_mempool_populate_phys_tab()
CFLAGS += -Wno-deprecated-declarations
LDLIBS += -lrte_eal -lrte_ring
EXPORT_MAP := rte_mempool_version.map


@@ -1,6 +1,18 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
extra_flags = []
# Allow deprecated symbol to use deprecated rte_mempool_populate_iova_tab()
# from earlier deprecated rte_mempool_populate_phys_tab()
extra_flags += '-Wno-deprecated-declarations'
foreach flag: extra_flags
if cc.has_argument(flag)
cflags += flag
endif
endforeach
version = 4
sources = files('rte_mempool.c', 'rte_mempool_ops.c',
'rte_mempool_ops_default.c')


@@ -227,11 +227,13 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
/*
* Calculate maximum amount of memory required to store given number of objects.
* Internal function to calculate required memory chunk size shared
* by default implementation of the corresponding callback and
* deprecated external function.
*/
size_t
rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
__rte_unused unsigned int flags)
rte_mempool_calc_mem_size_helper(uint32_t elt_num, size_t total_elt_sz,
uint32_t pg_shift)
{
size_t obj_per_page, pg_num, pg_sz;
@@ -250,6 +252,17 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
return pg_num << pg_shift;
}
/*
* Calculate maximum amount of memory required to store given number of objects.
*/
size_t
rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
__rte_unused unsigned int flags)
{
return rte_mempool_calc_mem_size_helper(elt_num, total_elt_sz,
pg_shift);
}
/*
* Calculate how much memory would be actually required with the
* given memory footprint to store required number of elements.


@@ -426,6 +426,28 @@ ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
uint32_t obj_num, uint32_t pg_shift,
size_t *min_chunk_size, size_t *align);
/**
* @internal Helper function to calculate memory size required to store
* specified number of objects in assumption that the memory buffer will
* be aligned at page boundary.
*
* Note that if object size is bigger than page size, then it assumes
* that pages are grouped in subsets of physically continuous pages big
* enough to store at least one object.
*
* @param elt_num
* Number of elements.
* @param total_elt_sz
* The size of each element, including header and trailer, as returned
* by rte_mempool_calc_obj_size().
* @param pg_shift
* LOG2 of the physical pages size. If set to 0, ignore page boundaries.
* @return
* Required memory size aligned at page boundary.
*/
size_t rte_mempool_calc_mem_size_helper(uint32_t elt_num, size_t total_elt_sz,
uint32_t pg_shift);
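A rough sketch of the computation this helper performs, pieced together from the
comment above and the fragments visible in the rte_mempool.c hunk; the branch
details are an approximation, not the verbatim library source (RTE_ALIGN_CEIL
comes from rte_common.h):

size_t
rte_mempool_calc_mem_size_helper(uint32_t elt_num, size_t total_elt_sz,
                                 uint32_t pg_shift)
{
        size_t obj_per_page, pg_num, pg_sz;

        if (total_elt_sz == 0)
                return 0;

        /* pg_shift == 0: ignore page boundaries, plain array of objects */
        if (pg_shift == 0)
                return total_elt_sz * elt_num;

        pg_sz = (size_t)1 << pg_shift;
        obj_per_page = pg_sz / total_elt_sz;
        if (obj_per_page == 0)
                /* object bigger than a page: round each object up to a
                 * whole number of contiguous pages */
                return RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * elt_num;

        /* otherwise, count the whole pages needed for elt_num objects */
        pg_num = (elt_num + obj_per_page - 1) / obj_per_page;
        return pg_num << pg_shift;
}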
/**
* Function to be called for each populated object.
*
@@ -855,6 +877,7 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
int socket_id, unsigned flags);
/**
* @deprecated
* Create a new mempool named *name* in memory.
*
* The pool contains n elements of elt_size. Its size is set to n.
@@ -912,6 +935,7 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
* The pointer to the new allocated mempool, on success. NULL on error
* with rte_errno set appropriately. See rte_mempool_create() for details.
*/
__rte_deprecated
struct rte_mempool *
rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
unsigned cache_size, unsigned private_data_size,
@@ -1008,6 +1032,7 @@ int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
void *opaque);
/**
* @deprecated
* Add physical memory for objects in the pool at init
*
* Add a virtually contiguous memory chunk in the pool where objects can
@@ -1033,6 +1058,7 @@ int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
* On error, the chunks are not added in the memory list of the
* mempool and a negative errno is returned.
*/
__rte_deprecated
int rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift,
rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
@@ -1652,6 +1678,7 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
struct rte_mempool_objsz *sz);
/**
* @deprecated
* Get the size of memory required to store mempool elements.
*
* Calculate the maximum amount of memory required to store given number
@@ -1674,10 +1701,12 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
* @return
* Required memory size aligned at page boundary.
*/
__rte_deprecated
size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
uint32_t pg_shift, unsigned int flags);
/**
* @deprecated
* Get the size of memory required to store mempool elements.
*
* Calculate how much memory would be actually required with the given
@@ -1705,6 +1734,7 @@ size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
* buffer is too small, return a negative value whose absolute value
* is the actual number of elements that can be stored in that buffer.
*/
__rte_deprecated
ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num,
size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num,
uint32_t pg_shift, unsigned int flags);


@@ -16,8 +16,8 @@ rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
mem_size = rte_mempool_xmem_size(obj_num, total_elt_sz, pg_shift,
mp->flags);
mem_size = rte_mempool_calc_mem_size_helper(obj_num, total_elt_sz,
pg_shift);
*min_chunk_size = RTE_MAX((size_t)1 << pg_shift, total_elt_sz);
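Put back into context, the whole default callback now reads roughly as below.
Only the lines shown in the hunk above are confirmed by this commit; the
function framing and the *align assignment are assumptions about the
surrounding, unchanged code:

ssize_t
rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
                                     uint32_t obj_num, uint32_t pg_shift,
                                     size_t *min_chunk_size, size_t *align)
{
        size_t total_elt_sz;
        size_t mem_size;

        /* full per-object footprint: header + object + trailer */
        total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

        /* same size computation as the deprecated rte_mempool_xmem_size() */
        mem_size = rte_mempool_calc_mem_size_helper(obj_num, total_elt_sz,
                                                    pg_shift);

        /* a memory chunk must fit at least one object or one page */
        *min_chunk_size = RTE_MAX((size_t)1 << pg_shift, total_elt_sz);

        *align = RTE_CACHE_LINE_SIZE;   /* assumed: cache-line alignment */

        return mem_size;
}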


@@ -444,34 +444,6 @@ test_mempool_same_name_twice_creation(void)
return 0;
}
/*
* Basic test for mempool_xmem functions.
*/
static int
test_mempool_xmem_misc(void)
{
uint32_t elt_num, total_size;
size_t sz;
ssize_t usz;
elt_num = MAX_KEEP;
total_size = rte_mempool_calc_obj_size(MEMPOOL_ELT_SIZE, 0, NULL);
sz = rte_mempool_xmem_size(elt_num, total_size, MEMPOOL_PG_SHIFT_MAX,
0);
usz = rte_mempool_xmem_usage(NULL, elt_num, total_size, 0, 1,
MEMPOOL_PG_SHIFT_MAX, 0);
if (sz != (size_t)usz) {
printf("failure @ %s: rte_mempool_xmem_usage(%u, %u) "
"returns: %#zx, while expected: %#zx;\n",
__func__, elt_num, total_size, sz, (size_t)usz);
return -1;
}
return 0;
}
static void
walk_cb(struct rte_mempool *mp, void *userdata __rte_unused)
{
@@ -596,9 +568,6 @@ test_mempool(void)
if (test_mempool_same_name_twice_creation() < 0)
goto err;
if (test_mempool_xmem_misc() < 0)
goto err;
/* test the stack handler */
if (test_mempool_basic(mp_stack, 1) < 0)
goto err;