mempool: fold memory size calculation helper

rte_mempool_calc_mem_size_helper() was introduced to avoid code
duplication and was used in the deprecated rte_mempool_xmem_size()
and in rte_mempool_op_calc_mem_size_default(). Now that the former
has been removed, fold the helper into the latter to make it more
readable.

Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Author:    Andrew Rybchenko <arybchenko@solarflare.com>
Date:      2018-07-27 14:46:05 +01:00
Committer: Thomas Monjalon
Commit:    91ad034919
Parent:    e2594a36e9

3 changed files with 22 additions and 50 deletions

lib/librte_mempool/rte_mempool.c

@@ -225,31 +225,6 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 	return sz->total_size;
 }
 
-/*
- * Internal function to calculate required memory chunk size.
- */
-size_t
-rte_mempool_calc_mem_size_helper(uint32_t elt_num, size_t total_elt_sz,
-				 uint32_t pg_shift)
-{
-	size_t obj_per_page, pg_num, pg_sz;
-
-	if (total_elt_sz == 0)
-		return 0;
-
-	if (pg_shift == 0)
-		return total_elt_sz * elt_num;
-
-	pg_sz = (size_t)1 << pg_shift;
-	obj_per_page = pg_sz / total_elt_sz;
-	if (obj_per_page == 0)
-		return RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * elt_num;
-
-	pg_num = (elt_num + obj_per_page - 1) / obj_per_page;
-	return pg_num << pg_shift;
-}
-
 /* free a memchunk allocated with rte_memzone_reserve() */
 static void
 rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr,
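For readers following the arithmetic of the removed helper, here is a
standalone trace of its main branch; the element count and object size
below are illustrative numbers, not values from this commit:

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint32_t elt_num = 1000;    /* hypothetical element count */
		size_t total_elt_sz = 2176; /* hypothetical size: header + obj + trailer */
		uint32_t pg_shift = 12;     /* 4 KiB pages */

		size_t pg_sz = (size_t)1 << pg_shift;       /* 4096 */
		size_t obj_per_page = pg_sz / total_elt_sz; /* 4096 / 2176 = 1 */
		/* round up: number of pages needed to hold all objects */
		size_t pg_num = (elt_num + obj_per_page - 1) / obj_per_page;

		/* objects never straddle a page here, so 1000 pages are
		 * charged even though each wastes 4096 - 2176 bytes:
		 * mem_size = 1000 << 12 = 4096000 */
		printf("mem_size = %zu\n", pg_num << pg_shift);
		return 0;
	}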

lib/librte_mempool/rte_mempool.h

@@ -487,28 +487,6 @@ ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
 		uint32_t obj_num, uint32_t pg_shift,
 		size_t *min_chunk_size, size_t *align);
 
-/**
- * @internal Helper function to calculate memory size required to store
- * specified number of objects in assumption that the memory buffer will
- * be aligned at page boundary.
- *
- * Note that if object size is bigger than page size, then it assumes
- * that pages are grouped in subsets of physically continuous pages big
- * enough to store at least one object.
- *
- * @param elt_num
- *   Number of elements.
- * @param total_elt_sz
- *   The size of each element, including header and trailer, as returned
- *   by rte_mempool_calc_obj_size().
- * @param pg_shift
- *   LOG2 of the physical pages size. If set to 0, ignore page boundaries.
- * @return
- *   Required memory size aligned at page boundary.
- */
-size_t rte_mempool_calc_mem_size_helper(uint32_t elt_num, size_t total_elt_sz,
-		uint32_t pg_shift);
-
 /**
  * Function to be called for each populated object.
  *
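The "bigger than page size" note in the removed doc comment covers the
subtle case. A hedged illustration of what is charged per object when
one object spans pages; the values are invented and the macro below is
a local stand-in for RTE_ALIGN_CEIL:

	#include <stdio.h>
	#include <stdint.h>

	/* stand-in for RTE_ALIGN_CEIL, valid for any positive alignment */
	#define ALIGN_CEIL(v, a) ((((v) + (a) - 1) / (a)) * (a))

	int
	main(void)
	{
		size_t pg_sz = (size_t)1 << 12; /* 4 KiB pages */
		size_t total_elt_sz = 6144;     /* hypothetical 6 KiB object */

		/* obj_per_page = 4096 / 6144 = 0, so each object is charged
		 * a run of physically contiguous pages: 8192 bytes here */
		printf("per-object size = %zu\n",
		       ALIGN_CEIL(total_elt_sz, pg_sz));
		return 0;
	}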

lib/librte_mempool/rte_mempool_ops_default.c

@@ -12,12 +12,31 @@ rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
 		size_t *min_chunk_size, size_t *align)
 {
 	size_t total_elt_sz;
+	size_t obj_per_page, pg_num, pg_sz;
 	size_t mem_size;
 
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 
-	mem_size = rte_mempool_calc_mem_size_helper(obj_num, total_elt_sz,
-			pg_shift);
+	if (total_elt_sz == 0) {
+		mem_size = 0;
+	} else if (pg_shift == 0) {
+		mem_size = total_elt_sz * obj_num;
+	} else {
+		pg_sz = (size_t)1 << pg_shift;
+		obj_per_page = pg_sz / total_elt_sz;
+		if (obj_per_page == 0) {
+			/*
+			 * Note that if object size is bigger than page size,
+			 * then it is assumed that pages are grouped in subsets
+			 * of physically continuous pages big enough to store
+			 * at least one object.
+			 */
+			mem_size =
+				RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * obj_num;
+		} else {
+			pg_num = (obj_num + obj_per_page - 1) / obj_per_page;
+			mem_size = pg_num << pg_shift;
+		}
+	}
 
 	*min_chunk_size = RTE_MAX((size_t)1 << pg_shift, total_elt_sz);
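For context on how this default is reached: callers do not invoke it
directly but go through the ops wrapper, which dispatches to whatever
calc_mem_size callback the pool's driver registered. A minimal sketch
of that call shape, assuming a populated struct rte_mempool *mp and a
pg_shift already chosen by the caller:

	/* Sketch only: query the driver's memory-size callback (this
	 * default unless overridden) for the space needed to hold all
	 * mp->size objects. */
	size_t min_chunk_size, align;
	ssize_t mem_size;

	mem_size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift,
						 &min_chunk_size, &align);
	if (mem_size < 0)
		return mem_size; /* op could not express the requirement */
	/* reserve chunks of at least min_chunk_size, aligned to 'align' */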