mempool: introduce function to get mempool page size
In rte_mempool_populate_default(), we determine the page size, which is needed for calc_size and allocation of memory. Move this into a function and export it; it will be used in a subsequent commit.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
Acked-by: Nipun Gupta <nipun.gupta@nxp.com>
parent 035ee5bea5
commit b291e69423
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -414,6 +414,33 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 	return ret;
 }
 
+/* Get the minimal page size used in a mempool before populating it. */
+int
+rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)
+{
+	bool need_iova_contig_obj;
+	bool alloc_in_ext_mem;
+	int ret;
+
+	/* check if we can retrieve a valid socket ID */
+	ret = rte_malloc_heap_socket_is_external(mp->socket_id);
+	if (ret < 0)
+		return -EINVAL;
+	alloc_in_ext_mem = (ret == 1);
+	need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+
+	if (!need_iova_contig_obj)
+		*pg_sz = 0;
+	else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA)
+		*pg_sz = 0;
+	else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
+		*pg_sz = get_min_page_size(mp->socket_id);
+	else
+		*pg_sz = getpagesize();
+
+	return 0;
+}
+
 /* Default function to populate the mempool: allocate memory in memzones,
  * and populate them. Return the number of objects added, or a negative
  * value on error.
@@ -425,12 +452,11 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 	const struct rte_memzone *mz;
 	ssize_t mem_size;
-	size_t align, pg_sz, pg_shift;
+	size_t align, pg_sz, pg_shift = 0;
 	rte_iova_t iova;
 	unsigned mz_id, n;
 	int ret;
 	bool need_iova_contig_obj;
-	bool alloc_in_ext_mem;
 
 	ret = mempool_ops_alloc_once(mp);
 	if (ret != 0)
@@ -485,26 +511,13 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	 * synonymous with IOVA contiguousness will not hold.
 	 */
 
-	/* check if we can retrieve a valid socket ID */
-	ret = rte_malloc_heap_socket_is_external(mp->socket_id);
-	if (ret < 0)
-		return -EINVAL;
-	alloc_in_ext_mem = (ret == 1);
 	need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+	ret = rte_mempool_get_page_size(mp, &pg_sz);
+	if (ret < 0)
+		return ret;
 
-	if (!need_iova_contig_obj) {
-		pg_sz = 0;
-		pg_shift = 0;
-	} else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA) {
-		pg_sz = 0;
-		pg_shift = 0;
-	} else if (rte_eal_has_hugepages() || alloc_in_ext_mem) {
-		pg_sz = get_min_page_size(mp->socket_id);
+	if (pg_sz != 0)
 		pg_shift = rte_bsf32(pg_sz);
-	} else {
-		pg_sz = getpagesize();
-		pg_shift = rte_bsf32(pg_sz);
-	}
 
 	for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
 		size_t min_chunk_size;
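As the commit message notes, the new helper is meant to be consumed by later changes as well. As a rough illustration of the intended call pattern, mirroring how rte_mempool_populate_default() uses the result above, a caller can derive a page shift from the reported page size; a page size of 0 means objects do not have to fit within page boundaries. The example_get_pg_shift() name below is purely illustrative and not part of this patch:

#include <rte_common.h>
#include <rte_mempool.h>

/* Illustrative helper: translate the page size reported by
 * rte_mempool_get_page_size() into the shift value a populate or
 * calc_mem_size path would typically work with. pg_sz == 0 means
 * there is no page-boundary constraint, so the shift stays 0. */
static int
example_get_pg_shift(struct rte_mempool *mp, size_t *pg_shift)
{
	size_t pg_sz;
	int ret;

	ret = rte_mempool_get_page_size(mp, &pg_sz);
	if (ret < 0)
		return ret;

	*pg_shift = (pg_sz == 0) ? 0 : rte_bsf32(pg_sz);
	return 0;
}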
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1688,6 +1688,17 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
 		      void *arg);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @internal Get page size used for mempool object allocation.
+ * This function is internal to mempool library and mempool drivers.
+ */
+__rte_experimental
+int
+rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
+
 #ifdef __cplusplus
 }
 #endif
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -56,5 +56,9 @@ DPDK_18.05 {
 EXPERIMENTAL {
 	global:
 
+	# added in 18.05
 	rte_mempool_ops_get_info;
+
+	# added in 19.11
+	rte_mempool_get_page_size;
 };
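Because rte_mempool_get_page_size() is exported only under the EXPERIMENTAL version node, code built outside the DPDK tree generally has to opt in to experimental APIs before calling it, typically by defining ALLOW_EXPERIMENTAL_API (for example -DALLOW_EXPERIMENTAL_API in CFLAGS). A minimal sketch of the source-level form:

/* Opting in to experimental mempool APIs; usually done through CFLAGS
 * (-DALLOW_EXPERIMENTAL_API) rather than in the source file itself. */
#define ALLOW_EXPERIMENTAL_API
#include <rte_mempool.h>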