mempool: fix slow allocation of large mempools
When allocating a mempool which is larger than the largest available
area, it can take a lot of time:

a- the mempool calculates the required memory size and tries to
   allocate it; this fails
b- it then tries to allocate the largest available area (this does
   not request new huge pages)
c- this zone is added to the mempool, which triggers the allocation
   of a mem hdr, which requests a new huge page
d- back to a- until the mempool is populated or there is no more
   memory

This can take a lot of time to finally fail (several minutes): in step
a- it takes all available hugepages on the system, then releases them
after it fails.

The problem appeared with commit eba11e3646 ("mempool: reduce wasted
space on populate"), because smaller chunks are now allowed. Previously,
a chunk had to be at least one page in size, which is not the case in
step b-.

To fix this, implement our own way to allocate the largest available
area instead of using the feature from memzone: if an allocation fails,
divide the size by 2 and retry. When the requested size falls below
min_chunk_size, stop and return an error.
("mempool: reduce wasted space on populate") Cc: stable@dpdk.org Signed-off-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Anatoly Burakov <anatoly.burakov@intel.com> Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com> Tested-by: Ali Alnubani <alialnu@mellanox.com>
parent f159c61c35
commit 3a3d0c75b4
@@ -463,6 +463,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	unsigned mz_id, n;
 	int ret;
 	bool need_iova_contig_obj;
+	size_t max_alloc_size = SIZE_MAX;
 
 	ret = mempool_ops_alloc_once(mp);
 	if (ret != 0)
@@ -542,30 +543,24 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 		if (min_chunk_size == (size_t)mem_size)
 			mz_flags |= RTE_MEMZONE_IOVA_CONTIG;
 
-		mz = rte_memzone_reserve_aligned(mz_name, mem_size,
+		/* Allocate a memzone, retrying with a smaller area on ENOMEM */
+		do {
+			mz = rte_memzone_reserve_aligned(mz_name,
+				RTE_MIN((size_t)mem_size, max_alloc_size),
 				mp->socket_id, mz_flags, align);
 
-		/* don't try reserving with 0 size if we were asked to reserve
-		 * IOVA-contiguous memory.
-		 */
-		if (min_chunk_size < (size_t)mem_size && mz == NULL) {
-			/* not enough memory, retry with the biggest zone we
-			 * have
-			 */
-			mz = rte_memzone_reserve_aligned(mz_name, 0,
-					mp->socket_id, mz_flags, align);
-		}
+			if (mz == NULL && rte_errno != ENOMEM)
+				break;
+
+			max_alloc_size = RTE_MIN(max_alloc_size,
+						(size_t)mem_size) / 2;
+		} while (mz == NULL && max_alloc_size >= min_chunk_size);
+
 		if (mz == NULL) {
 			ret = -rte_errno;
 			goto fail;
 		}
 
-		if (mz->len < min_chunk_size) {
-			rte_memzone_free(mz);
-			ret = -ENOMEM;
-			goto fail;
-		}
-
 		if (need_iova_contig_obj)
 			iova = mz->iova;
 		else
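[Editor's note: to see why the new loop fails fast, consider illustrative
numbers that are not from the patch: a 4 GiB request with a 2 MiB
min_chunk_size. The standalone program below mirrors the loop's arithmetic
in the worst case where every reservation fails with ENOMEM; it makes 12
requests (4 GiB, 2 GiB, ..., 2 MiB) and then gives up, instead of repeatedly
taking and releasing every hugepage on the system.]

	#include <stdio.h>
	#include <inttypes.h>

	int main(void)
	{
		uint64_t mem_size = 4ULL << 30;       /* 4 GiB (illustrative) */
		uint64_t min_chunk_size = 2ULL << 20; /* 2 MiB (illustrative) */
		uint64_t max_alloc_size = UINT64_MAX;
		int attempts = 0;

		/* Mirror the patch's halving arithmetic, assuming every
		 * reservation fails with ENOMEM.
		 */
		do {
			uint64_t len = mem_size < max_alloc_size ?
				mem_size : max_alloc_size;

			printf("attempt %d: request %" PRIu64 " bytes\n",
				++attempts, len);
			max_alloc_size = len / 2;
		} while (max_alloc_size >= min_chunk_size);

		printf("gave up after %d attempts\n", attempts); /* prints 12 */
		return 0;
	}

The number of failed reservations is thus logarithmic in the pool size,
which also makes the removed mz->len < min_chunk_size check unnecessary:
the loop never requests less than min_chunk_size, so a successful
reservation is always large enough.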