mempool: allow populating with unaligned virtual area
rte_mempool_populate_virt() currently requires that both addr and length
are page-aligned.

Remove this unneeded constraint which can be annoying with big hugepages
(ex: 1GB).

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
Acked-by: Nipun Gupta <nipun.gupta@nxp.com>
commit 354788b60c (parent 23a2489a83)
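For context, a minimal usage sketch of what the relaxed constraint permits: populating a mempool from a virtual area whose start address and length are not page multiples. Everything except rte_mempool_populate_virt() and its parameters is hypothetical here (the helper name, the mempool, the mapping and the offsets); the sketch is not part of the patch.

#include <rte_mempool.h>

/* Hypothetical helper: `mp` is an empty mempool (e.g. created with
 * rte_mempool_create_empty()), `va` is a mapped area of `va_len` bytes
 * backed by pages of size `pg_sz`. Before this patch, the unaligned
 * addr/len below would have made the call return -EINVAL; with it, the
 * area is accepted and populated one group of contiguous pages at a time.
 */
static int
populate_from_unaligned_area(struct rte_mempool *mp, char *va,
		size_t va_len, size_t pg_sz)
{
	char *addr = va + 64;		/* deliberately not page-aligned */
	size_t len = va_len - 128;	/* deliberately not a page multiple */

	/* no free callback in this sketch; a real caller usually passes one */
	return rte_mempool_populate_virt(mp, addr, len, pg_sz, NULL, NULL);
}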
@@ -368,17 +368,11 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 	size_t off, phys_len;
 	int ret, cnt = 0;
 
-	/* address and len must be page-aligned */
-	if (RTE_PTR_ALIGN_CEIL(addr, pg_sz) != addr)
-		return -EINVAL;
-	if (RTE_ALIGN_CEIL(len, pg_sz) != len)
-		return -EINVAL;
-
 	if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG)
 		return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA,
 			len, free_cb, opaque);
 
-	for (off = 0; off + pg_sz <= len &&
+	for (off = 0; off < len &&
 		     mp->populated_size < mp->size; off += phys_len) {
 
 		iova = rte_mem_virt2iova(addr + off);
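The relaxed outer-loop bound above is what lets the trailing partial page of an unaligned area be used: with the old `off + pg_sz <= len` test the loop stopped before a final chunk shorter than a page, while `off < len` still enters it and the chunk length gets capped to the bytes that remain. Below is a standalone sketch of that bound with made-up sizes, in plain C with no DPDK calls; the chunk sizing is simplified to min(pg_sz, remaining) instead of the real IOVA-contiguity grouping.

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	const size_t pg_sz = 2u << 20;		/* assume 2 MB pages */
	const size_t len = (5u << 20) + 4096;	/* made-up, not a page multiple */
	size_t off, phys_len;

	/* mirrors the new bound: keep going while any bytes remain */
	for (off = 0; off < len; off += phys_len) {
		phys_len = (len - off < pg_sz) ? len - off : pg_sz;
		printf("chunk: off=%zu len=%zu\n", off, phys_len);
	}
	return 0;
}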
@@ -389,12 +383,18 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 		}
 
 		/* populate with the largest group of contiguous pages */
-		for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {
+		for (phys_len = RTE_MIN(
+			(size_t)(RTE_PTR_ALIGN_CEIL(addr + off + 1, pg_sz) -
+				(addr + off)),
+			len - off);
+		     off + phys_len < len;
+		     phys_len = RTE_MIN(phys_len + pg_sz, len - off)) {
 			rte_iova_t iova_tmp;
 
 			iova_tmp = rte_mem_virt2iova(addr + off + phys_len);
 
-			if (iova_tmp != iova + phys_len)
+			if (iova_tmp == RTE_BAD_IOVA ||
+					iova_tmp != iova + phys_len)
 				break;
 		}
 
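Because addr + off may now fall in the middle of a page, the inner loop's starting phys_len only reaches the next page boundary (or the end of the area), so IOVA contiguity is never assumed across a boundary that has not been checked; on later iterations of the outer loop the address is boundary-aligned again and a full page is taken. Below is a standalone sketch of that starting-length arithmetic using plain bit operations instead of RTE_PTR_ALIGN_CEIL()/RTE_MIN(); the helper name is hypothetical and the sketch assumes pg_sz is a power of two.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper mirroring the new initializer:
 * RTE_MIN((size_t)(RTE_PTR_ALIGN_CEIL(addr + off + 1, pg_sz) - (addr + off)),
 *         len - off)
 * i.e. the distance from addr + off to the next pg_sz boundary, capped to
 * the bytes left in the area. Assumes pg_sz is a power of two.
 */
static size_t
first_chunk_len(const char *addr, size_t off, size_t len, size_t pg_sz)
{
	uintptr_t cur = (uintptr_t)(addr + off);
	uintptr_t next_boundary = (cur + pg_sz) & ~((uintptr_t)pg_sz - 1);
	size_t to_boundary = (size_t)(next_boundary - cur);
	size_t remaining = len - off;

	return (to_boundary < remaining) ? to_boundary : remaining;
}

If addr + off is already page-aligned, this yields one full page, so the aligned case behaves as it did before the patch.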
@@ -575,8 +575,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 			 * have
 			 */
 			mz = rte_memzone_reserve_aligned(mz_name, 0,
-					mp->socket_id, flags,
-					RTE_MAX(pg_sz, align));
+					mp->socket_id, flags, align);
 		}
 		if (mz == NULL) {
 			ret = -rte_errno;
@@ -601,7 +600,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 				(void *)(uintptr_t)mz);
 		else
 			ret = rte_mempool_populate_virt(mp, mz->addr,
-				RTE_ALIGN_FLOOR(mz->len, pg_sz), pg_sz,
+				mz->len, pg_sz,
 				rte_mempool_memchunk_mz_free,
 				(void *)(uintptr_t)mz);
 		if (ret < 0) {
@@ -1042,9 +1042,8 @@ int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
  *   A pointer to the mempool structure.
  * @param addr
  *   The virtual address of memory that should be used to store objects.
- *   Must be page-aligned.
  * @param len
- *   The length of memory in bytes. Must be page-aligned.
+ *   The length of memory in bytes.
  * @param pg_sz
  *   The size of memory pages in this virtual area.
  * @param free_cb