vfio: use contiguous mapping for IOVA as VA mode
When using IOVA as VA mode, there is no need to map segments
page by page. This normally isn't a problem, but it becomes one
when attempting to use DPDK in no-huge mode, where VFIO subsystem
simply runs out of space to store mappings.
Fix this for x86 by triggering different callbacks based on whether
IOVA as VA mode is enabled.
Fixes: 73a6390859 ("vfio: allow to map other memory regions")
Cc: stable@dpdk.org
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
Tested-by: Andrius Sirvys <andrius.sirvys@intel.com>
This commit is contained in:
parent: b671987985
commit: 78a6d7ed19
@@ -1231,6 +1231,19 @@ rte_vfio_get_group_num(const char *sysfs_base,
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int
|
||||
type1_map_contig(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
|
||||
size_t len, void *arg)
|
||||
{
|
||||
int *vfio_container_fd = arg;
|
||||
|
||||
if (msl->external)
|
||||
return 0;
|
||||
|
||||
return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
|
||||
len, 1);
|
||||
}
|
||||
|
||||
static int
|
||||
type1_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
|
||||
void *arg)
|
||||
@@ -1300,6 +1313,13 @@ vfio_type1_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
|
||||
static int
|
||||
vfio_type1_dma_map(int vfio_container_fd)
|
||||
{
|
||||
if (rte_eal_iova_mode() == RTE_IOVA_VA) {
|
||||
/* with IOVA as VA mode, we can get away with mapping contiguous
|
||||
* chunks rather than going page-by-page.
|
||||
*/
|
||||
return rte_memseg_contig_walk(type1_map_contig,
|
||||
&vfio_container_fd);
|
||||
}
|
||||
return rte_memseg_walk(type1_map, &vfio_container_fd);
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user