mem: use DMA mask check for legacy memory
If a device reports addressing limitations through a DMA mask, the IOVAs for mapped memory need to be checked to ensure correct functionality. Previous patches introduced this DMA check for the main memory code currently in use, but other options, such as legacy memory and the no-hugepages option, also need to be considered. This patch adds the DMA check for those cases. Signed-off-by: Alejandro Lucero <alejandro.lucero@netronome.com> Tested-by: Ferruh Yigit <ferruh.yigit@intel.com>
This commit is contained in:
parent
4374ebc24b
commit
165c89b845
@@ -1393,6 +1393,18 @@ eal_legacy_hugepage_init(void)
 			addr = RTE_PTR_ADD(addr, (size_t)page_sz);
 		}
+		if (mcfg->dma_maskbits &&
+		    rte_mem_check_dma_mask(mcfg->dma_maskbits)) {
+			RTE_LOG(ERR, EAL,
+				"%s(): couldnt allocate memory due to IOVA exceeding limits of current DMA mask.\n",
+				__func__);
+			if (rte_eal_iova_mode() == RTE_IOVA_VA &&
+			    rte_eal_using_phys_addrs())
+				RTE_LOG(ERR, EAL,
+					"%s(): Please try initializing EAL with --iova-mode=pa parameter.\n",
+					__func__);
+			goto fail;
+		}
 		return 0;
 	}

@@ -1628,6 +1640,14 @@ eal_legacy_hugepage_init(void)
 		rte_fbarray_destroy(&msl->memseg_arr);
 	}
 
+	if (mcfg->dma_maskbits &&
+	    rte_mem_check_dma_mask(mcfg->dma_maskbits)) {
+		RTE_LOG(ERR, EAL,
+			"%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.\n",
+			__func__);
+		goto fail;
+	}
+
 	return 0;
 
 fail:
Loading…
Reference in New Issue
Block a user