mem: rename DMA mask check with proper prefix

The current name, rte_eal_check_dma_mask, does not follow the naming
convention used in the rest of the file.

Signed-off-by: Alejandro Lucero <alejandro.lucero@netronome.com>
Tested-by: Ferruh Yigit <ferruh.yigit@intel.com>
Author: Alejandro Lucero <alejandro.lucero@netronome.com>
Date:   2018-11-01 19:53:25 +00:00
Committer: Thomas Monjalon
Commit: 0de9eb6138 (parent af0aa2357d)
7 files changed, 8 insertions(+), 8 deletions(-)


@@ -63,7 +63,7 @@ New Features
 * **Added check for ensuring allocated memory addressable by devices.**
 
   Some devices can have addressing limitations so a new function,
-  ``rte_eal_check_dma_mask``, has been added for checking allocated memory is
+  ``rte_mem_check_dma_mask``, has been added for checking allocated memory is
   not out of the device range. Because now memory can be dynamically allocated
   after initialization, a dma mask is kept and any new allocated memory will be
   checked out against that dma mask and rejected if out of range. If more than
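
A minimal usage sketch of the renamed function described above, assuming a
hypothetical driver limited to 40-bit IOVAs; only rte_mem_check_dma_mask
itself comes from this patch, everything else here is illustrative:

#include <rte_memory.h>
#include <rte_log.h>

/* Hypothetical init-time guard: verify that all memory allocated so far
 * is reachable by a device that can only address 40 bits. */
static int
example_dev_check_dma(void)
{
        if (rte_mem_check_dma_mask(40) != 0) {
                RTE_LOG(ERR, EAL, "allocated memory out of 40-bit range\n");
                return -1;
        }
        /* Per the release note, the mask is kept and later allocations
         * are checked against it as well. */
        return 0;
}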


@@ -615,7 +615,7 @@ pci_one_device_iommu_support_va(struct rte_pci_device *dev)
 
 	mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;
 
-	return rte_eal_check_dma_mask(mgaw) == 0 ? true : false;
+	return rte_mem_check_dma_mask(mgaw) == 0 ? true : false;
 }
 #elif defined(RTE_ARCH_PPC_64)
 static bool
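
For context, MGAW is the VT-d "Maximum Guest Address Width": the register
field encodes the supported width minus one, hence the "+ 1" above. A small
sketch of the decoding, with the mask and shift values assumed to match the
definitions elsewhere in this file (they are not part of this hunk):

#include <stdint.h>

#define VTD_CAP_MGAW_SHIFT      16                      /* assumed */
#define VTD_CAP_MGAW_MASK       (0x3fULL << VTD_CAP_MGAW_SHIFT)

/* A raw field value of 0x26 (38) decodes to a 39-bit guest address
 * width, which rte_mem_check_dma_mask(39) then validates. */
static uint8_t
decode_mgaw(uint64_t vtd_cap_reg)
{
        return ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;
}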


@@ -2703,7 +2703,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 
 	/* NFP can not handle DMA addresses requiring more than 40 bits */
-	if (rte_eal_check_dma_mask(40)) {
+	if (rte_mem_check_dma_mask(40)) {
 		RTE_LOG(ERR, PMD, "device %s can not be used:",
 			pci_dev->device.name);
 		RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n");


@@ -49,7 +49,7 @@ static uint64_t system_page_sz;
  * Current known limitations are 39 or 40 bits. Setting the starting address
  * at 4GB implies there are 508GB or 1020GB for mapping the available
  * hugepages. This is likely enough for most systems, although a device with
- * addressing limitations should call rte_eal_check_dma_mask for ensuring all
+ * addressing limitations should call rte_mem_check_dma_mask for ensuring all
  * memory is within supported range.
  */
 static uint64_t baseaddr = 0x100000000;
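
The 508GB/1020GB figures in this comment follow from simple arithmetic: a
39-bit device addresses 2^39 bytes (512GB) and a 40-bit device 2^40 bytes
(1024GB), and starting the mappings at the 4GB mark subtracts 4GB from each.
A quick standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t base = 0x100000000ULL;  /* 4GB starting address */

        /* (2^39 - 4GB) >> 30 == 508; (2^40 - 4GB) >> 30 == 1020 */
        printf("39-bit: %llu GB\n",
               (unsigned long long)(((1ULL << 39) - base) >> 30));
        printf("40-bit: %llu GB\n",
               (unsigned long long)(((1ULL << 40) - base) >> 30));
        return 0;
}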
@@ -447,7 +447,7 @@ check_iova(const struct rte_memseg_list *msl __rte_unused,
 
 /* check memseg iovas are within the required range based on dma mask */
 int __rte_experimental
-rte_eal_check_dma_mask(uint8_t maskbits)
+rte_mem_check_dma_mask(uint8_t maskbits)
 {
 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	uint64_t mask;
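
The function body is cut off by the hunk; the core test it performs can be
sketched as follows, assuming the mask is the complement of the addressable
range (the real function additionally walks the memseg lists via the
check_iova callback visible above, which this standalone version omits):

#include <stdint.h>
#include <stdbool.h>

/* An IOVA fits in `maskbits` bits iff no bit above bit (maskbits - 1)
 * is set. */
static bool
iova_fits_mask(uint64_t iova, uint8_t maskbits)
{
        uint64_t mask;

        if (maskbits >= 64)
                return true;    /* no effective restriction */
        mask = ~((1ULL << maskbits) - 1);
        return (iova & mask) == 0;
}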


@ -464,7 +464,7 @@ unsigned rte_memory_get_nchannel(void);
unsigned rte_memory_get_nrank(void);
/* check memsegs iovas are within a range based on dma mask */
int __rte_experimental rte_eal_check_dma_mask(uint8_t maskbits);
int __rte_experimental rte_mem_check_dma_mask(uint8_t maskbits);
/**
* Drivers based on uio will not load unless physical


@@ -322,7 +322,7 @@ alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
 	}
 
 	if (mcfg->dma_maskbits) {
-		if (rte_eal_check_dma_mask(mcfg->dma_maskbits)) {
+		if (rte_mem_check_dma_mask(mcfg->dma_maskbits)) {
 			RTE_LOG(ERR, EAL,
 				"%s(): couldn't allocate memory due to DMA mask\n",
 				__func__);
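
The practical effect of this hunk: once a restrictive mask has been recorded
(for example by the NFP driver above), heap growth that would map pages
beyond it is rejected and the allocation fails. An illustrative caller-side
view; the helper name, size, and error handling are arbitrary:

#include <stdio.h>
#include <rte_malloc.h>

/* Hypothetical helper: allocate from the DPDK heap, reporting failures
 * that may stem from the recorded DMA mask. */
static void *
alloc_dma_safe(size_t len)
{
        void *buf = rte_malloc(NULL, len, 0);

        if (buf == NULL)
                fprintf(stderr, "rte_malloc(%zu) failed (DMA mask?)\n", len);
        return buf;
}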


@@ -295,7 +295,6 @@ EXPERIMENTAL {
 	rte_devargs_parsef;
 	rte_devargs_remove;
 	rte_devargs_type_count;
-	rte_eal_check_dma_mask;
 	rte_eal_cleanup;
 	rte_fbarray_attach;
 	rte_fbarray_destroy;
@@ -331,6 +330,7 @@ EXPERIMENTAL {
 	rte_malloc_heap_socket_is_external;
 	rte_mem_alloc_validator_register;
 	rte_mem_alloc_validator_unregister;
+	rte_mem_check_dma_mask;
 	rte_mem_event_callback_register;
 	rte_mem_event_callback_unregister;
 	rte_mem_iova2virt;