mem: add thread unsafe version for DMA mask check

Calling rte_mem_check_dma_mask during memory initialization leads
to a deadlock: memory_hotplug_lock is already held as a writer by
the code in execution, and rte_memseg_walk tries to take it as a
reader.

This patch adds a thread_unsafe version, which calls the underlying
function specifying that memory_hotplug_lock does not need to be
acquired. rte_mem_check_dma_mask is kept as an intermediate step
that calls the same underlying function as before, implying
memory_hotplug_lock will be acquired.

PMDs should always use the version that acquires the lock; the
thread_unsafe one is just for internal EAL memory code.

Fixes: 223b7f1d5ef6 ("mem: add function for checking memseg IOVA")

Signed-off-by: Alejandro Lucero <alejandro.lucero@netronome.com>
Tested-by: Ferruh Yigit <ferruh.yigit@intel.com>
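
As an editorial illustration of the usage split described above, a
hedged sketch: the device and its 40-bit limit are made up, but the two
rte_ functions are the ones this commit provides. Drivers take the
locking path; only EAL init code, which already holds
memory_hotplug_lock as a writer, takes the other.

    #include <rte_memory.h>

    /* Hypothetical 40-bit limit of some NIC's DMA engine. */
    #define EXAMPLE_NIC_DMA_BITS 40

    /* PMD/driver path: memory init is finished and no hotplug lock is
     * held, so the locking variant is the correct one. */
    static int
    example_pmd_dma_check(void)
    {
        return rte_mem_check_dma_mask(EXAMPLE_NIC_DMA_BITS);
    }

    /* EAL-internal path: memory_hotplug_lock is already held as a
     * writer, so only the thread-unsafe variant avoids self-deadlock. */
    static int
    example_eal_init_dma_check(void)
    {
        return rte_mem_check_dma_mask_thread_unsafe(EXAMPLE_NIC_DMA_BITS);
    }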

lib/librte_eal/common/eal_common_memory.c

@@ -446,11 +446,12 @@ check_iova(const struct rte_memseg_list *msl __rte_unused,
 #endif
 
 /* check memseg iovas are within the required range based on dma mask */
-int __rte_experimental
-rte_mem_check_dma_mask(uint8_t maskbits)
+static int __rte_experimental
+check_dma_mask(uint8_t maskbits, bool thread_unsafe)
 {
     struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
     uint64_t mask;
+    int ret;
 
     /* sanity check */
     if (maskbits > MAX_DMA_MASK_BITS) {
@@ -462,7 +463,12 @@ rte_mem_check_dma_mask(uint8_t maskbits)
     /* create dma mask */
     mask = ~((1ULL << maskbits) - 1);
 
-    if (rte_memseg_walk(check_iova, &mask))
+    if (thread_unsafe)
+        ret = rte_memseg_walk_thread_unsafe(check_iova, &mask);
+    else
+        ret = rte_memseg_walk(check_iova, &mask);
+
+    if (ret)
         /*
          * Dma mask precludes hugepage usage.
          * This device can not be used and we do not need to keep
@@ -480,6 +486,18 @@ rte_mem_check_dma_mask(uint8_t maskbits)
     return 0;
 }
 
+int __rte_experimental
+rte_mem_check_dma_mask(uint8_t maskbits)
+{
+    return check_dma_mask(maskbits, false);
+}
+
+int __rte_experimental
+rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits)
+{
+    return check_dma_mask(maskbits, true);
+}
+
 /*
  * Set dma mask to use when memory initialization is done.
  *
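
For reference, a standalone sketch of the mask arithmetic used by
check_dma_mask() above, mask = ~((1ULL << maskbits) - 1): an IOVA
passes iff it has no bits set above maskbits. The sample values are
made up.

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint8_t maskbits = 40;
        /* same construction as check_dma_mask(): ones above bit 39 */
        uint64_t mask = ~((1ULL << maskbits) - 1); /* 0xffffff0000000000 */
        uint64_t iova_ok  = 0x0000003fffffffffULL; /* fits in 40 bits */
        uint64_t iova_bad = 0x0000010000000000ULL; /* bit 40 set */

        printf("ok:  %s\n", (iova_ok & mask) == 0 ? "pass" : "fail");
        printf("bad: %s\n", (iova_bad & mask) == 0 ? "pass" : "fail");
        return 0;
    }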

lib/librte_eal/common/include/rte_memory.h

@@ -463,16 +463,43 @@ unsigned rte_memory_get_nchannel(void);
  */
 unsigned rte_memory_get_nrank(void);
 
-/* check memsegs iovas are within a range based on dma mask */
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Check if all currently allocated memory segments are compliant with
+ * supplied DMA address width.
+ *
+ * @param maskbits
+ *   Address width to check against.
+ */
 int __rte_experimental rte_mem_check_dma_mask(uint8_t maskbits);
 
 /**
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice
  *
- * Set dma mask to use once memory initialization is done.
- * Previous function rte_mem_check_dma_mask can not be used
- * safely until memory has been initialized.
+ * Check if all currently allocated memory segments are compliant with
+ * supplied DMA address width. This function will use
+ * rte_memseg_walk_thread_unsafe instead of rte_memseg_walk implying
+ * memory_hotplug_lock will not be acquired avoiding deadlock during
+ * memory initialization.
+ *
+ * This function is just for EAL core memory internal use. Drivers should
+ * use the previous rte_mem_check_dma_mask.
+ *
+ * @param maskbits
+ *   Address width to check against.
+ */
+int __rte_experimental rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Set dma mask to use once memory initialization is done. Previous functions
+ * rte_mem_check_dma_mask and rte_mem_check_dma_mask_thread_unsafe can not be
+ * used safely until memory has been initialized.
  */
 void __rte_experimental rte_mem_set_dma_mask(uint8_t maskbits);
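
A hedged sketch of where rte_mem_set_dma_mask fits: code running before
memory initialization cannot use either check variant, so it records
the width instead, and EAL validates the stored mcfg->dma_maskbits
later (see the malloc_heap.c and eal_memory.c hunks below). The hook
name is illustrative.

    #include <rte_memory.h>

    /* Illustrative early hook, called before EAL has set up memory:
     * record the made-up 40-bit limit now; allocation paths check it
     * against every memseg once memory exists. */
    static void
    example_record_dma_limit(void)
    {
        rte_mem_set_dma_mask(40);
    }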

lib/librte_eal/common/malloc_heap.c

@@ -334,7 +334,7 @@ alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
      * executed. For 2) implies the new memory can not be added.
      */
     if (mcfg->dma_maskbits &&
-        rte_mem_check_dma_mask(mcfg->dma_maskbits)) {
+        rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
         /*
          * Currently this can only happen if IOMMU is enabled
          * and the address width supported by the IOMMU hw is
lib/librte_eal/linuxapp/eal/eal_memory.c

@@ -1394,7 +1394,7 @@ eal_legacy_hugepage_init(void)
         addr = RTE_PTR_ADD(addr, (size_t)page_sz);
     }
     if (mcfg->dma_maskbits &&
-        rte_mem_check_dma_mask(mcfg->dma_maskbits)) {
+        rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
         RTE_LOG(ERR, EAL,
             "%s(): couldnt allocate memory due to IOVA exceeding limits of current DMA mask.\n",
             __func__);
@@ -1641,7 +1641,7 @@
     }
 
     if (mcfg->dma_maskbits &&
-        rte_mem_check_dma_mask(mcfg->dma_maskbits)) {
+        rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
         RTE_LOG(ERR, EAL,
             "%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.\n",
             __func__);

lib/librte_eal/rte_eal_version.map

@@ -331,6 +331,7 @@ EXPERIMENTAL {
     rte_mem_alloc_validator_register;
     rte_mem_alloc_validator_unregister;
     rte_mem_check_dma_mask;
+    rte_mem_check_dma_mask_thread_unsafe;
     rte_mem_event_callback_register;
     rte_mem_event_callback_unregister;
     rte_mem_iova2virt;
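
The newly exported symbol, like the rest of this EXPERIMENTAL block,
carries __rte_experimental, so an out-of-tree caller has to opt in at
build time. A minimal sketch; the translation-unit name is made up:

    /* dma_check.c: build with -DALLOW_EXPERIMENTAL_API (or define the
     * macro before any DPDK include); otherwise use of experimental
     * API is rejected at compile time. */
    #define ALLOW_EXPERIMENTAL_API
    #include <rte_memory.h>

    int
    dma_check_40bit(void)
    {
        return rte_mem_check_dma_mask(40);
    }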