malloc: enable event callbacks for external memory
When adding or removing external memory from the memory map, there may be actions that need to be taken on account of this memory (e.g. DMA mapping). Add support for triggering callbacks when adding, removing, attaching or detaching external memory. Some memory event callback handlers will need additional logic to handle external memory regions. For example, virtio callback has to completely ignore externally allocated memory, because there is no way to find file descriptors backing the memory address in a generic fashion. All other callbacks have also been adjusted to handle RTE_BAD_IOVA as IOVA address, as this is one of the expected use cases for external memory support. Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
This commit (f32c7c9de9) is a child of parent commit c842d1c3b0.
@@ -221,6 +221,13 @@ fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
 			"alloc" : "dealloc",
 			va, virt_addr, iova_addr, map_len);
 
+		/* iova_addr may be set to RTE_BAD_IOVA */
+		if (iova_addr == RTE_BAD_IOVA) {
+			DPAA2_BUS_DEBUG("Segment has invalid iova, skipping\n");
+			cur_len += map_len;
+			continue;
+		}
+
 		if (type == RTE_MEM_EVENT_ALLOC)
 			ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
 		else
@@ -301,8 +301,14 @@ virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
 		 void *arg)
 {
 	struct virtio_user_dev *dev = arg;
+	struct rte_memseg_list *msl;
 	uint16_t i;
 
+	/* ignore externally allocated memory */
+	msl = rte_mem_virt2memseg_list(addr);
+	if (msl->external)
+		return;
+
 	pthread_mutex_lock(&dev->mutex);
 
 	if (dev->started == false)
@@ -1031,6 +1031,9 @@ destroy_seg(struct malloc_elem *elem, size_t len)
 
 	msl = elem->msl;
 
+	/* notify all subscribers that a memory area is going to be removed */
+	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE, elem, len);
+
 	/* this element can be removed */
 	malloc_elem_free_list_remove(elem);
 	malloc_elem_hide_region(elem, elem, len);
@@ -1120,6 +1123,10 @@ malloc_heap_add_external_memory(struct malloc_heap *heap, void *va_addr,
 	RTE_LOG(DEBUG, EAL, "Added segment for heap %s starting at %p\n",
 			heap->name, va_addr);
 
+	/* notify all subscribers that a new memory area has been added */
+	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
+			va_addr, seg_len);
+
 	return 0;
 }
 
@@ -24,6 +24,7 @@
 #include <rte_malloc.h>
 #include "malloc_elem.h"
 #include "malloc_heap.h"
+#include "eal_memalloc.h"
 
 
 /* Free the memory space back to heap */
@@ -440,15 +441,29 @@ sync_mem_walk(const struct rte_memseg_list *msl, void *arg)
 		msl_idx = msl - mcfg->memsegs;
 		found_msl = &mcfg->memsegs[msl_idx];
 
-		if (wa->attach)
+		if (wa->attach) {
 			ret = rte_fbarray_attach(&found_msl->memseg_arr);
-		else
+		} else {
+			/* notify all subscribers that a memory area is about to
+			 * be removed
+			 */
+			eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
+					msl->base_va, msl->len);
 			ret = rte_fbarray_detach(&found_msl->memseg_arr);
+		}
 
-		if (ret < 0)
+		if (ret < 0) {
 			wa->result = -rte_errno;
-		else
+		} else {
+			/* notify all subscribers that a new memory area was
+			 * added
+			 */
+			if (wa->attach)
+				eal_memalloc_mem_event_notify(
+						RTE_MEM_EVENT_ALLOC,
+						msl->base_va, msl->len);
 			wa->result = 0;
+		}
 		return 1;
 	}
 	return 0;
@@ -498,6 +513,10 @@ sync_memory(const char *heap_name, void *va_addr, size_t len, bool attach)
 		rte_errno = -wa.result;
 		ret = -1;
 	} else {
+		/* notify all subscribers that a new memory area was added */
+		if (attach)
+			eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
+					va_addr, len);
 		ret = 0;
 	}
 unlock:
@@ -509,7 +509,7 @@ vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len,
 	msl = rte_mem_virt2memseg_list(addr);
 
 	/* for IOVA as VA mode, no need to care for IOVA addresses */
-	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
+	if (rte_eal_iova_mode() == RTE_IOVA_VA && msl->external == 0) {
 		uint64_t vfio_va = (uint64_t)(uintptr_t)addr;
 		if (type == RTE_MEM_EVENT_ALLOC)
 			vfio_dma_mem_map(default_vfio_cfg, vfio_va, vfio_va,
@@ -523,13 +523,19 @@ vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len,
 	/* memsegs are contiguous in memory */
 	ms = rte_mem_virt2memseg(addr, msl);
 	while (cur_len < len) {
+		/* some memory segments may have invalid IOVA */
+		if (ms->iova == RTE_BAD_IOVA) {
+			RTE_LOG(DEBUG, EAL, "Memory segment at %p has bad IOVA, skipping\n",
+					ms->addr);
+			goto next;
+		}
 		if (type == RTE_MEM_EVENT_ALLOC)
 			vfio_dma_mem_map(default_vfio_cfg, ms->addr_64,
 					ms->iova, ms->len, 1);
 		else
 			vfio_dma_mem_map(default_vfio_cfg, ms->addr_64,
 					ms->iova, ms->len, 0);
+
+next:
 		cur_len += ms->len;
 		++ms;
 	}
 
Loading…
Reference in New Issue
Block a user