diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index cb33dd8913..493b6e5be3 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -221,6 +221,13 @@ fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
 				"alloc" : "dealloc",
 				va, virt_addr, iova_addr, map_len);
 
+		/* iova_addr may be set to RTE_BAD_IOVA */
+		if (iova_addr == RTE_BAD_IOVA) {
+			DPAA2_BUS_DEBUG("Segment has invalid iova, skipping\n");
+			cur_len += map_len;
+			continue;
+		}
+
 		if (type == RTE_MEM_EVENT_ALLOC)
 			ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
 		else
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 55a82e4b0c..a185aed34b 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -301,8 +301,14 @@ virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
 			 void *arg)
 {
 	struct virtio_user_dev *dev = arg;
+	struct rte_memseg_list *msl;
 	uint16_t i;
 
+	/* ignore externally allocated memory */
+	msl = rte_mem_virt2memseg_list(addr);
+	if (msl->external)
+		return;
+
 	pthread_mutex_lock(&dev->mutex);
 
 	if (dev->started == false)
diff --git a/lib/librte_eal/common/malloc_heap.c b/lib/librte_eal/common/malloc_heap.c
index adc1669aa3..08ec75377d 100644
--- a/lib/librte_eal/common/malloc_heap.c
+++ b/lib/librte_eal/common/malloc_heap.c
@@ -1031,6 +1031,9 @@ destroy_seg(struct malloc_elem *elem, size_t len)
 
 	msl = elem->msl;
 
+	/* notify all subscribers that a memory area is going to be removed */
+	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE, elem, len);
+
 	/* this element can be removed */
 	malloc_elem_free_list_remove(elem);
 	malloc_elem_hide_region(elem, elem, len);
@@ -1120,6 +1123,10 @@ malloc_heap_add_external_memory(struct malloc_heap *heap, void *va_addr,
 	RTE_LOG(DEBUG, EAL, "Added segment for heap %s starting at %p\n",
 		heap->name, va_addr);
 
+	/* notify all subscribers that a new memory area has been added */
+	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
+			va_addr, seg_len);
+
 	return 0;
 }
 
diff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c
index dd00254392..b27a8ad2e2 100644
--- a/lib/librte_eal/common/rte_malloc.c
+++ b/lib/librte_eal/common/rte_malloc.c
@@ -24,6 +24,7 @@
 #include <rte_malloc.h>
 #include "malloc_elem.h"
 #include "malloc_heap.h"
+#include "eal_memalloc.h"
 
 
 /* Free the memory space back to heap */
@@ -440,15 +441,29 @@ sync_mem_walk(const struct rte_memseg_list *msl, void *arg)
 		msl_idx = msl - mcfg->memsegs;
 		found_msl = &mcfg->memsegs[msl_idx];
 
-		if (wa->attach)
+		if (wa->attach) {
 			ret = rte_fbarray_attach(&found_msl->memseg_arr);
-		else
+		} else {
+			/* notify all subscribers that a memory area is about to
+			 * be removed
+			 */
+			eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
+					msl->base_va, msl->len);
 			ret = rte_fbarray_detach(&found_msl->memseg_arr);
+		}
-		if (ret < 0)
+		if (ret < 0) {
 			wa->result = -rte_errno;
-		else
+		} else {
+			/* notify all subscribers that a new memory area was
+			 * added
+			 */
+			if (wa->attach)
+				eal_memalloc_mem_event_notify(
+						RTE_MEM_EVENT_ALLOC,
+						msl->base_va, msl->len);
 			wa->result = 0;
+		}
 		return 1;
 	}
 	return 0;
 }
@@ -498,6 +513,10 @@ sync_memory(const char *heap_name, void *va_addr, size_t len, bool attach)
 		rte_errno = -wa.result;
 		ret = -1;
 	} else {
+		/* notify all subscribers that a new memory area was added */
+		if (attach)
+			eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
+					va_addr, len);
 		ret = 0;
 	}
 unlock:
diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c
index fddbc3b548..d7268e4ce7 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c
@@ -509,7 +509,7 @@ vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len,
 	msl = rte_mem_virt2memseg_list(addr);
 
 	/* for IOVA as VA mode, no need to care for IOVA addresses */
-	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
+	if (rte_eal_iova_mode() == RTE_IOVA_VA && msl->external == 0) {
 		uint64_t vfio_va = (uint64_t)(uintptr_t)addr;
 		if (type == RTE_MEM_EVENT_ALLOC)
 			vfio_dma_mem_map(default_vfio_cfg, vfio_va, vfio_va,
@@ -523,13 +523,19 @@
 	/* memsegs are contiguous in memory */
 	ms = rte_mem_virt2memseg(addr, msl);
 	while (cur_len < len) {
+		/* some memory segments may have invalid IOVA */
+		if (ms->iova == RTE_BAD_IOVA) {
+			RTE_LOG(DEBUG, EAL, "Memory segment at %p has bad IOVA, skipping\n",
+					ms->addr);
+			goto next;
+		}
 		if (type == RTE_MEM_EVENT_ALLOC)
 			vfio_dma_mem_map(default_vfio_cfg, ms->addr_64,
 					ms->iova, ms->len, 1);
 		else
 			vfio_dma_mem_map(default_vfio_cfg, ms->addr_64,
 					ms->iova, ms->len, 0);
-
+next:
 		cur_len += ms->len;
 		++ms;
 	}
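
For context on how these hooks are consumed: after this change, callbacks registered with rte_mem_event_callback_register() also fire when external memory is attached to or detached from a malloc heap, so subscribers that only care about EAL-managed memory must check the segment list's external flag, exactly as the virtio-user and VFIO callbacks above do. Below is a minimal subscriber sketch; the callback name and the printf() output are illustrative and not part of this patch, and the memory-event API is experimental in this release (ALLOW_EXPERIMENTAL_API must be defined).

#include <stdio.h>

#include <rte_common.h>
#include <rte_memory.h>

static void
example_mem_event_cb(enum rte_mem_event type, const void *addr, size_t len,
		void *arg __rte_unused)
{
	const struct rte_memseg_list *msl;

	/* find out which segment list the affected area belongs to */
	msl = rte_mem_virt2memseg_list(addr);

	/* external memory is managed by the application; a driver-style
	 * subscriber may choose to ignore it, as virtio-user does above
	 */
	if (msl != NULL && msl->external)
		return;

	printf("%s of %zu bytes at %p\n",
			type == RTE_MEM_EVENT_ALLOC ? "alloc" : "free",
			len, addr);
}

static int
example_register(void)
{
	/* the name is only used for logging and for unregistering later */
	return rte_mem_event_callback_register("example",
			example_mem_event_cb, NULL);
}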