malloc: allow adding memory to named heaps

Add an API to add externally allocated memory to a malloc heap. The
memory will be stored in memseg lists like regular DPDK memory.
Multiple segments are allowed within a heap. If an IOVA table is
not provided, IOVA addresses are filled in with RTE_BAD_IOVA.

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
Anatoly Burakov 2018-10-02 14:34:51 +01:00 committed by Thomas Monjalon
parent 15d6dd023c
commit 7d75c31014
5 changed files with 169 additions and 0 deletions


@@ -263,6 +263,45 @@ int
rte_malloc_get_socket_stats(int socket,
		struct rte_malloc_socket_stats *socket_stats);

/**
 * Add memory chunk to a heap with specified name.
 *
 * @note Multiple memory chunks can be added to the same heap.
 *
 * @note Memory must be previously allocated for DPDK to be able to use it as a
 *   malloc heap. Failing to do so will result in undefined behavior, up to and
 *   including segmentation faults.
 *
 * @note Calling this function will erase any contents already present at the
 *   supplied memory address.
 *
 * @param heap_name
 *   Name of the heap to add memory chunk to.
 * @param va_addr
 *   Start of virtual area to add to the heap.
 * @param len
 *   Length of virtual area to add to the heap.
 * @param iova_addrs
 *   Array of page IOVA addresses corresponding to each page in this memory
 *   area. Can be NULL, in which case page IOVA addresses will be set to
 *   RTE_BAD_IOVA.
 * @param n_pages
 *   Number of elements in the iova_addrs array. Ignored if ``iova_addrs``
 *   is NULL.
 * @param page_sz
 *   Page size of the underlying memory.
 *
 * @return
 *   - 0 on success.
 *   - -1 in case of error, with rte_errno set to one of the following:
 *     EINVAL - one of the parameters was invalid
 *     EPERM - attempted to add memory to a reserved heap
 *     ENOSPC - no more space in internal config to store a new memory chunk
 */
int __rte_experimental
rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len,
		rte_iova_t iova_addrs[], unsigned int n_pages, size_t page_sz);
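
As a usage sketch (not part of this patch): how the new call combines with the
existing named-heap API. The heap name, sizes and use of plain anonymous
mmap() memory are assumptions for illustration only; an application that needs
DMA would also supply an IOVA table.

#include <sys/mman.h>
#include <unistd.h>
#include <rte_malloc.h>

static void *
alloc_from_external_heap(void)
{
	size_t page_sz = (size_t)sysconf(_SC_PAGESIZE); /* assumed 4K pages */
	size_t len = 1024 * page_sz;
	void *addr;
	int socket;

	if (rte_malloc_heap_create("example_heap") != 0)
		return NULL;

	/* memory is allocated outside of DPDK... */
	addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;

	/* ...and attached to the named heap; a NULL IOVA table means all
	 * pages get RTE_BAD_IOVA, which is fine for non-DMA use.
	 */
	if (rte_malloc_heap_memory_add("example_heap", addr, len,
			NULL, 0, page_sz) != 0)
		return NULL;

	/* the external heap is reachable through its pseudo socket id */
	socket = rte_malloc_heap_get_socket("example_heap");
	if (socket < 0)
		return NULL;

	return rte_malloc_socket("example_obj", 4096, 0, socket);
}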
/**
* Creates a new empty malloc heap with a specified name.
*


@@ -1023,6 +1023,80 @@ malloc_heap_dump(struct malloc_heap *heap, FILE *f)
	rte_spinlock_unlock(&heap->lock);
}

int
malloc_heap_add_external_memory(struct malloc_heap *heap, void *va_addr,
		rte_iova_t iova_addrs[], unsigned int n_pages, size_t page_sz)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	char fbarray_name[RTE_FBARRAY_NAME_LEN];
	struct rte_memseg_list *msl = NULL;
	struct rte_fbarray *arr;
	size_t seg_len = n_pages * page_sz;
	unsigned int i;

	/* first, find a free memseg list */
	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *tmp = &mcfg->memsegs[i];

		if (tmp->base_va == NULL) {
			msl = tmp;
			break;
		}
	}
	if (msl == NULL) {
		RTE_LOG(ERR, EAL, "Couldn't find empty memseg list\n");
		rte_errno = ENOSPC;
		return -1;
	}

	snprintf(fbarray_name, sizeof(fbarray_name) - 1, "%s_%p",
			heap->name, va_addr);

	/* create the backing fbarray */
	if (rte_fbarray_init(&msl->memseg_arr, fbarray_name, n_pages,
			sizeof(struct rte_memseg)) < 0) {
		RTE_LOG(ERR, EAL, "Couldn't create fbarray backing the memseg list\n");
		return -1;
	}
	arr = &msl->memseg_arr;

	/* fbarray created, fill it up */
	for (i = 0; i < n_pages; i++) {
		struct rte_memseg *ms;

		rte_fbarray_set_used(arr, i);
		ms = rte_fbarray_get(arr, i);
		ms->addr = RTE_PTR_ADD(va_addr, i * page_sz);
		ms->iova = iova_addrs == NULL ? RTE_BAD_IOVA : iova_addrs[i];
		ms->hugepage_sz = page_sz;
		ms->len = page_sz;
		ms->nchannel = rte_memory_get_nchannel();
		ms->nrank = rte_memory_get_nrank();
		ms->socket_id = heap->socket_id;
	}

	/* set up the memseg list */
	msl->base_va = va_addr;
	msl->page_sz = page_sz;
	msl->socket_id = heap->socket_id;
	msl->len = seg_len;
	msl->version = 0;
	msl->external = 1;

	/* erase contents of new memory */
	memset(va_addr, 0, seg_len);

	/* now, add newly minted memory to the malloc heap */
	malloc_heap_add_memory(heap, msl, va_addr, seg_len);

	heap->total_size += seg_len;

	/* all done! */
	RTE_LOG(DEBUG, EAL, "Added segment for heap %s starting at %p\n",
			heap->name, va_addr);

	return 0;
}
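
A companion sketch (again not part of the patch): how a caller could build the
iova_addrs[] table consumed by the fill loop above. Using rte_mem_virt2iova()
per page is an assumption for illustration; an application may instead take
the addresses from its own allocator.

#include <stdlib.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_malloc.h>

static int
add_with_iova_table(const char *heap_name, void *va_addr, size_t len,
		size_t page_sz)
{
	unsigned int n_pages = len / page_sz;
	rte_iova_t *iova;
	unsigned int i;
	int ret;

	iova = malloc(n_pages * sizeof(*iova));
	if (iova == NULL)
		return -1;

	for (i = 0; i < n_pages; i++) {
		/* resolve the IOVA of each page backing the external area */
		iova[i] = rte_mem_virt2iova(RTE_PTR_ADD(va_addr, i * page_sz));
		if (iova[i] == RTE_BAD_IOVA) {
			free(iova);
			return -1;
		}
	}

	ret = rte_malloc_heap_memory_add(heap_name, va_addr, len,
			iova, n_pages, page_sz);
	free(iova);
	return ret;
}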
int
malloc_heap_create(struct malloc_heap *heap, const char *heap_name)
{


@@ -39,6 +39,10 @@ malloc_heap_create(struct malloc_heap *heap, const char *heap_name);

int
malloc_heap_destroy(struct malloc_heap *heap);

int
malloc_heap_add_external_memory(struct malloc_heap *heap, void *va_addr,
		rte_iova_t iova_addrs[], unsigned int n_pages, size_t page_sz);

int
malloc_heap_free(struct malloc_elem *elem);


@@ -327,6 +327,57 @@ find_named_heap(const char *name)
	return NULL;
}

int
rte_malloc_heap_memory_add(const char *heap_name, void *va_addr, size_t len,
		rte_iova_t iova_addrs[], unsigned int n_pages, size_t page_sz)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct malloc_heap *heap = NULL;
	unsigned int n;
	int ret;

	if (heap_name == NULL || va_addr == NULL ||
			page_sz == 0 || !rte_is_power_of_2(page_sz) ||
			strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) == 0 ||
			strnlen(heap_name, RTE_HEAP_NAME_MAX_LEN) ==
				RTE_HEAP_NAME_MAX_LEN) {
		rte_errno = EINVAL;
		return -1;
	}

	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);

	/* find our heap */
	heap = find_named_heap(heap_name);
	if (heap == NULL) {
		rte_errno = ENOENT;
		ret = -1;
		goto unlock;
	}
	if (heap->socket_id < RTE_MAX_NUMA_NODES) {
		/* cannot add memory to internal heaps */
		rte_errno = EPERM;
		ret = -1;
		goto unlock;
	}
	n = len / page_sz;
	if (n != n_pages && iova_addrs != NULL) {
		rte_errno = EINVAL;
		ret = -1;
		goto unlock;
	}

	rte_spinlock_lock(&heap->lock);
	ret = malloc_heap_add_external_memory(heap, va_addr, iova_addrs, n,
			page_sz);
	rte_spinlock_unlock(&heap->lock);

unlock:
	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
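
Finally, a small sketch (not part of the patch) of how a caller might
interpret the error codes set by this wrapper; the heap and memory parameters
are placeholders.

#include <errno.h>
#include <stdio.h>
#include <rte_errno.h>
#include <rte_malloc.h>

static int
add_or_report(const char *heap_name, void *addr, size_t len, size_t page_sz)
{
	if (rte_malloc_heap_memory_add(heap_name, addr, len, NULL, 0,
			page_sz) == 0)
		return 0;

	if (rte_errno == EPERM)
		printf("'%s' is a built-in socket heap, cannot add memory\n",
				heap_name);
	else if (rte_errno == ENOENT)
		printf("no heap named '%s' exists\n", heap_name);
	else if (rte_errno == ENOSPC)
		printf("no free memseg list left for this memory chunk\n");
	else
		printf("invalid parameters: %s\n", rte_strerror(rte_errno));
	return -1;
}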
int
rte_malloc_heap_create(const char *heap_name)
{


@@ -321,6 +321,7 @@ EXPERIMENTAL {
	rte_malloc_heap_create;
	rte_malloc_heap_destroy;
	rte_malloc_heap_get_socket;
	rte_malloc_heap_memory_add;
	rte_malloc_heap_socket_is_external;
	rte_mem_alloc_validator_register;
	rte_mem_alloc_validator_unregister;