memzone: enable IOVA-contiguous reserving

This adds a new flag to request that a reserved memzone be IOVA-contiguous.
This is useful for allocating hardware resources like NIC rings/queues etc.
For now, hugepage memory is always contiguous, but we need to prepare the
drivers for the switch.

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
Tested-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Tested-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Tested-by: Gowrishankar Muthukrishnan <gowrishankar.m@linux.vnet.ibm.com>
Author:    Anatoly Burakov
Committer: Thomas Monjalon
Date:      2018-04-11 13:29:46 +01:00
Parent:    5ea85289a9
Commit:    23fa86e529

2 changed files with 28 additions and 8 deletions
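
As a rough sketch of how a driver could use the new flag once it is available
(the function and zone names below are examples, not part of this patch):

#include <stddef.h>
#include <stdint.h>
#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_memzone.h>

/* Reserve cache-line-aligned, IOVA-contiguous memory for a hypothetical
 * descriptor ring; returns NULL if no such memory is available. */
static const struct rte_memzone *
example_reserve_ring(uint16_t nb_desc, size_t desc_size)
{
	return rte_memzone_reserve_aligned("example_nic_ring",
			(size_t)nb_desc * desc_size, rte_socket_id(),
			RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
}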


@@ -99,12 +99,13 @@ find_heap_max_free_elem(int *s, unsigned align)
 static const struct rte_memzone *
 memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
 		int socket_id, unsigned int flags, unsigned int align,
-		unsigned int bound, bool contig)
+		unsigned int bound)
 {
 	struct rte_memzone *mz;
 	struct rte_mem_config *mcfg;
 	size_t requested_len;
 	int socket, i;
+	bool contig;
 
 	/* get pointer to global configuration */
 	mcfg = rte_eal_get_configuration()->mem_config;
@@ -170,7 +171,17 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
 	if (!rte_eal_has_hugepages())
 		socket_id = SOCKET_ID_ANY;
 
+	contig = (flags & RTE_MEMZONE_IOVA_CONTIG) != 0;
+	/* malloc only cares about size flags, remove contig flag from flags */
+	flags &= ~RTE_MEMZONE_IOVA_CONTIG;
+
 	if (len == 0) {
+		/* len == 0 is only allowed for non-contiguous zones */
+		if (contig) {
+			RTE_LOG(DEBUG, EAL, "Reserving zero-length contiguous memzones is not supported\n");
+			rte_errno = EINVAL;
+			return NULL;
+		}
 		if (bound != 0)
 			requested_len = bound;
 		else {
@@ -238,8 +249,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
 
 static const struct rte_memzone *
 rte_memzone_reserve_thread_safe(const char *name, size_t len, int socket_id,
-		unsigned int flags, unsigned int align, unsigned int bound,
-		bool contig)
+		unsigned int flags, unsigned int align, unsigned int bound)
 {
 	struct rte_mem_config *mcfg;
 	const struct rte_memzone *mz = NULL;
@@ -250,7 +260,7 @@ rte_memzone_reserve_thread_safe(const char *name, size_t len, int socket_id,
 	rte_rwlock_write_lock(&mcfg->mlock);
 
 	mz = memzone_reserve_aligned_thread_unsafe(
-		name, len, socket_id, flags, align, bound, contig);
+		name, len, socket_id, flags, align, bound);
 
 	rte_rwlock_write_unlock(&mcfg->mlock);
 
@@ -267,7 +277,7 @@ rte_memzone_reserve_bounded(const char *name, size_t len, int socket_id,
 		unsigned flags, unsigned align, unsigned bound)
 {
 	return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
-			align, bound, false);
+			align, bound);
 }
 
 /*
@@ -279,7 +289,7 @@ rte_memzone_reserve_aligned(const char *name, size_t len, int socket_id,
 		unsigned flags, unsigned align)
 {
 	return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
-			align, 0, false);
+			align, 0);
 }
 
 /*
@@ -291,8 +301,7 @@ rte_memzone_reserve(const char *name, size_t len, int socket_id,
 		unsigned flags)
 {
 	return rte_memzone_reserve_thread_safe(name, len, socket_id,
-					flags, RTE_CACHE_LINE_SIZE, 0,
-					false);
+					flags, RTE_CACHE_LINE_SIZE, 0);
 }
 
 int
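
The len == 0 check added above means a zero-length ("largest available")
request cannot be combined with the new flag; the call fails with NULL and
rte_errno set to EINVAL. A minimal sketch of what a caller would observe
(the zone name is illustrative):

#include <errno.h>
#include <rte_errno.h>
#include <rte_memzone.h>

/* A zero-length request combined with RTE_MEMZONE_IOVA_CONTIG is rejected:
 * the call returns NULL and sets rte_errno to EINVAL. */
static int
example_zero_len_contig(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve("example_zone", 0, SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG);
	if (mz == NULL && rte_errno == EINVAL)
		return -1; /* expected: unsupported combination */

	return 0;
}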


@@ -23,6 +23,7 @@
  */
 
 #include <stdio.h>
+#include <rte_compat.h>
 #include <rte_memory.h>
 #include <rte_common.h>
 
@@ -39,6 +40,7 @@ extern "C" {
 #define RTE_MEMZONE_512MB          0x00040000   /**< Use 512MB pages. */
 #define RTE_MEMZONE_4GB            0x00080000   /**< Use 4GB pages. */
 #define RTE_MEMZONE_SIZE_HINT_ONLY 0x00000004   /**< Use available page size */
+#define RTE_MEMZONE_IOVA_CONTIG    0x00100000   /**< Ask for IOVA-contiguous memzone. */
 
 /**
  * A structure describing a memzone, which is a contiguous portion of
@@ -102,6 +104,9 @@ struct rte_memzone {
  *                                  If this flag is not set, the function
  *                                  will return error on an unavailable size
  *                                  request.
+ *   - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous.
+ *                               This option should be used when allocating
+ *                               memory intended for hardware rings etc.
  * @return
  *   A pointer to a correctly-filled read-only memzone descriptor, or NULL
  *   on error.
@@ -152,6 +157,9 @@ const struct rte_memzone *rte_memzone_reserve(const char *name,
  *                                  If this flag is not set, the function
  *                                  will return error on an unavailable size
  *                                  request.
+ *   - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous.
+ *                               This option should be used when allocating
+ *                               memory intended for hardware rings etc.
  * @param align
  *   Alignment for resulting memzone. Must be a power of 2.
  * @return
@@ -207,6 +215,9 @@ const struct rte_memzone *rte_memzone_reserve_aligned(const char *name,
  *                                  If this flag is not set, the function
  *                                  will return error on an unavailable size
  *                                  request.
+ *   - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous.
+ *                               This option should be used when allocating
+ *                               memory intended for hardware rings etc.
  * @param align
  *   Alignment for resulting memzone. Must be a power of 2.
  * @param bound
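
To illustrate why the flag matters for hardware rings, a driver typically
hands the zone's IOVA base to the device and accesses the same memory through
the virtual address. The sketch below uses a made-up descriptor layout and a
pseudo-code register write; none of these names come from this patch:

#include <stdint.h>
#include <string.h>
#include <rte_lcore.h>
#include <rte_memzone.h>

/* Made-up descriptor layout, for illustration only. */
struct example_desc {
	uint64_t buf_iova;
	uint16_t len;
	uint16_t flags;
};

static int
example_setup_tx_ring(uint16_t nb_desc)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve_aligned("example_tx_ring",
			sizeof(struct example_desc) * nb_desc,
			rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
			RTE_CACHE_LINE_SIZE);
	if (mz == NULL)
		return -1;

	/* The CPU initializes the ring through the virtual address... */
	memset(mz->addr, 0, mz->len);

	/* ...while the device is programmed with the IOVA base; because the
	 * zone is IOVA-contiguous, a single base address covers the whole
	 * ring. example_write_reg() stands in for a real register write:
	 *
	 *	example_write_reg(dev, RING_BASE, mz->iova);
	 */
	return 0;
}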