/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_random.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_common.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_malloc.h>

#include "../../lib/librte_eal/common/malloc_elem.h"
#include "../../lib/librte_eal/common/eal_memcfg.h"

#include "test.h"

/*
 * Memzone
 * =======
 *
 * - Search for three reserved zones or reserve them if they do not exist:
 *
 *   - One is on any socket id.
 *   - The second is on socket 0.
 *   - The last one is on socket 1 (if socket 1 exists).
 *
 * - Check that the zones exist.
 *
 * - Check that the zones are cache-aligned.
 *
 * - Check that zones do not overlap.
 *
 * - Check that the zones are on the correct socket id.
 *
 * - Check that a lookup of the first zone returns the same pointer.
 *
 * - Check that it is not possible to create another zone with the
 *   same name as an existing zone.
 *
 * - Check flags for specific huge page size reservation.
 */

#define TEST_MEMZONE_NAME(suffix) "MZ_TEST_" suffix
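
/*
 * The tests below all follow the same basic reserve/lookup/free pattern.
 * As an illustrative sketch only (the zone name "example" is arbitrary and
 * is not reserved by any test in this file):
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_memzone_reserve(TEST_MEMZONE_NAME("example"), 100,
 *			SOCKET_ID_ANY, 0);
 *	if (mz != NULL &&
 *			rte_memzone_lookup(TEST_MEMZONE_NAME("example")) == mz)
 *		rte_memzone_free(mz);
 */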

/* Test if memory overlaps: return 1 if true, or 0 if false. */
static int
is_memory_overlap(rte_iova_t ptr1, size_t len1, rte_iova_t ptr2, size_t len2)
{
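	/*
	 * Two [start, start + len) ranges overlap exactly when the start of
	 * one range falls inside the other; the checks below test this in
	 * both directions.
	 */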
	if (ptr2 >= ptr1 && (ptr2 - ptr1) < len1)
		return 1;
	else if (ptr2 < ptr1 && (ptr1 - ptr2) < len2)
		return 1;
	return 0;
}

static int
test_memzone_invalid_alignment(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(TEST_MEMZONE_NAME("invalid_alignment"));
	if (mz != NULL) {
		printf("Zone with invalid alignment has been reserved\n");
		return -1;
	}

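	/* An alignment of 100 is not a power of two, so this request must be
	 * rejected by rte_memzone_reserve_aligned().
	 */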
	mz = rte_memzone_reserve_aligned(TEST_MEMZONE_NAME("invalid_alignment"),
			100, SOCKET_ID_ANY, 0, 100);
	if (mz != NULL) {
		printf("Zone with invalid alignment has been reserved\n");
		return -1;
	}
	return 0;
}

static int
test_memzone_reserving_zone_size_bigger_than_the_maximum(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(
			TEST_MEMZONE_NAME("zone_size_bigger_than_the_maximum"));
	if (mz != NULL) {
		printf("zone_size_bigger_than_the_maximum has been reserved\n");
		return -1;
	}

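	/* (size_t)-1 is the largest representable length, which can never be
	 * satisfied, so the reservation below is expected to fail.
	 */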
	mz = rte_memzone_reserve(
			TEST_MEMZONE_NAME("zone_size_bigger_than_the_maximum"),
			(size_t)-1, SOCKET_ID_ANY, 0);
	if (mz != NULL) {
		printf("It should be impossible to reserve such a big memzone\n");
		return -1;
	}

	return 0;
}
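
/*
 * Availability of each hugepage size, filled in by walking the EAL memseg
 * lists: find_available_pagesz() is used as the rte_memseg_list_walk()
 * callback and sets the flag that matches each list's page size.
 */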
struct walk_arg {
	int hugepage_2MB_avail;
	int hugepage_1GB_avail;
	int hugepage_16MB_avail;
	int hugepage_16GB_avail;
};

static int
find_available_pagesz(const struct rte_memseg_list *msl, void *arg)
{
	struct walk_arg *wa = arg;

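	/* Only EAL-managed hugepage segments matter here; skip any externally
	 * allocated segment lists.
	 */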
	if (msl->external)
		return 0;

	if (msl->page_sz == RTE_PGSIZE_2M)
		wa->hugepage_2MB_avail = 1;
	if (msl->page_sz == RTE_PGSIZE_1G)
		wa->hugepage_1GB_avail = 1;
	if (msl->page_sz == RTE_PGSIZE_16M)
		wa->hugepage_16MB_avail = 1;
	if (msl->page_sz == RTE_PGSIZE_16G)
		wa->hugepage_16GB_avail = 1;

	return 0;
}

static int
test_memzone_reserve_flags(void)
{
	const struct rte_memzone *mz;
	struct walk_arg wa;
	int hugepage_2MB_avail, hugepage_1GB_avail;
	int hugepage_16MB_avail, hugepage_16GB_avail;
	const size_t size = 100;

	memset(&wa, 0, sizeof(wa));

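	/* Record which hugepage sizes are actually present by walking all
	 * internal memseg lists.
	 */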
	rte_memseg_list_walk(find_available_pagesz, &wa);

	hugepage_2MB_avail = wa.hugepage_2MB_avail;
	hugepage_1GB_avail = wa.hugepage_1GB_avail;
	hugepage_16MB_avail = wa.hugepage_16MB_avail;
	hugepage_16GB_avail = wa.hugepage_16GB_avail;

	/* Display the availability of 2MB, 1GB, 16MB and 16GB pages */
	if (hugepage_2MB_avail)
		printf("2MB Huge pages available\n");
	if (hugepage_1GB_avail)
		printf("1GB Huge pages available\n");
	if (hugepage_16MB_avail)
		printf("16MB Huge pages available\n");
	if (hugepage_16GB_avail)
		printf("16GB Huge pages available\n");
	/*
	 * If 2MB pages are available, check that a small memzone is correctly
	 * reserved from 2MB huge pages when requested by the RTE_MEMZONE_2MB
	 * flag. Also check that the RTE_MEMZONE_SIZE_HINT_ONLY flag only
	 * defaults to an available page size (i.e. 1GB) when 2MB pages are
	 * unavailable.
	 */
	if (hugepage_2MB_avail) {
		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_2M"),
				size, SOCKET_ID_ANY, RTE_MEMZONE_2MB);
		if (mz == NULL) {
			printf("MEMZONE FLAG 2MB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_2M) {
			printf("hugepage_sz not equal 2M\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_2M_HINT"),
				size, SOCKET_ID_ANY,
				RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			printf("MEMZONE FLAG 2MB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_2M) {
			printf("hugepage_sz not equal 2M\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		/* Check that if 1GB huge pages are unavailable, the
		 * reservation fails unless the HINT flag is given.
		 */
		if (!hugepage_1GB_avail) {
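			/* With RTE_MEMZONE_SIZE_HINT_ONLY the requested page
			 * size is only a hint, so the reservation should fall
			 * back to an available page size (2MB here) instead
			 * of failing.
			 */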
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_1G_HINT"),
					size, SOCKET_ID_ANY,
					RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL) {
				printf("MEMZONE FLAG 1GB & HINT\n");
				return -1;
			}
			if (mz->hugepage_sz != RTE_PGSIZE_2M) {
				printf("hugepage_sz not equal 2M\n");
				return -1;
			}
			if (rte_memzone_free(mz)) {
				printf("Fail memzone free\n");
				return -1;
			}

			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_1G"), size,
					SOCKET_ID_ANY, RTE_MEMZONE_1GB);
			if (mz != NULL) {
				printf("MEMZONE FLAG 1GB\n");
				return -1;
			}
		}
	}

	/* As with the 2MB tests above, but for 1GB huge page requests. */
	if (hugepage_1GB_avail) {
		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_1G"),
				size, SOCKET_ID_ANY, RTE_MEMZONE_1GB);
		if (mz == NULL) {
			printf("MEMZONE FLAG 1GB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_1G) {
			printf("hugepage_sz not equal 1G\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_1G_HINT"),
				size, SOCKET_ID_ANY,
				RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			printf("MEMZONE FLAG 1GB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_1G) {
			printf("hugepage_sz not equal 1G\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		/* Check that if 2MB huge pages are unavailable, the
		 * reservation fails unless the HINT flag is given.
		 */
		if (!hugepage_2MB_avail) {
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_2M_HINT"),
					size, SOCKET_ID_ANY,
					RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL) {
				printf("MEMZONE FLAG 2MB & HINT\n");
				return -1;
			}
			if (mz->hugepage_sz != RTE_PGSIZE_1G) {
				printf("hugepage_sz not equal 1G\n");
				return -1;
			}
			if (rte_memzone_free(mz)) {
				printf("Fail memzone free\n");
				return -1;
			}
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_2M"), size,
					SOCKET_ID_ANY, RTE_MEMZONE_2MB);
			if (mz != NULL) {
				printf("MEMZONE FLAG 2MB\n");
				return -1;
			}
		}

		if (hugepage_2MB_avail && hugepage_1GB_avail) {
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_2M_HINT"),
					size, SOCKET_ID_ANY,
					RTE_MEMZONE_2MB|RTE_MEMZONE_1GB);
			if (mz == NULL) {
				printf("BOTH SIZES SET\n");
				return -1;
			}
			if (mz->hugepage_sz != RTE_PGSIZE_1G &&
					mz->hugepage_sz != RTE_PGSIZE_2M) {
				printf("Wrong size when both sizes set\n");
				return -1;
			}
			if (rte_memzone_free(mz)) {
				printf("Fail memzone free\n");
				return -1;
			}
		}
	}

	/*
	 * This option is for IBM Power. If 16MB pages are available, check
	 * that a small memzone is correctly reserved from 16MB huge pages
	 * when requested by the RTE_MEMZONE_16MB flag. Also check that the
	 * RTE_MEMZONE_SIZE_HINT_ONLY flag only defaults to an available
	 * page size (i.e. 16GB) when 16MB pages are unavailable.
	 */
	if (hugepage_16MB_avail) {
		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_16M"),
				size, SOCKET_ID_ANY, RTE_MEMZONE_16MB);
		if (mz == NULL) {
			printf("MEMZONE FLAG 16MB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_16M) {
			printf("hugepage_sz not equal 16M\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		mz = rte_memzone_reserve(
				TEST_MEMZONE_NAME("flag_zone_16M_HINT"), size,
				SOCKET_ID_ANY,
				RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			printf("MEMZONE FLAG 16MB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_16M) {
			printf("hugepage_sz not equal 16M\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		/* Check that if 16GB huge pages are unavailable, the
		 * reservation fails unless the HINT flag is given.
		 */
		if (!hugepage_16GB_avail) {
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_16G_HINT"),
					size, SOCKET_ID_ANY,
					RTE_MEMZONE_16GB |
					RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL) {
				printf("MEMZONE FLAG 16GB & HINT\n");
				return -1;
			}
			if (mz->hugepage_sz != RTE_PGSIZE_16M) {
				printf("hugepage_sz not equal 16M\n");
				return -1;
			}
			if (rte_memzone_free(mz)) {
				printf("Fail memzone free\n");
				return -1;
			}

			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_16G"),
					size,
					SOCKET_ID_ANY, RTE_MEMZONE_16GB);
			if (mz != NULL) {
				printf("MEMZONE FLAG 16GB\n");
				return -1;
			}
		}
	}

	/* As with the 16MB tests above, but for 16GB huge page requests. */
	if (hugepage_16GB_avail) {
		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_16G"),
				size, SOCKET_ID_ANY, RTE_MEMZONE_16GB);
		if (mz == NULL) {
			printf("MEMZONE FLAG 16GB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_16G) {
			printf("hugepage_sz not equal 16G\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		mz = rte_memzone_reserve(
				TEST_MEMZONE_NAME("flag_zone_16G_HINT"), size,
				SOCKET_ID_ANY,
				RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY);
		if (mz == NULL) {
			printf("MEMZONE FLAG 16GB\n");
			return -1;
		}
		if (mz->hugepage_sz != RTE_PGSIZE_16G) {
			printf("hugepage_sz not equal 16G\n");
			return -1;
		}
		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}

		/* Check that if 16MB huge pages are unavailable, the
		 * reservation fails unless the HINT flag is given.
		 */
		if (!hugepage_16MB_avail) {
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_16M_HINT"),
					size, SOCKET_ID_ANY,
					RTE_MEMZONE_16MB |
					RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL) {
				printf("MEMZONE FLAG 16MB & HINT\n");
				return -1;
			}
			if (mz->hugepage_sz != RTE_PGSIZE_16G) {
				printf("hugepage_sz not equal 16G\n");
				return -1;
			}
			if (rte_memzone_free(mz)) {
				printf("Fail memzone free\n");
				return -1;
			}
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_16M"),
					size, SOCKET_ID_ANY, RTE_MEMZONE_16MB);
			if (mz != NULL) {
				printf("MEMZONE FLAG 16MB\n");
				return -1;
			}
		}

		if (hugepage_16MB_avail && hugepage_16GB_avail) {
			mz = rte_memzone_reserve(
					TEST_MEMZONE_NAME("flag_zone_16M_HINT"),
					size, SOCKET_ID_ANY,
					RTE_MEMZONE_16MB|RTE_MEMZONE_16GB);
			if (mz == NULL) {
				printf("BOTH SIZES SET\n");
				return -1;
			}
			if (mz->hugepage_sz != RTE_PGSIZE_16G &&
					mz->hugepage_sz != RTE_PGSIZE_16M) {
				printf("Wrong size when both sizes set\n");
				return -1;
			}
			if (rte_memzone_free(mz)) {
				printf("Fail memzone free\n");
				return -1;
			}
		}
	}

	return 0;
}

/* Find the greatest free block size usable from the heap on the given socket */
static size_t
find_max_block_free_size(unsigned int align, unsigned int socket_id)
{
	struct rte_malloc_socket_stats stats;
	size_t len, overhead;

	rte_malloc_get_socket_stats(socket_id, &stats);

	len = stats.greatest_free_size;
	overhead = MALLOC_ELEM_OVERHEAD;

	if (len == 0)
		return 0;

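	/* A memzone is carved out of a malloc element, so the usable length
	 * is the biggest free block minus the element overhead and the
	 * worst-case padding needed to satisfy the requested alignment.
	 */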
	align = RTE_CACHE_LINE_ROUNDUP(align);
	overhead += align;

	if (len < overhead)
		return 0;

	return len - overhead;
}

static int
test_memzone_reserve_max(void)
{
	unsigned int i;

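	/* Exercise the zero-length reservation on every detected physical
	 * socket.
	 */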
	for (i = 0; i < rte_socket_count(); i++) {
		const struct rte_memzone *mz;
		size_t maxlen;
		int socket;

		socket = rte_socket_id_by_idx(i);
		maxlen = find_max_block_free_size(0, socket);
		if (maxlen == 0) {
			printf("There is no space left!\n");
			return 0;
		}

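		/* A zero-length reserve asks for the biggest element that the
		 * heap on this socket can currently provide, so the resulting
		 * zone length should match the estimate computed above.
		 */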
		mz = rte_memzone_reserve(TEST_MEMZONE_NAME("max_zone"), 0,
				socket, 0);
		if (mz == NULL) {
			printf("Failed to reserve a big chunk of memory - %s\n",
					rte_strerror(rte_errno));
			rte_dump_physmem_layout(stdout);
			rte_memzone_dump(stdout);
			return -1;
		}

		if (mz->len != maxlen) {
			printf("Memzone reserve with 0 size did not return biggest block\n");
			printf("Expected size = %zu, actual size = %zu\n",
					maxlen, mz->len);
			rte_dump_physmem_layout(stdout);
			rte_memzone_dump(stdout);
			return -1;
		}

		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}
	}

	return 0;
}
|
|
|
|
|
|
|
|
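/*
 * Reserve the largest available memzone on each socket with a randomly
 * chosen alignment, then check that the returned zone is aligned and that
 * its length falls between the aligned and unaligned free-block estimates.
 */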
static int
test_memzone_reserve_max_aligned(void)
{
	unsigned int i;

	for (i = 0; i < rte_socket_count(); i++) {
		const struct rte_memzone *mz;
		size_t maxlen, minlen = 0;
		int socket;

		socket = rte_socket_id_by_idx(i);

		/* random alignment */
		rte_srand((unsigned int)rte_rdtsc());
		const unsigned int align = 1 << ((rte_rand() % 8) + 5); /* from 32 up to 4k alignment */

		/* memzone size may be between size and size - align */
		minlen = find_max_block_free_size(align, socket);
		maxlen = find_max_block_free_size(0, socket);

		if (minlen == 0 || maxlen == 0) {
			printf("There is no space left for biggest %u-aligned memzone!\n",
					align);
			return 0;
		}

		mz = rte_memzone_reserve_aligned(
				TEST_MEMZONE_NAME("max_zone_aligned"),
				0, socket, 0, align);
		if (mz == NULL) {
			printf("Failed to reserve a big chunk of memory - %s\n",
					rte_strerror(rte_errno));
			rte_dump_physmem_layout(stdout);
			rte_memzone_dump(stdout);
			return -1;
		}

		if (mz->addr != RTE_PTR_ALIGN(mz->addr, align)) {
			printf("Memzone reserve with 0 size and alignment %u did not return aligned block\n",
					align);
			rte_dump_physmem_layout(stdout);
			rte_memzone_dump(stdout);
			return -1;
		}

		if (mz->len < minlen || mz->len > maxlen) {
			printf("Memzone reserve with 0 size and alignment %u did not return"
					" biggest block\n", align);
			printf("Expected size = %zu-%zu, actual size = %zu\n",
					minlen, maxlen, mz->len);
			rte_dump_physmem_layout(stdout);
			rte_memzone_dump(stdout);
			return -1;
		}

		if (rte_memzone_free(mz)) {
			printf("Fail memzone free\n");
			return -1;
		}
	}

	return 0;
}

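/*
 * Reserve memzones with explicit alignments from 32 up to 1024 bytes and
 * verify that each zone is aligned both virtually and by IOVA, that the
 * zones do not overlap, and that all of them can be freed afterwards.
 */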
static int
test_memzone_aligned(void)
{
	const struct rte_memzone *memzone_aligned_32;
	const struct rte_memzone *memzone_aligned_128;
	const struct rte_memzone *memzone_aligned_256;
	const struct rte_memzone *memzone_aligned_512;
	const struct rte_memzone *memzone_aligned_1024;

	/* memzone that should automatically be adjusted to align on 64 bytes */
	memzone_aligned_32 = rte_memzone_reserve_aligned(
			TEST_MEMZONE_NAME("aligned_32"), 100, SOCKET_ID_ANY, 0,
			32);

	/* memzone that is supposed to be aligned on a 128 byte boundary */
	memzone_aligned_128 = rte_memzone_reserve_aligned(
			TEST_MEMZONE_NAME("aligned_128"), 100, SOCKET_ID_ANY, 0,
			128);

	/* memzone that is supposed to be aligned on a 256 byte boundary */
	memzone_aligned_256 = rte_memzone_reserve_aligned(
			TEST_MEMZONE_NAME("aligned_256"), 100, SOCKET_ID_ANY, 0,
			256);

	/* memzone that is supposed to be aligned on a 512 byte boundary */
	memzone_aligned_512 = rte_memzone_reserve_aligned(
			TEST_MEMZONE_NAME("aligned_512"), 100, SOCKET_ID_ANY, 0,
			512);

	/* memzone that is supposed to be aligned on a 1024 byte boundary */
	memzone_aligned_1024 = rte_memzone_reserve_aligned(
			TEST_MEMZONE_NAME("aligned_1024"), 100, SOCKET_ID_ANY,
			0, 1024);

	printf("check alignments and lengths\n");
	if (memzone_aligned_32 == NULL) {
		printf("Unable to reserve 64-byte aligned memzone!\n");
		return -1;
	}
	if ((memzone_aligned_32->iova & RTE_CACHE_LINE_MASK) != 0)
		return -1;
	if (((uintptr_t) memzone_aligned_32->addr & RTE_CACHE_LINE_MASK) != 0)
		return -1;
	if ((memzone_aligned_32->len & RTE_CACHE_LINE_MASK) != 0)
		return -1;

	if (memzone_aligned_128 == NULL) {
		printf("Unable to reserve 128-byte aligned memzone!\n");
		return -1;
	}
	if ((memzone_aligned_128->iova & 127) != 0)
		return -1;
	if (((uintptr_t) memzone_aligned_128->addr & 127) != 0)
		return -1;
	if ((memzone_aligned_128->len & RTE_CACHE_LINE_MASK) != 0)
		return -1;

	if (memzone_aligned_256 == NULL) {
		printf("Unable to reserve 256-byte aligned memzone!\n");
		return -1;
	}
	if ((memzone_aligned_256->iova & 255) != 0)
		return -1;
	if (((uintptr_t) memzone_aligned_256->addr & 255) != 0)
		return -1;
	if ((memzone_aligned_256->len & RTE_CACHE_LINE_MASK) != 0)
		return -1;

	if (memzone_aligned_512 == NULL) {
		printf("Unable to reserve 512-byte aligned memzone!\n");
		return -1;
	}
	if ((memzone_aligned_512->iova & 511) != 0)
		return -1;
	if (((uintptr_t) memzone_aligned_512->addr & 511) != 0)
		return -1;
	if ((memzone_aligned_512->len & RTE_CACHE_LINE_MASK) != 0)
		return -1;

	if (memzone_aligned_1024 == NULL) {
		printf("Unable to reserve 1024-byte aligned memzone!\n");
		return -1;
	}
	if ((memzone_aligned_1024->iova & 1023) != 0)
		return -1;
	if (((uintptr_t) memzone_aligned_1024->addr & 1023) != 0)
		return -1;
	if ((memzone_aligned_1024->len & RTE_CACHE_LINE_MASK) != 0)
		return -1;

	/* check that zones don't overlap */
	printf("check overlapping\n");
	if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
			memzone_aligned_128->iova, memzone_aligned_128->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
			memzone_aligned_256->iova, memzone_aligned_256->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
			memzone_aligned_512->iova, memzone_aligned_512->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_32->iova, memzone_aligned_32->len,
			memzone_aligned_1024->iova, memzone_aligned_1024->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_128->iova, memzone_aligned_128->len,
			memzone_aligned_256->iova, memzone_aligned_256->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_128->iova, memzone_aligned_128->len,
			memzone_aligned_512->iova, memzone_aligned_512->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_128->iova, memzone_aligned_128->len,
			memzone_aligned_1024->iova, memzone_aligned_1024->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_256->iova, memzone_aligned_256->len,
			memzone_aligned_512->iova, memzone_aligned_512->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_256->iova, memzone_aligned_256->len,
			memzone_aligned_1024->iova, memzone_aligned_1024->len))
		return -1;
	if (is_memory_overlap(memzone_aligned_512->iova, memzone_aligned_512->len,
			memzone_aligned_1024->iova, memzone_aligned_1024->len))
		return -1;

	/* free all used zones */
	if (rte_memzone_free(memzone_aligned_32)) {
		printf("Fail memzone free\n");
		return -1;
	}
	if (rte_memzone_free(memzone_aligned_128)) {
		printf("Fail memzone free\n");
		return -1;
	}
	if (rte_memzone_free(memzone_aligned_256)) {
		printf("Fail memzone free\n");
		return -1;
	}
	if (rte_memzone_free(memzone_aligned_512)) {
		printf("Fail memzone free\n");
		return -1;
	}
	if (rte_memzone_free(memzone_aligned_1024)) {
		printf("Fail memzone free\n");
		return -1;
	}

	return 0;
}

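/*
 * Helper for the bounded-reserve tests: reserve a bounded memzone and check
 * its alignment, its length, and that it does not cross the given boundary.
 */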
static int
check_memzone_bounded(const char *name, uint32_t len, uint32_t align,
	uint32_t bound)
{
	const struct rte_memzone *mz;
	rte_iova_t bmask;

	bmask = ~((rte_iova_t)bound - 1);

	if ((mz = rte_memzone_reserve_bounded(name, len, SOCKET_ID_ANY, 0,
			align, bound)) == NULL) {
		printf("%s(%s): memzone creation failed\n",
			__func__, name);
		return -1;
	}

	if ((mz->iova & ((rte_iova_t)align - 1)) != 0) {
		printf("%s(%s): invalid phys addr alignment\n",
			__func__, mz->name);
		return -1;
	}

	if (((uintptr_t) mz->addr & ((uintptr_t)align - 1)) != 0) {
		printf("%s(%s): invalid virtual addr alignment\n",
			__func__, mz->name);
		return -1;
	}

	if ((mz->len & RTE_CACHE_LINE_MASK) != 0 || mz->len < len ||
			mz->len < RTE_CACHE_LINE_SIZE) {
		printf("%s(%s): invalid length\n",
			__func__, mz->name);
		return -1;
	}

	if ((mz->iova & bmask) !=
			((mz->iova + mz->len - 1) & bmask)) {
		printf("%s(%s): invalid memzone boundary %u crossed\n",
			__func__, mz->name, bound);
		return -1;
	}

	if (rte_memzone_free(mz)) {
		printf("Fail memzone free\n");
		return -1;
	}

	return 0;
}

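/*
 * Exercise rte_memzone_reserve_bounded(): reservations with an invalid
 * boundary (not a power of two, or smaller than the requested length) must
 * fail, while valid boundary/alignment combinations must succeed.
 */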
static int
test_memzone_bounded(void)
{
	const struct rte_memzone *memzone_err;
	int rc;

	/* should fail as boundary is not power of two */
	memzone_err = rte_memzone_reserve_bounded(
			TEST_MEMZONE_NAME("bounded_error_31"), 100,
			SOCKET_ID_ANY, 0, 32, UINT32_MAX);
	if (memzone_err != NULL) {
		printf("%s(%s) created a memzone with invalid boundary "
				"conditions\n", __func__, memzone_err->name);
		return -1;
	}

	/* should fail as len is greater than boundary */
	memzone_err = rte_memzone_reserve_bounded(
			TEST_MEMZONE_NAME("bounded_error_32"), 100,
			SOCKET_ID_ANY, 0, 32, 32);
	if (memzone_err != NULL) {
		printf("%s(%s) created a memzone with invalid boundary "
				"conditions\n", __func__, memzone_err->name);
		return -1;
	}

	rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_128"), 100, 128,
			128);
	if (rc != 0)
		return rc;

	rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_256"), 100, 256,
			128);
	if (rc != 0)
		return rc;

	rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_1K"), 100, 64,
			1024);
	if (rc != 0)
		return rc;

	rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_1K_MAX"), 0, 64,
			1024);
	if (rc != 0)
		return rc;

	return 0;
}

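/*
 * Check that freed memzone slots are reused: free zones, verify lookups no
 * longer find them, then fill the memzone table to its limit and confirm a
 * new reservation succeeds once a slot has been released.
 */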
static int
test_memzone_free(void)
{
	const struct rte_memzone *mz[RTE_MAX_MEMZONE + 1];
	int i;
	char name[20];

	mz[0] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone0"), 2000,
			SOCKET_ID_ANY, 0);
	mz[1] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone1"), 4000,
			SOCKET_ID_ANY, 0);

	if (mz[0] > mz[1])
		return -1;
	if (!rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone0")))
		return -1;
	if (!rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone1")))
		return -1;

	if (rte_memzone_free(mz[0])) {
		printf("Fail memzone free - tempzone0\n");
		return -1;
	}
	if (rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone0"))) {
		printf("Found previously free memzone - tempzone0\n");
		return -1;
	}
	mz[2] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone2"), 2000,
			SOCKET_ID_ANY, 0);

	if (mz[2] > mz[1]) {
		printf("tempzone2 should have gotten the free entry from tempzone0\n");
		return -1;
	}
	if (rte_memzone_free(mz[2])) {
		printf("Fail memzone free - tempzone2\n");
		return -1;
	}
	if (rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone2"))) {
		printf("Found previously free memzone - tempzone2\n");
		return -1;
	}
	if (rte_memzone_free(mz[1])) {
		printf("Fail memzone free - tempzone1\n");
		return -1;
	}
	if (rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone1"))) {
		printf("Found previously free memzone - tempzone1\n");
		return -1;
	}

	i = 0;
	do {
		snprintf(name, sizeof(name), TEST_MEMZONE_NAME("tempzone%u"),
				i);
		mz[i] = rte_memzone_reserve(name, 1, SOCKET_ID_ANY, 0);
	} while (mz[i++] != NULL);

	if (rte_memzone_free(mz[0])) {
		printf("Fail memzone free - tempzone0\n");
		return -1;
	}
	mz[0] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone0new"), 0,
			SOCKET_ID_ANY, 0);

	if (mz[0] == NULL) {
		printf("Failed to create memzone - tempzone0new - when MAX memzones were "
				"created and one was free\n");
		return -1;
	}

	for (i = i - 2; i >= 0; i--) {
		if (rte_memzone_free(mz[i])) {
			printf("Fail memzone free - tempzone%d\n", i);
			return -1;
		}
	}

	return 0;
}

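/*
 * Basic memzone API coverage: reserve zones on specific and arbitrary
 * sockets, verify cache-line alignment, non-overlap, socket placement,
 * lookup, rejection of duplicate names, and that freeing the zones brings
 * the memzone count back to its initial value.
 */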
static int
test_memzone_basic(void)
{
	const struct rte_memzone *memzone1;
	const struct rte_memzone *memzone2;
	const struct rte_memzone *memzone3;
	const struct rte_memzone *memzone4;
	const struct rte_memzone *mz;
	int memzone_cnt_after, memzone_cnt_expected;
	int memzone_cnt_before =
			rte_eal_get_configuration()->mem_config->memzones.count;

	memzone1 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone1"), 100,
				SOCKET_ID_ANY, 0);

	memzone2 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone2"), 1000,
				0, 0);

	memzone3 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone3"), 1000,
				1, 0);

	memzone4 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone4"), 1024,
				SOCKET_ID_ANY, 0);

	/* memzone3 may be NULL if we don't have NUMA */
	if (memzone1 == NULL || memzone2 == NULL || memzone4 == NULL)
		return -1;

	/* check how many memzones we are expecting */
	memzone_cnt_expected = memzone_cnt_before +
			(memzone1 != NULL) + (memzone2 != NULL) +
			(memzone3 != NULL) + (memzone4 != NULL);

	memzone_cnt_after =
			rte_eal_get_configuration()->mem_config->memzones.count;

	if (memzone_cnt_after != memzone_cnt_expected)
		return -1;

	rte_memzone_dump(stdout);

	/* check cache-line alignments */
	printf("check alignments and lengths\n");

	if ((memzone1->iova & RTE_CACHE_LINE_MASK) != 0)
		return -1;
	if ((memzone2->iova & RTE_CACHE_LINE_MASK) != 0)
		return -1;
	if (memzone3 != NULL && (memzone3->iova & RTE_CACHE_LINE_MASK) != 0)
		return -1;
	if ((memzone1->len & RTE_CACHE_LINE_MASK) != 0 || memzone1->len == 0)
		return -1;
	if ((memzone2->len & RTE_CACHE_LINE_MASK) != 0 || memzone2->len == 0)
		return -1;
	if (memzone3 != NULL && ((memzone3->len & RTE_CACHE_LINE_MASK) != 0 ||
			memzone3->len == 0))
		return -1;
	if (memzone4->len != 1024)
		return -1;

	/* check that zones don't overlap */
	printf("check overlapping\n");

	if (is_memory_overlap(memzone1->iova, memzone1->len,
			memzone2->iova, memzone2->len))
		return -1;
	if (memzone3 != NULL &&
			is_memory_overlap(memzone1->iova, memzone1->len,
					memzone3->iova, memzone3->len))
		return -1;
	if (memzone3 != NULL &&
			is_memory_overlap(memzone2->iova, memzone2->len,
					memzone3->iova, memzone3->len))
		return -1;

	printf("check socket ID\n");

	/* memzone2 must be on socket id 0 and memzone3 on socket 1 */
	if (memzone2->socket_id != 0)
		return -1;
	if (memzone3 != NULL && memzone3->socket_id != 1)
		return -1;

	printf("test zone lookup\n");
	mz = rte_memzone_lookup(TEST_MEMZONE_NAME("testzone1"));
	if (mz != memzone1)
		return -1;

	printf("test duplicate zone name\n");
	mz = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone1"), 100,
			SOCKET_ID_ANY, 0);
	if (mz != NULL)
		return -1;

	if (rte_memzone_free(memzone1)) {
		printf("Fail memzone free - memzone1\n");
		return -1;
	}
	if (rte_memzone_free(memzone2)) {
		printf("Fail memzone free - memzone2\n");
		return -1;
	}
	if (memzone3 && rte_memzone_free(memzone3)) {
		printf("Fail memzone free - memzone3\n");
		return -1;
	}
	if (rte_memzone_free(memzone4)) {
		printf("Fail memzone free - memzone4\n");
		return -1;
	}

	memzone_cnt_after =
			rte_eal_get_configuration()->mem_config->memzones.count;
	if (memzone_cnt_after != memzone_cnt_before)
		return -1;

	return 0;
}

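/*
 * Walk callback used by the final cleanup check: count every memzone seen
 * and flag any zone whose name starts with the TEST_MEMZONE_NAME() prefix,
 * since none should survive once the individual tests have freed theirs.
 */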
static int test_memzones_left;
static int memzone_walk_cnt;
static void memzone_walk_clb(const struct rte_memzone *mz,
		void *arg __rte_unused)
{
	memzone_walk_cnt++;
	/* compare against the prefix length only, so suffixed names match */
	if (!strncmp(TEST_MEMZONE_NAME(""), mz->name,
			strlen(TEST_MEMZONE_NAME(""))))
		test_memzones_left++;
}

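/*
 * Top-level test runner: exercises each memzone scenario in turn and then
 * walks the memzone list to make sure the tests did not leak any zones.
 */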
static int
test_memzone(void)
{
	/* take note of how many memzones were allocated before running */
	int memzone_cnt =
			rte_eal_get_configuration()->mem_config->memzones.count;

	printf("test basic memzone API\n");
	if (test_memzone_basic() < 0)
		return -1;

	printf("test free memzone\n");
	if (test_memzone_free() < 0)
		return -1;

	printf("test reserving memzone with bigger size than the maximum\n");
	if (test_memzone_reserving_zone_size_bigger_than_the_maximum() < 0)
		return -1;

	printf("test memzone_reserve flags\n");
	if (test_memzone_reserve_flags() < 0)
		return -1;

	printf("test alignment for memzone_reserve\n");
	if (test_memzone_aligned() < 0)
		return -1;

	printf("test boundary alignment for memzone_reserve\n");
	if (test_memzone_bounded() < 0)
		return -1;

	printf("test invalid alignment for memzone_reserve\n");
	if (test_memzone_invalid_alignment() < 0)
		return -1;

	printf("test reserving the largest size memzone possible\n");
	if (test_memzone_reserve_max() < 0)
		return -1;

	printf("test reserving the largest size aligned memzone possible\n");
	if (test_memzone_reserve_max_aligned() < 0)
		return -1;

	printf("check memzone cleanup\n");
	memzone_walk_cnt = 0;
	test_memzones_left = 0;
	rte_memzone_walk(memzone_walk_clb, NULL);
	if (memzone_walk_cnt != memzone_cnt || test_memzones_left > 0) {
		printf("there are some memzones left after test\n");
		rte_memzone_dump(stdout);
		return -1;
	}

	return 0;
}

REGISTER_TEST_COMMAND(memzone_autotest, test_memzone);