add prefix to cache line macros
CACHE_LINE_SIZE is a macro defined in machine/param.h in FreeBSD and
conflicts with the DPDK version of the macro. Add an RTE_ prefix to avoid
the conflict; CACHE_LINE_MASK and CACHE_LINE_ROUNDUP are prefixed likewise.

Signed-off-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
[Thomas: updated on HEAD, including PPC]
parent be04c70727
commit fdf20fa7be
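The clash is easiest to see as a minimal sketch (not taken from the patch
itself; the FreeBSD value shown is illustrative only):

    /* FreeBSD: <machine/param.h> already owns this name */
    #define CACHE_LINE_SIZE 128     /* illustrative, arch-dependent */

    /* DPDK before this patch: same name, redefinition clash on FreeBSD */
    #define CACHE_LINE_SIZE 64      /**< Cache line size. */

    /* DPDK after this patch: prefixed and guarded, so both headers coexist */
    #ifndef RTE_CACHE_LINE_SIZE
    #define RTE_CACHE_LINE_SIZE 64  /**< Cache line size. */
    #endif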
@@ -472,7 +472,7 @@ tracef_init(void)
     struct ipv6_5tuple *w;

     sz = config.nb_traces * (config.ipv6 ? sizeof(*w) : sizeof(*v));
-    config.traces = rte_zmalloc_socket(name, sz, CACHE_LINE_SIZE,
+    config.traces = rte_zmalloc_socket(name, sz, RTE_CACHE_LINE_SIZE,
         SOCKET_ID_ANY);
     if (config.traces == NULL)
         rte_exit(EXIT_FAILURE, "Cannot allocate %zu bytes for "

@@ -112,7 +112,7 @@ app_main_loop_worker(void) {
         rte_lcore_id());

     worker_mbuf = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
-        CACHE_LINE_SIZE, rte_socket_id());
+        RTE_CACHE_LINE_SIZE, rte_socket_id());
     if (worker_mbuf == NULL)
         rte_panic("Worker thread: cannot allocate buffer space\n");

@@ -444,7 +444,7 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
     mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
         mbuf_seg_size);
     mb_ctor_arg.seg_buf_offset =
-        (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
+        (uint16_t) RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
     mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
     mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
     mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

@@ -520,7 +520,7 @@ init_config(void)
     /* Configuration of logical cores. */
     fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
         sizeof(struct fwd_lcore *) * nb_lcores,
-        CACHE_LINE_SIZE);
+        RTE_CACHE_LINE_SIZE);
     if (fwd_lcores == NULL) {
         rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
             "failed\n", nb_lcores);

@@ -528,7 +528,7 @@ init_config(void)
     for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
         fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
             sizeof(struct fwd_lcore),
-            CACHE_LINE_SIZE);
+            RTE_CACHE_LINE_SIZE);
         if (fwd_lcores[lc_id] == NULL) {
             rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
                 "failed\n");

@@ -566,7 +566,7 @@ init_config(void)
     /* Configuration of Ethernet ports. */
     ports = rte_zmalloc("testpmd: ports",
         sizeof(struct rte_port) * nb_ports,
-        CACHE_LINE_SIZE);
+        RTE_CACHE_LINE_SIZE);
     if (ports == NULL) {
         rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) "
             "failed\n", nb_ports);

@@ -637,7 +637,7 @@ reconfig(portid_t new_port_id, unsigned socket_id)
     /* Reconfiguration of Ethernet ports. */
     ports = rte_realloc(ports,
         sizeof(struct rte_port) * nb_ports,
-        CACHE_LINE_SIZE);
+        RTE_CACHE_LINE_SIZE);
     if (ports == NULL) {
         rte_exit(EXIT_FAILURE, "rte_realloc(%d struct rte_port) failed\n",
             nb_ports);

@@ -714,14 +714,14 @@ init_fwd_streams(void)
     /* init new */
     nb_fwd_streams = nb_fwd_streams_new;
     fwd_streams = rte_zmalloc("testpmd: fwd_streams",
-        sizeof(struct fwd_stream *) * nb_fwd_streams, CACHE_LINE_SIZE);
+        sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
     if (fwd_streams == NULL)
         rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
             "failed\n", nb_fwd_streams);

     for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
         fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
-            sizeof(struct fwd_stream), CACHE_LINE_SIZE);
+            sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
         if (fwd_streams[sm_id] == NULL)
             rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
                 " failed\n");
@@ -61,8 +61,8 @@

 #define DEF_MBUF_CACHE 250

-#define CACHE_LINE_SIZE_ROUNDUP(size) \
-    (CACHE_LINE_SIZE * ((size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE))
+#define RTE_CACHE_LINE_SIZE_ROUNDUP(size) \
+    (RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE))

 #define NUMA_NO_CONFIG 0xFF
 #define UMA_NO_CONFIG 0xFF

@@ -73,7 +73,7 @@ static void
 time_cache_line_switch(void)
 {
     /* allocate a full cache line for data, we use only first byte of it */
-    uint64_t data[CACHE_LINE_SIZE*3 / sizeof(uint64_t)];
+    uint64_t data[RTE_CACHE_LINE_SIZE*3 / sizeof(uint64_t)];

     unsigned i, slaveid = rte_get_next_lcore(rte_lcore_id(), 0, 0);
     volatile uint64_t *pdata = &data[0];

@@ -136,13 +136,13 @@ test_ivshmem_create_lots_of_memzones(void)
     for (i = 0; i < RTE_LIBRTE_IVSHMEM_MAX_ENTRIES; i++) {
         snprintf(name, sizeof(name), "mz_%i", i);

-        mz = rte_memzone_reserve(name, CACHE_LINE_SIZE, SOCKET_ID_ANY, 0);
+        mz = rte_memzone_reserve(name, RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY, 0);
         ASSERT(mz != NULL, "Failed to reserve memzone");

         ASSERT(rte_ivshmem_metadata_add_memzone(mz, METADATA_NAME) == 0,
             "Failed to add memzone");
     }
-    mz = rte_memzone_reserve("one too many", CACHE_LINE_SIZE, SOCKET_ID_ANY, 0);
+    mz = rte_memzone_reserve("one too many", RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY, 0);
     ASSERT(mz != NULL, "Failed to reserve memzone");

     ASSERT(rte_ivshmem_metadata_add_memzone(mz, METADATA_NAME) < 0,

@@ -159,7 +159,7 @@ test_ivshmem_create_duplicate_memzone(void)
     ASSERT(rte_ivshmem_metadata_create(METADATA_NAME) == 0,
         "Failed to create metadata");

-    mz = rte_memzone_reserve("mz", CACHE_LINE_SIZE, SOCKET_ID_ANY, 0);
+    mz = rte_memzone_reserve("mz", RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY, 0);
     ASSERT(mz != NULL, "Failed to reserve memzone");

     ASSERT(rte_ivshmem_metadata_add_memzone(mz, METADATA_NAME) == 0,

@@ -300,9 +300,9 @@ test_big_alloc(void)
     size_t size =rte_str_to_size(MALLOC_MEMZONE_SIZE)*2;
     int align = 0;
 #ifndef RTE_LIBRTE_MALLOC_DEBUG
-    int overhead = CACHE_LINE_SIZE + CACHE_LINE_SIZE;
+    int overhead = RTE_CACHE_LINE_SIZE + RTE_CACHE_LINE_SIZE;
 #else
-    int overhead = CACHE_LINE_SIZE + CACHE_LINE_SIZE + CACHE_LINE_SIZE;
+    int overhead = RTE_CACHE_LINE_SIZE + RTE_CACHE_LINE_SIZE + RTE_CACHE_LINE_SIZE;
 #endif

     rte_malloc_get_socket_stats(socket, &pre_stats);

@@ -356,9 +356,9 @@ test_multi_alloc_statistics(void)
 #ifndef RTE_LIBRTE_MALLOC_DEBUG
     int trailer_size = 0;
 #else
-    int trailer_size = CACHE_LINE_SIZE;
+    int trailer_size = RTE_CACHE_LINE_SIZE;
 #endif
-    int overhead = CACHE_LINE_SIZE + trailer_size;
+    int overhead = RTE_CACHE_LINE_SIZE + trailer_size;

     rte_malloc_get_socket_stats(socket, &pre_stats);

@@ -481,13 +481,13 @@ test_realloc(void)
     const unsigned size4 = size3 + 1024;

     /* test data is the same even if element is moved*/
-    char *ptr1 = rte_zmalloc(NULL, size1, CACHE_LINE_SIZE);
+    char *ptr1 = rte_zmalloc(NULL, size1, RTE_CACHE_LINE_SIZE);
     if (!ptr1){
         printf("NULL pointer returned from rte_zmalloc\n");
         return -1;
     }
     snprintf(ptr1, size1, "%s" ,hello_str);
-    char *ptr2 = rte_realloc(ptr1, size2, CACHE_LINE_SIZE);
+    char *ptr2 = rte_realloc(ptr1, size2, RTE_CACHE_LINE_SIZE);
     if (!ptr2){
         rte_free(ptr1);
         printf("NULL pointer returned from rte_realloc\n");

@@ -511,7 +511,7 @@ test_realloc(void)
     /* now allocate third element, free the second
      * and resize third. It should not move. (ptr1 is now invalid)
      */
-    char *ptr3 = rte_zmalloc(NULL, size3, CACHE_LINE_SIZE);
+    char *ptr3 = rte_zmalloc(NULL, size3, RTE_CACHE_LINE_SIZE);
     if (!ptr3){
         printf("NULL pointer returned from rte_zmalloc\n");
         rte_free(ptr2);

@@ -526,7 +526,7 @@ test_realloc(void)
     }
     rte_free(ptr2);
     /* first resize to half the size of the freed block */
-    char *ptr4 = rte_realloc(ptr3, size4, CACHE_LINE_SIZE);
+    char *ptr4 = rte_realloc(ptr3, size4, RTE_CACHE_LINE_SIZE);
     if (!ptr4){
         printf("NULL pointer returned from rte_realloc\n");
         rte_free(ptr3);

@@ -538,7 +538,7 @@ test_realloc(void)
         return -1;
     }
     /* now resize again to the full size of the freed block */
-    ptr4 = rte_realloc(ptr3, size3 + size2 + size1, CACHE_LINE_SIZE);
+    ptr4 = rte_realloc(ptr3, size3 + size2 + size1, RTE_CACHE_LINE_SIZE);
     if (ptr3 != ptr4){
         printf("Unexpected - ptr4 != ptr3 on second resize\n");
         rte_free(ptr4);

@@ -549,12 +549,12 @@ test_realloc(void)
     /* now try a resize to a smaller size, see if it works */
     const unsigned size5 = 1024;
     const unsigned size6 = size5 / 2;
-    char *ptr5 = rte_malloc(NULL, size5, CACHE_LINE_SIZE);
+    char *ptr5 = rte_malloc(NULL, size5, RTE_CACHE_LINE_SIZE);
     if (!ptr5){
         printf("NULL pointer returned from rte_malloc\n");
         return -1;
     }
-    char *ptr6 = rte_realloc(ptr5, size6, CACHE_LINE_SIZE);
+    char *ptr6 = rte_realloc(ptr5, size6, RTE_CACHE_LINE_SIZE);
     if (!ptr6){
         printf("NULL pointer returned from rte_realloc\n");
         rte_free(ptr5);

@@ -569,8 +569,8 @@ test_realloc(void)

     /* check for behaviour changing alignment */
     const unsigned size7 = 1024;
-    const unsigned orig_align = CACHE_LINE_SIZE;
-    unsigned new_align = CACHE_LINE_SIZE * 2;
+    const unsigned orig_align = RTE_CACHE_LINE_SIZE;
+    unsigned new_align = RTE_CACHE_LINE_SIZE * 2;
     char *ptr7 = rte_malloc(NULL, size7, orig_align);
     if (!ptr7){
         printf("NULL pointer returned from rte_malloc\n");

@@ -597,18 +597,18 @@ test_realloc(void)
      */
     unsigned size9 = 1024, size10 = 1024;
     unsigned size11 = size9 + size10 + 256;
-    char *ptr9 = rte_malloc(NULL, size9, CACHE_LINE_SIZE);
+    char *ptr9 = rte_malloc(NULL, size9, RTE_CACHE_LINE_SIZE);
     if (!ptr9){
         printf("NULL pointer returned from rte_malloc\n");
         return -1;
     }
-    char *ptr10 = rte_malloc(NULL, size10, CACHE_LINE_SIZE);
+    char *ptr10 = rte_malloc(NULL, size10, RTE_CACHE_LINE_SIZE);
     if (!ptr10){
         printf("NULL pointer returned from rte_malloc\n");
         return -1;
     }
     rte_free(ptr9);
-    char *ptr11 = rte_realloc(ptr10, size11, CACHE_LINE_SIZE);
+    char *ptr11 = rte_realloc(ptr10, size11, RTE_CACHE_LINE_SIZE);
     if (!ptr11){
         printf("NULL pointer returned from rte_realloc\n");
         rte_free(ptr10);

@@ -625,7 +625,7 @@ test_realloc(void)
      * We should get a malloc of the size requested*/
     const size_t size12 = 1024;
     size_t size12_check;
-    char *ptr12 = rte_realloc(NULL, size12, CACHE_LINE_SIZE);
+    char *ptr12 = rte_realloc(NULL, size12, RTE_CACHE_LINE_SIZE);
     if (!ptr12){
         printf("NULL pointer returned from rte_realloc\n");
         return -1;

@@ -698,7 +698,7 @@ test_rte_malloc_validate(void)
 {
     const size_t request_size = 1024;
     size_t allocated_size;
-    char *data_ptr = rte_malloc(NULL, request_size, CACHE_LINE_SIZE);
+    char *data_ptr = rte_malloc(NULL, request_size, RTE_CACHE_LINE_SIZE);
 #ifdef RTE_LIBRTE_MALLOC_DEBUG
     int retval;
     char *over_write_vals = NULL;

@@ -773,7 +773,7 @@ test_zero_aligned_alloc(void)
     char *p1 = rte_malloc(NULL,1024, 0);
     if (!p1)
         goto err_return;
-    if (!rte_is_aligned(p1, CACHE_LINE_SIZE))
+    if (!rte_is_aligned(p1, RTE_CACHE_LINE_SIZE))
         goto err_return;
     rte_free(p1);
     return 0;

@@ -789,7 +789,7 @@ test_malloc_bad_params(void)
 {
     const char *type = NULL;
     size_t size = 0;
-    unsigned align = CACHE_LINE_SIZE;
+    unsigned align = RTE_CACHE_LINE_SIZE;

     /* rte_malloc expected to return null with inappropriate size */
     char *bad_ptr = rte_malloc(type, size, align);

@@ -770,7 +770,7 @@ test_failing_mbuf_sanity_check(void)
 static int
 test_mbuf(void)
 {
-    RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != CACHE_LINE_SIZE * 2);
+    RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != RTE_CACHE_LINE_SIZE * 2);

     /* create pktmbuf pool if it does not exist */
     if (pktmbuf_pool == NULL) {
@@ -402,9 +402,9 @@ test_memzone_reserve_max(void)
             continue;

         /* align everything */
-        last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, CACHE_LINE_SIZE);
+        last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, RTE_CACHE_LINE_SIZE);
         len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
-        len &= ~((size_t) CACHE_LINE_MASK);
+        len &= ~((size_t) RTE_CACHE_LINE_MASK);

         /* cycle through all memzones */
         for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {

@@ -495,9 +495,9 @@ test_memzone_reserve_max_aligned(void)
             continue;

         /* align everything */
-        last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, CACHE_LINE_SIZE);
+        last_addr = RTE_PTR_ALIGN_CEIL(ms[memseg_idx].addr, RTE_CACHE_LINE_SIZE);
         len = ms[memseg_idx].len - RTE_PTR_DIFF(last_addr, ms[memseg_idx].addr);
-        len &= ~((size_t) CACHE_LINE_MASK);
+        len &= ~((size_t) RTE_CACHE_LINE_MASK);

         /* cycle through all memzones */
         for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {

@@ -595,11 +595,11 @@ test_memzone_aligned(void)
         printf("Unable to reserve 64-byte aligned memzone!\n");
         return -1;
     }
-    if ((memzone_aligned_32->phys_addr & CACHE_LINE_MASK) != 0)
+    if ((memzone_aligned_32->phys_addr & RTE_CACHE_LINE_MASK) != 0)
         return -1;
-    if (((uintptr_t) memzone_aligned_32->addr & CACHE_LINE_MASK) != 0)
+    if (((uintptr_t) memzone_aligned_32->addr & RTE_CACHE_LINE_MASK) != 0)
         return -1;
-    if ((memzone_aligned_32->len & CACHE_LINE_MASK) != 0)
+    if ((memzone_aligned_32->len & RTE_CACHE_LINE_MASK) != 0)
         return -1;

     if (memzone_aligned_128 == NULL) {

@@ -610,7 +610,7 @@ test_memzone_aligned(void)
         return -1;
     if (((uintptr_t) memzone_aligned_128->addr & 127) != 0)
         return -1;
-    if ((memzone_aligned_128->len & CACHE_LINE_MASK) != 0)
+    if ((memzone_aligned_128->len & RTE_CACHE_LINE_MASK) != 0)
         return -1;

     if (memzone_aligned_256 == NULL) {

@@ -621,7 +621,7 @@ test_memzone_aligned(void)
         return -1;
     if (((uintptr_t) memzone_aligned_256->addr & 255) != 0)
         return -1;
-    if ((memzone_aligned_256->len & CACHE_LINE_MASK) != 0)
+    if ((memzone_aligned_256->len & RTE_CACHE_LINE_MASK) != 0)
         return -1;

     if (memzone_aligned_512 == NULL) {

@@ -632,7 +632,7 @@ test_memzone_aligned(void)
         return -1;
     if (((uintptr_t) memzone_aligned_512->addr & 511) != 0)
         return -1;
-    if ((memzone_aligned_512->len & CACHE_LINE_MASK) != 0)
+    if ((memzone_aligned_512->len & RTE_CACHE_LINE_MASK) != 0)
         return -1;

     if (memzone_aligned_1024 == NULL) {

@@ -643,7 +643,7 @@ test_memzone_aligned(void)
         return -1;
     if (((uintptr_t) memzone_aligned_1024->addr & 1023) != 0)
         return -1;
-    if ((memzone_aligned_1024->len & CACHE_LINE_MASK) != 0)
+    if ((memzone_aligned_1024->len & RTE_CACHE_LINE_MASK) != 0)
         return -1;

     /* check that zones don't overlap */

@@ -709,8 +709,8 @@ check_memzone_bounded(const char *name, uint32_t len, uint32_t align,
         return (-1);
     }

-    if ((mz->len & CACHE_LINE_MASK) != 0 || mz->len < len ||
-        mz->len < CACHE_LINE_SIZE) {
+    if ((mz->len & RTE_CACHE_LINE_MASK) != 0 || mz->len < len ||
+        mz->len < RTE_CACHE_LINE_SIZE) {
         printf("%s(%s): invalid length\n",
             __func__, mz->name);
         return (-1);

@@ -811,14 +811,14 @@ test_memzone_reserve_memory_in_smallest_segment(void)
     prev_min_len = prev_min_ms->len;

     /* try reserving a memzone in the smallest memseg */
-    mz = rte_memzone_reserve("smallest_mz", CACHE_LINE_SIZE,
+    mz = rte_memzone_reserve("smallest_mz", RTE_CACHE_LINE_SIZE,
         SOCKET_ID_ANY, 0);
     if (mz == NULL) {
         printf("Failed to reserve memory from smallest memseg!\n");
         return -1;
     }
     if (prev_min_ms->len != prev_min_len &&
-        min_ms->len != min_len - CACHE_LINE_SIZE) {
+        min_ms->len != min_len - RTE_CACHE_LINE_SIZE) {
         printf("Reserved memory from wrong memseg!\n");
         return -1;
     }

@@ -857,7 +857,7 @@ test_memzone_reserve_memory_with_smallest_offset(void)

     min_ms = NULL; /*< smallest segment */
     prev_min_ms = NULL; /*< second smallest segment */
-    align = CACHE_LINE_SIZE * 4;
+    align = RTE_CACHE_LINE_SIZE * 4;

     /* find two smallest segments */
     for (i = 0; i < RTE_MAX_MEMSEG; i++) {

@@ -897,7 +897,7 @@ test_memzone_reserve_memory_with_smallest_offset(void)

     /* make sure final length is *not* aligned */
     while (((min_ms->addr_64 + len) & (align-1)) == 0)
-        len += CACHE_LINE_SIZE;
+        len += RTE_CACHE_LINE_SIZE;

     if (rte_memzone_reserve("dummy_mz1", len, SOCKET_ID_ANY, 0) == NULL) {
         printf("Cannot reserve memory!\n");

@@ -912,12 +912,12 @@ test_memzone_reserve_memory_with_smallest_offset(void)
     }
     /* if we don't need to touch smallest segment but it's aligned */
     else if ((min_ms->addr_64 & (align-1)) == 0) {
-        if (rte_memzone_reserve("align_mz1", CACHE_LINE_SIZE,
+        if (rte_memzone_reserve("align_mz1", RTE_CACHE_LINE_SIZE,
                 SOCKET_ID_ANY, 0) == NULL) {
             printf("Cannot reserve memory!\n");
             return -1;
         }
-        if (min_ms->len != min_len - CACHE_LINE_SIZE) {
+        if (min_ms->len != min_len - RTE_CACHE_LINE_SIZE) {
             printf("Reserved memory from wrong segment!\n");
             return -1;
         }

@@ -929,7 +929,7 @@ test_memzone_reserve_memory_with_smallest_offset(void)

     /* make sure final length is aligned */
     while (((prev_min_ms->addr_64 + len) & (align-1)) != 0)
-        len += CACHE_LINE_SIZE;
+        len += RTE_CACHE_LINE_SIZE;

     if (rte_memzone_reserve("dummy_mz2", len, SOCKET_ID_ANY, 0) == NULL) {
         printf("Cannot reserve memory!\n");

@@ -942,7 +942,7 @@ test_memzone_reserve_memory_with_smallest_offset(void)
             return -1;
         }
     }
-    len = CACHE_LINE_SIZE;
+    len = RTE_CACHE_LINE_SIZE;



@@ -980,7 +980,7 @@ test_memzone_reserve_remainder(void)
     int i, align;

     min_len = 0;
-    align = CACHE_LINE_SIZE;
+    align = RTE_CACHE_LINE_SIZE;

     config = rte_eal_get_configuration();

@@ -998,7 +998,7 @@ test_memzone_reserve_remainder(void)
             min_ms = ms;

             /* find maximum alignment this segment is able to hold */
-            align = CACHE_LINE_SIZE;
+            align = RTE_CACHE_LINE_SIZE;
             while ((ms->addr_64 & (align-1)) == 0) {
                 align <<= 1;
             }

@@ -1072,17 +1072,17 @@ test_memzone(void)
     /* check cache-line alignments */
     printf("check alignments and lengths\n");

-    if ((memzone1->phys_addr & CACHE_LINE_MASK) != 0)
+    if ((memzone1->phys_addr & RTE_CACHE_LINE_MASK) != 0)
         return -1;
-    if ((memzone2->phys_addr & CACHE_LINE_MASK) != 0)
+    if ((memzone2->phys_addr & RTE_CACHE_LINE_MASK) != 0)
         return -1;
-    if (memzone3 != NULL && (memzone3->phys_addr & CACHE_LINE_MASK) != 0)
+    if (memzone3 != NULL && (memzone3->phys_addr & RTE_CACHE_LINE_MASK) != 0)
         return -1;
-    if ((memzone1->len & CACHE_LINE_MASK) != 0 || memzone1->len == 0)
+    if ((memzone1->len & RTE_CACHE_LINE_MASK) != 0 || memzone1->len == 0)
         return -1;
-    if ((memzone2->len & CACHE_LINE_MASK) != 0 || memzone2->len == 0)
+    if ((memzone2->len & RTE_CACHE_LINE_MASK) != 0 || memzone2->len == 0)
         return -1;
-    if (memzone3 != NULL && ((memzone3->len & CACHE_LINE_MASK) != 0 ||
+    if (memzone3 != NULL && ((memzone3->len & RTE_CACHE_LINE_MASK) != 0 ||
         memzone3->len == 0))
         return -1;
     if (memzone4->len != 1024)
@@ -592,7 +592,7 @@ poll_burst(void *args)
     pkts_burst = (struct rte_mbuf **)
         rte_calloc_socket("poll_burst",
             total, sizeof(void *),
-            CACHE_LINE_SIZE, conf->socketid);
+            RTE_CACHE_LINE_SIZE, conf->socketid);
     if (!pkts_burst)
         return -1;

@@ -797,7 +797,7 @@ test_pmd_perf(void)
         rte_calloc_socket("tx_buff",
             MAX_TRAFFIC_BURST * nb_ports,
             sizeof(void *),
-            CACHE_LINE_SIZE, socketid);
+            RTE_CACHE_LINE_SIZE, socketid);
     if (!tx_burst)
         return -1;
     }

@@ -179,7 +179,7 @@ struct rte_table {
     rte_pipeline_table_action_handler_hit f_action;
     uint32_t table_next_id;
     uint32_t table_next_id_valid;
-    uint8_t actions_lookup_miss[CACHE_LINE_SIZE];
+    uint8_t actions_lookup_miss[RTE_CACHE_LINE_SIZE];
     uint32_t action_data_size;
     void *h_table;
 };

@@ -392,7 +392,7 @@ The code is as follows:
         goto fail;
     }

-    kni_port_params_array[port_id] = (struct kni_port_params*)rte_zmalloc("KNI_port_params", sizeof(struct kni_port_params), CACHE_LINE_SIZE);
+    kni_port_params_array[port_id] = (struct kni_port_params*)rte_zmalloc("KNI_port_params", sizeof(struct kni_port_params), RTE_CACHE_LINE_SIZE);
     kni_port_params_array[port_id]->port_id = port_id;
     kni_port_params_array[port_id]->lcore_rx = (uint8_t)int_fld[i++];
     kni_port_params_array[port_id]->lcore_tx = (uint8_t)int_fld[i++];

@@ -339,7 +339,7 @@ get_crypto_instance_on_core(CpaInstanceHandle *pInstanceHandle,
     }

     pLocalInstanceHandles = rte_malloc("pLocalInstanceHandles",
-        sizeof(CpaInstanceHandle) * numInstances, CACHE_LINE_SIZE);
+        sizeof(CpaInstanceHandle) * numInstances, RTE_CACHE_LINE_SIZE);

     if (NULL == pLocalInstanceHandles) {
         return CPA_STATUS_FAIL;

@@ -568,7 +568,7 @@ cmd_arp_add_parsed(
     struct app_rule *new_rule = (struct app_rule *)
         rte_zmalloc_socket("CLI",
             sizeof(struct app_rule),
-            CACHE_LINE_SIZE,
+            RTE_CACHE_LINE_SIZE,
             rte_socket_id());

     if (new_rule == NULL)

@@ -860,7 +860,7 @@ cmd_route_add_parsed(
     struct app_rule *new_rule = (struct app_rule *)
         rte_zmalloc_socket("CLI",
             sizeof(struct app_rule),
-            CACHE_LINE_SIZE,
+            RTE_CACHE_LINE_SIZE,
             rte_socket_id());

     if (new_rule == NULL)

@@ -1193,7 +1193,7 @@ cmd_firewall_add_parsed(
     struct app_rule *new_rule = (struct app_rule *)
         rte_zmalloc_socket("CLI",
             sizeof(struct app_rule),
-            CACHE_LINE_SIZE,
+            RTE_CACHE_LINE_SIZE,
             rte_socket_id());

     memcpy(new_rule, &rule, sizeof(rule));

@@ -1673,7 +1673,7 @@ cmd_flow_add_parsed(
     struct app_rule *new_rule = (struct app_rule *)
         rte_zmalloc_socket("CLI",
             sizeof(struct app_rule),
-            CACHE_LINE_SIZE,
+            RTE_CACHE_LINE_SIZE,
             rte_socket_id());

     if (new_rule == NULL)

@@ -419,7 +419,7 @@ app_init_rings(void)
     RTE_LOG(INFO, USER1, "Initializing %u SW rings ...\n", n_swq);

     app.rings = rte_malloc_socket(NULL, n_swq * sizeof(struct rte_ring *),
-        CACHE_LINE_SIZE, rte_socket_id());
+        RTE_CACHE_LINE_SIZE, rte_socket_id());
     if (app.rings == NULL)
         rte_panic("Cannot allocate memory to store ring pointers\n");

@@ -595,7 +595,7 @@ app_init_etc(void)
 void
 app_init(void)
 {
-    if ((sizeof(struct app_pkt_metadata) % CACHE_LINE_SIZE) != 0)
+    if ((sizeof(struct app_pkt_metadata) % RTE_CACHE_LINE_SIZE) != 0)
         rte_panic("Application pkt meta-data size mismatch\n");

     app_check_core_params();

@@ -188,7 +188,7 @@ app_main_loop_passthrough(void) {
         core_id);

     m = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
-        CACHE_LINE_SIZE, rte_socket_id());
+        RTE_CACHE_LINE_SIZE, rte_socket_id());
     if (m == NULL)
         rte_panic("%s: cannot allocate buffer space\n", __func__);

@@ -295,7 +295,7 @@ app_main_loop_rx(void) {
     RTE_LOG(INFO, USER1, "Core %u is doing RX (no pipeline)\n", core_id);

     ma = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
-        CACHE_LINE_SIZE, rte_socket_id());
+        RTE_CACHE_LINE_SIZE, rte_socket_id());
     if (ma == NULL)
         rte_panic("%s: cannot allocate buffer space\n", __func__);

@@ -234,7 +234,7 @@ app_main_loop_tx(void) {

     for (i = 0; i < APP_MAX_PORTS; i++) {
         m[i] = rte_malloc_socket(NULL, sizeof(struct app_mbuf_array),
-            CACHE_LINE_SIZE, rte_socket_id());
+            RTE_CACHE_LINE_SIZE, rte_socket_id());
         if (m[i] == NULL)
             rte_panic("%s: Cannot allocate buffer space\n",
                 __func__);

@@ -858,7 +858,7 @@ setup_port_tbl(struct lcore_queue_conf *qconf, uint32_t lcore, int socket,
     n = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST);
     sz = sizeof (*mtb) + sizeof (mtb->m_table[0]) * n;

-    if ((mtb = rte_zmalloc_socket(__func__, sz, CACHE_LINE_SIZE,
+    if ((mtb = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
             socket)) == NULL) {
         RTE_LOG(ERR, IP_RSMBL, "%s() for lcore: %u, port: %u "
             "failed to allocate %zu bytes\n",

@@ -463,7 +463,7 @@ parse_config(const char *arg)
         }
         kni_port_params_array[port_id] =
             (struct kni_port_params*)rte_zmalloc("KNI_port_params",
-                sizeof(struct kni_port_params), CACHE_LINE_SIZE);
+                sizeof(struct kni_port_params), RTE_CACHE_LINE_SIZE);
         kni_port_params_array[port_id]->port_id = port_id;
         kni_port_params_array[port_id]->lcore_rx =
             (uint8_t)int_fld[i++];
@@ -180,7 +180,7 @@ lcore_id_init(void)
     /* Setup lcore ID allocation map */
    lcore_cfg = rte_zmalloc("LCORE_ID_MAP",
         sizeof(uint16_t) * RTE_MAX_LCORE,
-        CACHE_LINE_SIZE);
+        RTE_CACHE_LINE_SIZE);

     if(lcore_cfg == NULL)
         rte_panic("Failed to malloc\n");

@@ -300,7 +300,7 @@ flib_init(void)
 {
     if ((core_cfg = rte_zmalloc("core_cfg",
         sizeof(struct lcore_stat) * RTE_MAX_LCORE,
-        CACHE_LINE_SIZE)) == NULL ) {
+        RTE_CACHE_LINE_SIZE)) == NULL ) {
         printf("rte_zmalloc failed\n");
         return -1;
     }

@@ -101,7 +101,7 @@ struct port_stats{
     unsigned rx;
     unsigned tx;
     unsigned drop;
-} __attribute__((aligned(CACHE_LINE_SIZE / 2)));
+} __attribute__((aligned(RTE_CACHE_LINE_SIZE / 2)));

 static int proc_id = -1;
 static unsigned num_procs = 0;

@@ -643,12 +643,12 @@ rte_netmap_init(const struct rte_netmap_conf *conf)
     nmif_sz = NETMAP_IF_RING_OFS(port_rings, port_rings, port_slots);
     sz = nmif_sz * port_num;

-    buf_ofs = RTE_ALIGN_CEIL(sz, CACHE_LINE_SIZE);
+    buf_ofs = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
     sz = buf_ofs + port_bufs * conf->max_bufsz * port_num;

     if (sz > UINT32_MAX ||
             (netmap.mem = rte_zmalloc_socket(__func__, sz,
-            CACHE_LINE_SIZE, conf->socket_id)) == NULL) {
+            RTE_CACHE_LINE_SIZE, conf->socket_id)) == NULL) {
         RTE_LOG(ERR, USER1, "%s: failed to allocate %zu bytes\n",
             __func__, sz);
         return (-ENOMEM);

@@ -135,7 +135,7 @@ app_main_loop(__attribute__((unused))void *dummy)
     else if (mode == (APP_TX_MODE | APP_WT_MODE)) {
         for (i = 0; i < wt_idx; i++) {
             wt_confs[i]->m_table = rte_malloc("table_wt", sizeof(struct rte_mbuf *)
-                * burst_conf.tx_burst, CACHE_LINE_SIZE);
+                * burst_conf.tx_burst, RTE_CACHE_LINE_SIZE);

             if (wt_confs[i]->m_table == NULL)
                 rte_panic("flow %u unable to allocate memory buffer\n", i);

@@ -150,7 +150,7 @@ app_main_loop(__attribute__((unused))void *dummy)
     else if (mode == APP_TX_MODE) {
         for (i = 0; i < tx_idx; i++) {
             tx_confs[i]->m_table = rte_malloc("table_tx", sizeof(struct rte_mbuf *)
-                * burst_conf.tx_burst, CACHE_LINE_SIZE);
+                * burst_conf.tx_burst, RTE_CACHE_LINE_SIZE);

             if (tx_confs[i]->m_table == NULL)
                 rte_panic("flow %u unable to allocate memory buffer\n", i);

@@ -156,7 +156,7 @@
 #define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL

 /* Number of descriptors per cacheline. */
-#define DESC_PER_CACHELINE (CACHE_LINE_SIZE / sizeof(struct vring_desc))
+#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))

 /* mask of enabled ports */
 static uint32_t enabled_port_mask = 0;

@@ -2562,7 +2562,7 @@ new_device (struct virtio_net *dev)
     struct vhost_dev *vdev;
     uint32_t regionidx;

-    vdev = rte_zmalloc("vhost device", sizeof(*vdev), CACHE_LINE_SIZE);
+    vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
     if (vdev == NULL) {
         RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Couldn't allocate memory for vhost dev\n",
             dev->device_fh);

@@ -2584,7 +2584,7 @@ new_device (struct virtio_net *dev)

     vdev->regions_hpa = (struct virtio_memory_regions_hpa *) rte_zmalloc("vhost hpa region",
         sizeof(struct virtio_memory_regions_hpa) * vdev->nregions_hpa,
-        CACHE_LINE_SIZE);
+        RTE_CACHE_LINE_SIZE);
     if (vdev->regions_hpa == NULL) {
         RTE_LOG(ERR, VHOST_CONFIG, "Cannot allocate memory for hpa region\n");
         rte_free(vdev);

@@ -255,8 +255,8 @@ virtio_net_config_ll *new_device(unsigned int virtio_idx, struct xen_guest *gues

     /* Setup device and virtqueues. */
     new_ll_dev = calloc(1, sizeof(struct virtio_net_config_ll));
-    virtqueue_rx = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), CACHE_LINE_SIZE);
-    virtqueue_tx = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), CACHE_LINE_SIZE);
+    virtqueue_rx = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), RTE_CACHE_LINE_SIZE);
+    virtqueue_tx = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), RTE_CACHE_LINE_SIZE);
     if (new_ll_dev == NULL || virtqueue_rx == NULL || virtqueue_tx == NULL)
         goto err;

@@ -408,7 +408,7 @@ add_all_channels(const char *vm_name)
             continue;

         chan_info = rte_malloc(NULL, sizeof(*chan_info),
-            CACHE_LINE_SIZE);
+            RTE_CACHE_LINE_SIZE);
         if (chan_info == NULL) {
             RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
                 "channel '%s%s'\n", CHANNEL_MGR_SOCKET_PATH, dir->d_name);

@@ -476,7 +476,7 @@ add_channels(const char *vm_name, unsigned *channel_list,
             continue;
         }
         chan_info = rte_malloc(NULL, sizeof(*chan_info),
-            CACHE_LINE_SIZE);
+            RTE_CACHE_LINE_SIZE);
         if (chan_info == NULL) {
             RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
                 "channel '%s'\n", socket_path);

@@ -639,7 +639,7 @@ add_vm(const char *vm_name)
     }

     new_domain = rte_malloc("virtual_machine_info", sizeof(*new_domain),
-        CACHE_LINE_SIZE);
+        RTE_CACHE_LINE_SIZE);
     if (new_domain == NULL) {
         RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to allocate memory for VM "
             "info\n");

@@ -745,13 +745,13 @@ channel_manager_init(const char *path)
     global_maplen = VIR_CPU_MAPLEN(CHANNEL_CMDS_MAX_CPUS);

     global_vircpuinfo = rte_zmalloc(NULL, sizeof(*global_vircpuinfo) *
-        CHANNEL_CMDS_MAX_CPUS, CACHE_LINE_SIZE);
+        CHANNEL_CMDS_MAX_CPUS, RTE_CACHE_LINE_SIZE);
     if (global_vircpuinfo == NULL) {
         RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
         goto error;
     }
     global_cpumaps = rte_zmalloc(NULL, CHANNEL_CMDS_MAX_CPUS * global_maplen,
-        CACHE_LINE_SIZE);
+        RTE_CACHE_LINE_SIZE);
     if (global_cpumaps == NULL) {
         goto error;
     }

@@ -175,7 +175,7 @@ channel_monitor_init(void)
         return -1;
     }
     global_events_list = rte_malloc("epoll_events", sizeof(*global_events_list)
-        * MAX_EVENTS, CACHE_LINE_SIZE);
+        * MAX_EVENTS, RTE_CACHE_LINE_SIZE);
     if (global_events_list == NULL) {
         RTE_LOG(ERR, CHANNEL_MONITOR, "Unable to rte_malloc for "
             "epoll events\n");
@ -415,12 +415,12 @@ rte_acl_gen(struct rte_acl_ctx *ctx, struct rte_acl_trie *trie,
|
|||||||
node_bld_trie, num_tries, match_num);
|
node_bld_trie, num_tries, match_num);
|
||||||
|
|
||||||
/* Allocate runtime memory (align to cache boundary) */
|
/* Allocate runtime memory (align to cache boundary) */
|
||||||
total_size = RTE_ALIGN(data_index_sz, CACHE_LINE_SIZE) +
|
total_size = RTE_ALIGN(data_index_sz, RTE_CACHE_LINE_SIZE) +
|
||||||
indices.match_index * sizeof(uint64_t) +
|
indices.match_index * sizeof(uint64_t) +
|
||||||
(match_num + 2) * sizeof(struct rte_acl_match_results) +
|
(match_num + 2) * sizeof(struct rte_acl_match_results) +
|
||||||
XMM_SIZE;
|
XMM_SIZE;
|
||||||
|
|
||||||
mem = rte_zmalloc_socket(ctx->name, total_size, CACHE_LINE_SIZE,
|
mem = rte_zmalloc_socket(ctx->name, total_size, RTE_CACHE_LINE_SIZE,
|
||||||
ctx->socket_id);
|
ctx->socket_id);
|
||||||
if (mem == NULL) {
|
if (mem == NULL) {
|
||||||
RTE_LOG(ERR, ACL,
|
RTE_LOG(ERR, ACL,
|
||||||
@ -432,7 +432,7 @@ rte_acl_gen(struct rte_acl_ctx *ctx, struct rte_acl_trie *trie,
|
|||||||
/* Fill the runtime structure */
|
/* Fill the runtime structure */
|
||||||
match_index = indices.match_index;
|
match_index = indices.match_index;
|
||||||
node_array = (uint64_t *)((uintptr_t)mem +
|
node_array = (uint64_t *)((uintptr_t)mem +
|
||||||
RTE_ALIGN(data_index_sz, CACHE_LINE_SIZE));
|
RTE_ALIGN(data_index_sz, RTE_CACHE_LINE_SIZE));
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Setup the NOMATCH node (a SINGLE at the
|
* Setup the NOMATCH node (a SINGLE at the
|
||||||
|
@ -203,7 +203,7 @@ rte_acl_create(const struct rte_acl_param *param)
|
|||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx = rte_zmalloc_socket(name, sz, CACHE_LINE_SIZE, param->socket_id);
|
ctx = rte_zmalloc_socket(name, sz, RTE_CACHE_LINE_SIZE, param->socket_id);
|
||||||
|
|
||||||
if (ctx == NULL) {
|
if (ctx == NULL) {
|
||||||
RTE_LOG(ERR, ACL,
|
RTE_LOG(ERR, ACL,
|
||||||
|
@ -180,13 +180,13 @@ rte_rdtsc(void)
|
|||||||
* rte_memory related.
|
* rte_memory related.
|
||||||
*/
|
*/
|
||||||
#define SOCKET_ID_ANY -1 /**< Any NUMA socket. */
|
#define SOCKET_ID_ANY -1 /**< Any NUMA socket. */
|
||||||
#define CACHE_LINE_SIZE 64 /**< Cache line size. */
|
#define RTE_CACHE_LINE_SIZE 64 /**< Cache line size. */
|
||||||
#define CACHE_LINE_MASK (CACHE_LINE_SIZE-1) /**< Cache line mask. */
|
#define RTE_CACHE_LINE_MASK (RTE_CACHE_LINE_SIZE-1) /**< Cache line mask. */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Force alignment to cache line.
|
* Force alignment to cache line.
|
||||||
*/
|
*/
|
||||||
#define __rte_cache_aligned __attribute__((__aligned__(CACHE_LINE_SIZE)))
|
#define __rte_cache_aligned __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)))
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -77,7 +77,7 @@
|
|||||||
*/
|
*/
|
||||||
union rte_distributor_buffer {
|
union rte_distributor_buffer {
|
||||||
volatile int64_t bufptr64;
|
volatile int64_t bufptr64;
|
||||||
char pad[CACHE_LINE_SIZE*3];
|
char pad[RTE_CACHE_LINE_SIZE*3];
|
||||||
} __rte_cache_aligned;
|
} __rte_cache_aligned;
|
||||||
|
|
||||||
struct rte_distributor_backlog {
|
struct rte_distributor_backlog {
|
||||||
@ -450,7 +450,7 @@ rte_distributor_create(const char *name,
|
|||||||
const struct rte_memzone *mz;
|
const struct rte_memzone *mz;
|
||||||
|
|
||||||
/* compilation-time checks */
|
/* compilation-time checks */
|
||||||
RTE_BUILD_BUG_ON((sizeof(*d) & CACHE_LINE_MASK) != 0);
|
RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
|
||||||
RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
|
RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
|
||||||
RTE_BUILD_BUG_ON(RTE_DISTRIB_MAX_WORKERS >
|
RTE_BUILD_BUG_ON(RTE_DISTRIB_MAX_WORKERS >
|
||||||
sizeof(d->in_flight_bitmask) * CHAR_BIT);
|
sizeof(d->in_flight_bitmask) * CHAR_BIT);
|
||||||
|
@ -86,7 +86,7 @@ rte_memzone_reserve(const char *name, size_t len, int socket_id,
|
|||||||
unsigned flags)
|
unsigned flags)
|
||||||
{
|
{
|
||||||
return rte_memzone_reserve_aligned(name,
|
return rte_memzone_reserve_aligned(name,
|
||||||
len, socket_id, flags, CACHE_LINE_SIZE);
|
len, socket_id, flags, RTE_CACHE_LINE_SIZE);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -164,21 +164,21 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* alignment less than cache size is not allowed */
|
/* alignment less than cache size is not allowed */
|
||||||
if (align < CACHE_LINE_SIZE)
|
if (align < RTE_CACHE_LINE_SIZE)
|
||||||
align = CACHE_LINE_SIZE;
|
align = RTE_CACHE_LINE_SIZE;
|
||||||
|
|
||||||
|
|
||||||
/* align length on cache boundary. Check for overflow before doing so */
|
/* align length on cache boundary. Check for overflow before doing so */
|
||||||
if (len > SIZE_MAX - CACHE_LINE_MASK) {
|
if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) {
|
||||||
rte_errno = EINVAL; /* requested size too big */
|
rte_errno = EINVAL; /* requested size too big */
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
len += CACHE_LINE_MASK;
|
len += RTE_CACHE_LINE_MASK;
|
||||||
len &= ~((size_t) CACHE_LINE_MASK);
|
len &= ~((size_t) RTE_CACHE_LINE_MASK);
|
||||||
|
|
||||||
/* save minimal requested length */
|
/* save minimal requested length */
|
||||||
requested_len = RTE_MAX((size_t)CACHE_LINE_SIZE, len);
|
requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len);
|
||||||
|
|
||||||
/* check that boundary condition is valid */
|
/* check that boundary condition is valid */
|
||||||
if (bound != 0 &&
|
if (bound != 0 &&
|
||||||
@ -430,8 +430,8 @@ memseg_sanitize(struct rte_memseg *memseg)
|
|||||||
unsigned virt_align;
|
unsigned virt_align;
|
||||||
unsigned off;
|
unsigned off;
|
||||||
|
|
||||||
phys_align = memseg->phys_addr & CACHE_LINE_MASK;
|
phys_align = memseg->phys_addr & RTE_CACHE_LINE_MASK;
|
||||||
virt_align = (unsigned long)memseg->addr & CACHE_LINE_MASK;
|
virt_align = (unsigned long)memseg->addr & RTE_CACHE_LINE_MASK;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* sanity check: phys_addr and addr must have the same
|
* sanity check: phys_addr and addr must have the same
|
||||||
@ -441,19 +441,19 @@ memseg_sanitize(struct rte_memseg *memseg)
|
|||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
/* memseg is really too small, don't bother with it */
|
/* memseg is really too small, don't bother with it */
|
||||||
if (memseg->len < (2 * CACHE_LINE_SIZE)) {
|
if (memseg->len < (2 * RTE_CACHE_LINE_SIZE)) {
|
||||||
memseg->len = 0;
|
memseg->len = 0;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* align start address */
|
/* align start address */
|
||||||
off = (CACHE_LINE_SIZE - phys_align) & CACHE_LINE_MASK;
|
off = (RTE_CACHE_LINE_SIZE - phys_align) & RTE_CACHE_LINE_MASK;
|
||||||
memseg->phys_addr += off;
|
memseg->phys_addr += off;
|
||||||
memseg->addr = (char *)memseg->addr + off;
|
memseg->addr = (char *)memseg->addr + off;
|
||||||
memseg->len -= off;
|
memseg->len -= off;
|
||||||
|
|
||||||
/* align end address */
|
/* align end address */
|
||||||
memseg->len &= ~((uint64_t)CACHE_LINE_MASK);
|
memseg->len &= ~((uint64_t)RTE_CACHE_LINE_MASK);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -62,19 +62,19 @@ enum rte_page_sizes {
 };

 #define SOCKET_ID_ANY -1                    /**< Any NUMA socket. */
-#ifndef CACHE_LINE_SIZE
-#define CACHE_LINE_SIZE 64                  /**< Cache line size. */
+#ifndef RTE_CACHE_LINE_SIZE
+#define RTE_CACHE_LINE_SIZE 64              /**< Cache line size. */
 #endif
-#define CACHE_LINE_MASK (CACHE_LINE_SIZE-1) /**< Cache line mask. */
+#define RTE_CACHE_LINE_MASK (RTE_CACHE_LINE_SIZE-1) /**< Cache line mask. */

-#define CACHE_LINE_ROUNDUP(size) \
-	(CACHE_LINE_SIZE * ((size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE))
+#define RTE_CACHE_LINE_ROUNDUP(size) \
+	(RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE))
 /**< Return the first cache-aligned value greater or equal to size. */

 /**
  * Force alignment to cache line.
  */
-#define __rte_cache_aligned __attribute__((__aligned__(CACHE_LINE_SIZE)))
+#define __rte_cache_aligned __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)))

 typedef uint64_t phys_addr_t; /**< Physical address definition. */
 #define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1)
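RTE_CACHE_LINE_ROUNDUP reaches the same result as the add-and-mask idiom through division: it counts how many whole cache lines the size needs and multiplies back. A small self-contained check, with the macro copied from the header above:

#include <stdio.h>

#define RTE_CACHE_LINE_SIZE 64
#define RTE_CACHE_LINE_ROUNDUP(size) \
	(RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE))

int main(void)
{
	printf("%d\n", RTE_CACHE_LINE_ROUNDUP(1));  /* 64  */
	printf("%d\n", RTE_CACHE_LINE_ROUNDUP(64)); /* 64  */
	printf("%d\n", RTE_CACHE_LINE_ROUNDUP(65)); /* 128 */
	return 0;
}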
@ -256,7 +256,7 @@ rte_eth_dev_init(struct rte_pci_driver *pci_drv,
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY){
 		eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
 				eth_drv->dev_private_size,
-				CACHE_LINE_SIZE);
+				RTE_CACHE_LINE_SIZE);
 		if (eth_dev->data->dev_private == NULL)
 			rte_panic("Cannot allocate memzone for private port data\n");
 	}
@ -332,7 +332,7 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 	if (dev->data->rx_queues == NULL) { /* first time configuration */
 		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
 				sizeof(dev->data->rx_queues[0]) * nb_queues,
-				CACHE_LINE_SIZE);
+				RTE_CACHE_LINE_SIZE);
 		if (dev->data->rx_queues == NULL) {
 			dev->data->nb_rx_queues = 0;
 			return -(ENOMEM);
@ -345,7 +345,7 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 		for (i = nb_queues; i < old_nb_queues; i++)
 			(*dev->dev_ops->rx_queue_release)(rxq[i]);
 		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
-				CACHE_LINE_SIZE);
+				RTE_CACHE_LINE_SIZE);
 		if (rxq == NULL)
 			return -(ENOMEM);

@ -474,7 +474,7 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 	if (dev->data->tx_queues == NULL) { /* first time configuration */
 		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
 				sizeof(dev->data->tx_queues[0]) * nb_queues,
-				CACHE_LINE_SIZE);
+				RTE_CACHE_LINE_SIZE);
 		if (dev->data->tx_queues == NULL) {
 			dev->data->nb_tx_queues = 0;
 			return -(ENOMEM);
@ -487,7 +487,7 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 		for (i = nb_queues; i < old_nb_queues; i++)
 			(*dev->dev_ops->tx_queue_release)(txq[i]);
 		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
-				CACHE_LINE_SIZE);
+				RTE_CACHE_LINE_SIZE);
 		if (txq == NULL)
 			return -(ENOMEM);

@ -39,7 +39,7 @@
 #include <sys/queue.h>

 #include <rte_common.h>
-#include <rte_memory.h>         /* for definition of CACHE_LINE_SIZE */
+#include <rte_memory.h>         /* for definition of RTE_CACHE_LINE_SIZE */
 #include <rte_log.h>
 #include <rte_memcpy.h>
 #include <rte_prefetch.h>
@ -206,11 +206,11 @@ rte_hash_create(const struct rte_hash_parameters *params)
 			sizeof(hash_sig_t), SIG_BUCKET_ALIGNMENT);
 	key_size =  align_size(params->key_len, KEY_ALIGNMENT);

-	hash_tbl_size = align_size(sizeof(struct rte_hash), CACHE_LINE_SIZE);
+	hash_tbl_size = align_size(sizeof(struct rte_hash), RTE_CACHE_LINE_SIZE);
 	sig_tbl_size = align_size(num_buckets * sig_bucket_size,
-				  CACHE_LINE_SIZE);
+				  RTE_CACHE_LINE_SIZE);
 	key_tbl_size = align_size(num_buckets * key_size *
-				  params->bucket_entries, CACHE_LINE_SIZE);
+				  params->bucket_entries, RTE_CACHE_LINE_SIZE);

 	/* Total memory required for hash context */
 	mem_size = hash_tbl_size + sig_tbl_size + key_tbl_size;
@ -233,7 +233,7 @@ rte_hash_create(const struct rte_hash_parameters *params)
 	}

 	h = (struct rte_hash *)rte_zmalloc_socket(hash_name, mem_size,
-					   CACHE_LINE_SIZE, params->socket_id);
+					   RTE_CACHE_LINE_SIZE, params->socket_id);
 	if (h == NULL) {
 		RTE_LOG(ERR, HASH, "memory allocation failed\n");
 		rte_free(te);
@ -87,7 +87,7 @@ rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
 	}

 	sz = sizeof (*tbl) + nb_entries * sizeof (tbl->pkt[0]);
-	if ((tbl = rte_zmalloc_socket(__func__, sz, CACHE_LINE_SIZE,
+	if ((tbl = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
 			socket_id)) == NULL) {
 		RTE_LOG(ERR, USER1,
 			"%s: allocation of %zu bytes at socket %d failed do\n",
@ -42,7 +42,7 @@
 #include <rte_log.h>
 #include <rte_branch_prediction.h>
 #include <rte_common.h>
-#include <rte_memory.h>        /* for definition of CACHE_LINE_SIZE */
+#include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_tailq.h>
@ -199,7 +199,7 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,

 	/* Allocate memory to store the LPM data structures. */
 	lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
-			CACHE_LINE_SIZE, socket_id);
+			RTE_CACHE_LINE_SIZE, socket_id);
 	if (lpm == NULL) {
 		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
 		rte_free(te);
@ -195,7 +195,7 @@ rte_lpm6_create(const char *name, int socket_id,

 	/* Allocate memory to store the LPM data structures. */
 	lpm = (struct rte_lpm6 *)rte_zmalloc_socket(mem_name, (size_t)mem_size,
-			CACHE_LINE_SIZE, socket_id);
+			RTE_CACHE_LINE_SIZE, socket_id);

 	if (lpm == NULL) {
 		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
@ -204,7 +204,7 @@ rte_lpm6_create(const char *name, int socket_id,
 	}

 	lpm->rules_tbl = (struct rte_lpm6_rule *)rte_zmalloc_socket(NULL,
-			(size_t)rules_size, CACHE_LINE_SIZE, socket_id);
+			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

 	if (lpm->rules_tbl == NULL) {
 		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
@ -50,7 +50,7 @@
 #include "malloc_elem.h"
 #include "malloc_heap.h"

-#define MIN_DATA_SIZE (CACHE_LINE_SIZE)
+#define MIN_DATA_SIZE (RTE_CACHE_LINE_SIZE)

 /*
  * initialise a general malloc_elem header structure
@ -308,7 +308,7 @@ malloc_elem_resize(struct malloc_elem *elem, size_t size)
 	if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD){
 		/* now we have a big block together. Lets cut it down a bit, by splitting */
 		struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
-		split_pt = RTE_PTR_ALIGN_CEIL(split_pt, CACHE_LINE_SIZE);
+		split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
 		split_elem(elem, split_pt);
 		malloc_elem_free_list_insert(split_pt);
 	}
@ -74,7 +74,7 @@ set_trailer(struct malloc_elem *elem __rte_unused){ }


 #else
-static const unsigned MALLOC_ELEM_TRAILER_LEN = CACHE_LINE_SIZE;
+static const unsigned MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE;

 #define MALLOC_HEADER_COOKIE   0xbadbadbadadd2e55ULL /**< Header cookie. */
 #define MALLOC_TRAILER_COOKIE  0xadd2e55badbadbadULL /**< Trailer cookie.*/
@ -109,7 +109,7 @@ malloc_heap_add_memzone(struct malloc_heap *heap, size_t size, unsigned align)
 	struct malloc_elem *start_elem = (struct malloc_elem *)mz->addr;
 	struct malloc_elem *end_elem = RTE_PTR_ADD(mz->addr,
 			mz_size - MALLOC_ELEM_OVERHEAD);
-	end_elem = RTE_PTR_ALIGN_FLOOR(end_elem, CACHE_LINE_SIZE);
+	end_elem = RTE_PTR_ALIGN_FLOOR(end_elem, RTE_CACHE_LINE_SIZE);

 	const unsigned elem_size = (uintptr_t)end_elem - (uintptr_t)start_elem;
 	malloc_elem_init(start_elem, heap, mz, elem_size);
@ -155,8 +155,8 @@ void *
 malloc_heap_alloc(struct malloc_heap *heap,
 		const char *type __attribute__((unused)), size_t size, unsigned align)
 {
-	size = CACHE_LINE_ROUNDUP(size);
-	align = CACHE_LINE_ROUNDUP(align);
+	size = RTE_CACHE_LINE_ROUNDUP(size);
+	align = RTE_CACHE_LINE_ROUNDUP(align);
 	rte_spinlock_lock(&heap->lock);
 	struct malloc_elem *elem = find_suitable_element(heap, size, align);
 	if (elem == NULL){
@ -169,7 +169,7 @@ rte_realloc(void *ptr, size_t size, unsigned align)
 	if (elem == NULL)
 		rte_panic("Fatal error: memory corruption detected\n");

-	size = CACHE_LINE_ROUNDUP(size), align = CACHE_LINE_ROUNDUP(align);
+	size = RTE_CACHE_LINE_ROUNDUP(size), align = RTE_CACHE_LINE_ROUNDUP(align);
 	/* check alignment matches first, and if ok, see if we can resize block */
 	if (RTE_PTR_ALIGN(ptr,align) == ptr &&
 			malloc_elem_resize(elem, size) == 0)
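Both the heap allocation and realloc paths round the requested size and the alignment up to whole cache lines before searching the free lists, so every element the allocator hands out starts and ends on a line boundary. A hedged sketch of that normalization step only, not of the allocator itself:

#include <stddef.h>
#include <stdio.h>

#define RTE_CACHE_LINE_SIZE 64
#define RTE_CACHE_LINE_ROUNDUP(size) \
	(RTE_CACHE_LINE_SIZE * \
	 (((size) + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE))

/* Normalize a (size, align) request the way malloc_heap_alloc does
 * before it looks for a suitable free element. */
static void normalize(size_t *size, unsigned *align)
{
	*size = RTE_CACHE_LINE_ROUNDUP(*size);
	*align = RTE_CACHE_LINE_ROUNDUP(*align);
}

int main(void)
{
	size_t size = 100;
	unsigned align = 8;
	normalize(&size, &align);
	printf("size=%zu align=%u\n", size, align); /* size=128 align=64 */
	return 0;
}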
@ -114,10 +114,10 @@ static unsigned optimize_object_size(unsigned obj_size)
 		nrank = 1;

 	/* process new object size */
-	new_obj_size = (obj_size + CACHE_LINE_MASK) / CACHE_LINE_SIZE;
+	new_obj_size = (obj_size + RTE_CACHE_LINE_MASK) / RTE_CACHE_LINE_SIZE;
 	while (get_gcd(new_obj_size, nrank * nchan) != 1)
 		new_obj_size++;
-	return new_obj_size * CACHE_LINE_SIZE;
+	return new_obj_size * RTE_CACHE_LINE_SIZE;
 }

 static void
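optimize_object_size works in units of cache lines: it converts the object size to a line count, then grows that count until it is coprime with channels times ranks, so consecutive mempool objects spread across memory channels instead of hammering one. A standalone sketch with a trivial gcd; the channel and rank counts are illustrative, not values read from hardware:

#include <stdio.h>

#define RTE_CACHE_LINE_SIZE 64
#define RTE_CACHE_LINE_MASK (RTE_CACHE_LINE_SIZE - 1)

static unsigned get_gcd(unsigned a, unsigned b)
{
	while (b != 0) {
		unsigned t = b;
		b = a % b;
		a = t;
	}
	return a;
}

/* Pad obj_size (bytes) so its size in cache lines is coprime with
 * nchan * nrank; mirrors the logic in the hunk above. */
static unsigned optimize_object_size(unsigned obj_size,
				     unsigned nchan, unsigned nrank)
{
	unsigned n = (obj_size + RTE_CACHE_LINE_MASK) / RTE_CACHE_LINE_SIZE;
	while (get_gcd(n, nrank * nchan) != 1)
		n++;
	return n * RTE_CACHE_LINE_SIZE;
}

int main(void)
{
	/* 4 channels, 1 rank: a 256-byte object (4 lines) is padded to
	 * 320 bytes (5 lines), since gcd(4, 4) != 1 */
	printf("%u\n", optimize_object_size(256, 4, 1));
	return 0;
}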
@ -255,7 +255,7 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 #endif
 	if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
 		sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
-			CACHE_LINE_SIZE);
+			RTE_CACHE_LINE_SIZE);

 	/* trailer contains the cookie in debug mode */
 	sz->trailer_size = 0;
@ -269,9 +269,9 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 	if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
 		sz->total_size = sz->header_size + sz->elt_size +
 			sz->trailer_size;
-		sz->trailer_size += ((CACHE_LINE_SIZE -
-			(sz->total_size & CACHE_LINE_MASK)) &
-			CACHE_LINE_MASK);
+		sz->trailer_size += ((RTE_CACHE_LINE_SIZE -
+			(sz->total_size & RTE_CACHE_LINE_MASK)) &
+			RTE_CACHE_LINE_MASK);
 	}

 	/*
@ -418,18 +418,18 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,

 	/* compilation-time checks */
 	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
-			  CACHE_LINE_MASK) != 0);
+			  RTE_CACHE_LINE_MASK) != 0);
 #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
 	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
-			  CACHE_LINE_MASK) != 0);
+			  RTE_CACHE_LINE_MASK) != 0);
 	RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) &
-			  CACHE_LINE_MASK) != 0);
+			  RTE_CACHE_LINE_MASK) != 0);
 #endif
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
-			  CACHE_LINE_MASK) != 0);
+			  RTE_CACHE_LINE_MASK) != 0);
 	RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
-			  CACHE_LINE_MASK) != 0);
+			  RTE_CACHE_LINE_MASK) != 0);
 #endif

 	/* check that we have an initialised tail queue */
@ -489,7 +489,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
 	 * cache-aligned
 	 */
 	private_data_size = (private_data_size +
-			     CACHE_LINE_MASK) & (~CACHE_LINE_MASK);
+			     RTE_CACHE_LINE_MASK) & (~RTE_CACHE_LINE_MASK);

 	if (! rte_eal_has_hugepages()) {
 		/*
@ -216,7 +216,7 @@ struct rte_mempool {
 */
 #define MEMPOOL_HEADER_SIZE(mp, pgn)	(sizeof(*(mp)) + \
 	RTE_ALIGN_CEIL(((pgn) - RTE_DIM((mp)->elt_pa)) * \
-	sizeof ((mp)->elt_pa[0]), CACHE_LINE_SIZE))
+	sizeof ((mp)->elt_pa[0]), RTE_CACHE_LINE_SIZE))

 /**
  * Returns TRUE if whole mempool is allocated in one contiguous block of memory.
@ -203,7 +203,7 @@ rte_pipeline_create(struct rte_pipeline_params *params)

 	/* Allocate memory for the pipeline on requested socket */
 	p = rte_zmalloc_socket("PIPELINE", sizeof(struct rte_pipeline),
-		CACHE_LINE_SIZE, params->socket_id);
+		RTE_CACHE_LINE_SIZE, params->socket_id);

 	if (p == NULL) {
 		RTE_LOG(ERR, PIPELINE,
@ -343,7 +343,7 @@ rte_pipeline_table_create(struct rte_pipeline *p,
 	entry_size = sizeof(struct rte_pipeline_table_entry) +
 		params->action_data_size;
 	default_entry = (struct rte_pipeline_table_entry *) rte_zmalloc_socket(
-		"PIPELINE", entry_size, CACHE_LINE_SIZE, p->socket_id);
+		"PIPELINE", entry_size, RTE_CACHE_LINE_SIZE, p->socket_id);
 	if (default_entry == NULL) {
 		RTE_LOG(ERR, PIPELINE,
 			"%s: Failed to allocate default entry\n", __func__);
@ -1120,7 +1120,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,

 #ifdef RTE_LIBRTE_XEN_DOM0
 	return rte_memzone_reserve_bounded(z_name, ring_size,
-			socket_id, 0, CACHE_LINE_SIZE, RTE_PGSIZE_2M);
+			socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
 #else
 	return rte_memzone_reserve(z_name, ring_size, socket_id, 0);
 #endif
@ -1279,13 +1279,13 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,

 	/* Allocate the tx queue data structure. */
 	if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
-			CACHE_LINE_SIZE)) == NULL)
+			RTE_CACHE_LINE_SIZE)) == NULL)
 		return (-ENOMEM);

 	/* Allocate software ring */
 	if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
 			sizeof(txq->sw_ring[0]) * nb_desc,
-			CACHE_LINE_SIZE)) == NULL) {
+			RTE_CACHE_LINE_SIZE)) == NULL) {
 		em_tx_queue_release(txq);
 		return (-ENOMEM);
 	}
@ -1406,13 +1406,13 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,

 	/* Allocate the RX queue data structure. */
 	if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
-			CACHE_LINE_SIZE)) == NULL)
+			RTE_CACHE_LINE_SIZE)) == NULL)
 		return (-ENOMEM);

 	/* Allocate software ring. */
 	if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
 			sizeof (rxq->sw_ring[0]) * nb_desc,
-			CACHE_LINE_SIZE)) == NULL) {
+			RTE_CACHE_LINE_SIZE)) == NULL) {
 		em_rx_queue_release(rxq);
 		return (-ENOMEM);
 	}
@ -1255,7 +1255,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,

 	/* First allocate the tx queue data structure */
 	txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
-							CACHE_LINE_SIZE);
+							RTE_CACHE_LINE_SIZE);
 	if (txq == NULL)
 		return (-ENOMEM);

@ -1293,7 +1293,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	/* Allocate software ring */
 	txq->sw_ring = rte_zmalloc("txq->sw_ring",
 			sizeof(struct igb_tx_entry) * nb_desc,
-			CACHE_LINE_SIZE);
+			RTE_CACHE_LINE_SIZE);
 	if (txq->sw_ring == NULL) {
 		igb_tx_queue_release(txq);
 		return (-ENOMEM);
@ -1389,7 +1389,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,

 	/* First allocate the RX queue data structure. */
 	rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
-			  CACHE_LINE_SIZE);
+			  RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL)
 		return (-ENOMEM);
 	rxq->mb_pool = mp;
@ -1431,7 +1431,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	/* Allocate software ring. */
 	rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
 			sizeof(struct igb_rx_entry) * nb_desc,
-			CACHE_LINE_SIZE);
+			RTE_CACHE_LINE_SIZE);
 	if (rxq->sw_ring == NULL) {
 		igb_rx_queue_release(rxq);
 		return (-ENOMEM);
@ -1752,7 +1752,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	/* Allocate the rx queue data structure */
 	rxq = rte_zmalloc_socket("i40e rx queue",
 				 sizeof(struct i40e_rx_queue),
-				 CACHE_LINE_SIZE,
+				 RTE_CACHE_LINE_SIZE,
 				 socket_id);
 	if (!rxq) {
 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
@ -1811,7 +1811,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->sw_ring =
 		rte_zmalloc_socket("i40e rx sw ring",
 				   sizeof(struct i40e_rx_entry) * len,
-				   CACHE_LINE_SIZE,
+				   RTE_CACHE_LINE_SIZE,
 				   socket_id);
 	if (!rxq->sw_ring) {
 		i40e_dev_rx_queue_release(rxq);
@ -2036,7 +2036,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	/* Allocate the TX queue data structure. */
 	txq = rte_zmalloc_socket("i40e tx queue",
 				 sizeof(struct i40e_tx_queue),
-				 CACHE_LINE_SIZE,
+				 RTE_CACHE_LINE_SIZE,
 				 socket_id);
 	if (!txq) {
 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
@ -2087,7 +2087,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->sw_ring =
 		rte_zmalloc_socket("i40e tx sw ring",
 				   sizeof(struct i40e_tx_entry) * nb_desc,
-				   CACHE_LINE_SIZE,
+				   RTE_CACHE_LINE_SIZE,
 				   socket_id);
 	if (!txq->sw_ring) {
 		i40e_dev_tx_queue_release(txq);
@ -2542,7 +2542,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
 	/* Allocate the TX queue data structure. */
 	txq = rte_zmalloc_socket("i40e fdir tx queue",
 				 sizeof(struct i40e_tx_queue),
-				 CACHE_LINE_SIZE,
+				 RTE_CACHE_LINE_SIZE,
 				 SOCKET_ID_ANY);
 	if (!txq) {
 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
@ -2602,7 +2602,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
 	/* Allocate the RX queue data structure. */
 	rxq = rte_zmalloc_socket("i40e fdir rx queue",
 				 sizeof(struct i40e_rx_queue),
-				 CACHE_LINE_SIZE,
+				 RTE_CACHE_LINE_SIZE,
 				 SOCKET_ID_ANY);
 	if (!rxq) {
 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
@ -1877,7 +1877,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,

 	/* First allocate the tx queue data structure */
 	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue),
-				 CACHE_LINE_SIZE, socket_id);
+				 RTE_CACHE_LINE_SIZE, socket_id);
 	if (txq == NULL)
 		return (-ENOMEM);

@ -1925,7 +1925,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	/* Allocate software ring */
 	txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
 				sizeof(struct igb_tx_entry) * nb_desc,
-				CACHE_LINE_SIZE, socket_id);
+				RTE_CACHE_LINE_SIZE, socket_id);
 	if (txq->sw_ring == NULL) {
 		ixgbe_tx_queue_release(txq);
 		return (-ENOMEM);
@ -2163,7 +2163,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,

 	/* First allocate the rx queue data structure */
 	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue),
-				 CACHE_LINE_SIZE, socket_id);
+				 RTE_CACHE_LINE_SIZE, socket_id);
 	if (rxq == NULL)
 		return (-ENOMEM);
 	rxq->mb_pool = mp;
@ -2229,7 +2229,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 #endif
 	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
 				sizeof(struct igb_rx_entry) * len,
-				CACHE_LINE_SIZE, socket_id);
+				RTE_CACHE_LINE_SIZE, socket_id);
 	if (rxq->sw_ring == NULL) {
 		ixgbe_rx_queue_release(rxq);
 		return (-ENOMEM);
@ -274,18 +274,18 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
 		snprintf(vq_name, sizeof(vq_name), "port%d_rvq%d",
 			dev->data->port_id, queue_idx);
 		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
-			vq_size * sizeof(struct vq_desc_extra), CACHE_LINE_SIZE);
+			vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
 	} else if (queue_type == VTNET_TQ) {
 		snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d",
 			dev->data->port_id, queue_idx);
 		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
-			vq_size * sizeof(struct vq_desc_extra), CACHE_LINE_SIZE);
+			vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
 	} else if (queue_type == VTNET_CQ) {
 		snprintf(vq_name, sizeof(vq_name), "port%d_cvq",
 			dev->data->port_id);
 		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
 			vq_size * sizeof(struct vq_desc_extra),
-			CACHE_LINE_SIZE);
+			RTE_CACHE_LINE_SIZE);
 	}
 	if (vq == NULL) {
 		PMD_INIT_LOG(ERR, "%s: Can not allocate virtqueue", __func__);
@ -342,7 +342,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
 			dev->data->port_id, queue_idx);
 		vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
 			vq_size * hw->vtnet_hdr_size,
-			socket_id, 0, CACHE_LINE_SIZE);
+			socket_id, 0, RTE_CACHE_LINE_SIZE);
 		if (vq->virtio_net_hdr_mz == NULL) {
 			rte_free(vq);
 			return -ENOMEM;
@ -356,7 +356,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
 		snprintf(vq_name, sizeof(vq_name), "port%d_cvq_hdrzone",
 			dev->data->port_id);
 		vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
-			PAGE_SIZE, socket_id, 0, CACHE_LINE_SIZE);
+			PAGE_SIZE, socket_id, 0, RTE_CACHE_LINE_SIZE);
 		if (vq->virtio_net_hdr_mz == NULL) {
 			rte_free(vq);
 			return -ENOMEM;
@ -441,7 +441,7 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
 }

 #define VIRTIO_MBUF_BURST_SZ 64
-#define DESC_PER_CACHELINE (CACHE_LINE_SIZE / sizeof(struct vring_desc))
+#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
 uint16_t
 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
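DESC_PER_CACHELINE tells the burst receive loop how many ring descriptors share one cache line, so it can process them in line-sized groups. With the 16-byte struct vring_desc from the virtio spec and a 64-byte line the value is 4; a quick check under those assumptions:

#include <stdint.h>
#include <stdio.h>

#define RTE_CACHE_LINE_SIZE 64

/* Layout of a virtio ring descriptor (16 bytes, per the virtio spec) */
struct vring_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};

#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))

int main(void)
{
	/* prints 4 on a typical 64-bit target */
	printf("%zu descriptors per cache line\n", DESC_PER_CACHELINE);
	return 0;
}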
@ -347,7 +347,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)

 	/* Allocate memory structure for UPT1_RSSConf and configure */
 	mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), "rss_conf",
-			      rte_socket_id(), CACHE_LINE_SIZE);
+			      rte_socket_id(), RTE_CACHE_LINE_SIZE);
 	if (mz == NULL) {
 		PMD_INIT_LOG(ERR,
 			     "ERROR: Creating rss_conf structure zone");
@ -744,7 +744,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}

-	txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), CACHE_LINE_SIZE);
+	txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), RTE_CACHE_LINE_SIZE);
 	if (txq == NULL) {
 		PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
 		return -ENOMEM;
@ -810,7 +810,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,

 	/* cmd_ring0 buf_info allocation */
 	ring->buf_info = rte_zmalloc("tx_ring_buf_info",
-			ring->size * sizeof(vmxnet3_buf_info_t), CACHE_LINE_SIZE);
+			ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
 	if (ring->buf_info == NULL) {
 		PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure");
 		return -ENOMEM;
@ -855,7 +855,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}

-	rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), CACHE_LINE_SIZE);
+	rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL) {
 		PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
 		return -ENOMEM;
@ -929,7 +929,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		ring->rid = i;
 		snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);

-		ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), CACHE_LINE_SIZE);
+		ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
 		if (ring->buf_info == NULL) {
 			PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");
 			return -ENOMEM;
@ -452,7 +452,7 @@ virtio_queue_setup(struct rte_eth_dev *dev, int queue_type)
 		snprintf(vq_name, sizeof(vq_name), "port%d_rvq",
 				dev->data->port_id);
 		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
-			vq_size * sizeof(struct vq_desc_extra), CACHE_LINE_SIZE);
+			vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
 		if (vq == NULL) {
 			RTE_LOG(ERR, PMD, "%s: unabled to allocate virtqueue\n", __func__);
 			return NULL;
@ -462,7 +462,7 @@ virtio_queue_setup(struct rte_eth_dev *dev, int queue_type)
 		snprintf(vq_name, sizeof(vq_name), "port%d_tvq",
 				dev->data->port_id);
 		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
-			vq_size * sizeof(struct vq_desc_extra), CACHE_LINE_SIZE);
+			vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
 		if (vq == NULL) {
 			RTE_LOG(ERR, PMD, "%s: unabled to allocate virtqueue\n", __func__);
 			return NULL;
@ -556,7 +556,7 @@ rte_eth_xenvirt_parse_args(struct xenvirt_dict *dict,
 	if (params == NULL)
 		return 0;

-	args = rte_zmalloc(NULL, strlen(params) + 1, CACHE_LINE_SIZE);
+	args = rte_zmalloc(NULL, strlen(params) + 1, RTE_CACHE_LINE_SIZE);
 	if (args == NULL) {
 		RTE_LOG(ERR, PMD, "Couldn't parse %s device \n", name);
 		return -1;
@ -61,7 +61,7 @@ rte_port_ethdev_reader_create(void *params, int socket_id)

 	/* Memory allocation */
 	port = rte_zmalloc_socket("PORT", sizeof(*port),
-		CACHE_LINE_SIZE, socket_id);
+		RTE_CACHE_LINE_SIZE, socket_id);
 	if (port == NULL) {
 		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
 		return NULL;
@ -128,7 +128,7 @@ rte_port_ethdev_writer_create(void *params, int socket_id)

 	/* Memory allocation */
 	port = rte_zmalloc_socket("PORT", sizeof(*port),
-		CACHE_LINE_SIZE, socket_id);
+		RTE_CACHE_LINE_SIZE, socket_id);
 	if (port == NULL) {
 		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
 		return NULL;
@ -93,7 +93,7 @@ rte_port_ring_reader_ipv4_frag_create(void *params, int socket_id)
 	}

 	/* Memory allocation */
-	port = rte_zmalloc_socket("PORT", sizeof(*port), CACHE_LINE_SIZE,
+	port = rte_zmalloc_socket("PORT", sizeof(*port), RTE_CACHE_LINE_SIZE,
 		socket_id);
 	if (port == NULL) {
 		RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
@ -86,7 +86,7 @@ rte_port_ring_writer_ipv4_ras_create(void *params, int socket_id)

 	/* Memory allocation */
 	port = rte_zmalloc_socket("PORT", sizeof(*port),
-		CACHE_LINE_SIZE, socket_id);
+		RTE_CACHE_LINE_SIZE, socket_id);
 	if (port == NULL) {
 		RTE_LOG(ERR, PORT, "%s: Failed to allocate socket\n", __func__);
 		return NULL;
@ -60,7 +60,7 @@ rte_port_ring_reader_create(void *params, int socket_id)

 	/* Memory allocation */
 	port = rte_zmalloc_socket("PORT", sizeof(*port),
-		CACHE_LINE_SIZE, socket_id);
+		RTE_CACHE_LINE_SIZE, socket_id);
 	if (port == NULL) {
 		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
 		return NULL;
@ -120,7 +120,7 @@ rte_port_ring_writer_create(void *params, int socket_id)

 	/* Memory allocation */
 	port = rte_zmalloc_socket("PORT", sizeof(*port),
-		CACHE_LINE_SIZE, socket_id);
+		RTE_CACHE_LINE_SIZE, socket_id);
 	if (port == NULL) {
 		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
 		return NULL;
@ -60,7 +60,7 @@ rte_port_sched_reader_create(void *params, int socket_id)

 	/* Memory allocation */
 	port = rte_zmalloc_socket("PORT", sizeof(*port),
-		CACHE_LINE_SIZE, socket_id);
+		RTE_CACHE_LINE_SIZE, socket_id);
 	if (port == NULL) {
 		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
 		return NULL;
@ -123,7 +123,7 @@ rte_port_sched_writer_create(void *params, int socket_id)

 	/* Memory allocation */
 	port = rte_zmalloc_socket("PORT", sizeof(*port),
-		CACHE_LINE_SIZE, socket_id);
+		RTE_CACHE_LINE_SIZE, socket_id);
 	if (port == NULL) {
 		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
 		return NULL;
@ -61,7 +61,7 @@ rte_port_source_create(void *params, int socket_id)

 	/* Memory allocation */
 	port = rte_zmalloc_socket("PORT", sizeof(*port),
-		CACHE_LINE_SIZE, socket_id);
+		RTE_CACHE_LINE_SIZE, socket_id);
 	if (port == NULL) {
 		RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
 		return NULL;
@ -110,7 +110,7 @@ rte_ring_get_memsize(unsigned count)
 	}

 	sz = sizeof(struct rte_ring) + count * sizeof(void *);
-	sz = RTE_ALIGN(sz, CACHE_LINE_SIZE);
+	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
 	return sz;
 }

@ -120,18 +120,18 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
 {
 	/* compilation-time checks */
 	RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
-			  CACHE_LINE_MASK) != 0);
+			  RTE_CACHE_LINE_MASK) != 0);
 #ifdef RTE_RING_SPLIT_PROD_CONS
 	RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
-			  CACHE_LINE_MASK) != 0);
+			  RTE_CACHE_LINE_MASK) != 0);
 #endif
 	RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
-			  CACHE_LINE_MASK) != 0);
+			  RTE_CACHE_LINE_MASK) != 0);
 #ifdef RTE_LIBRTE_RING_DEBUG
 	RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
-			  CACHE_LINE_MASK) != 0);
+			  RTE_CACHE_LINE_MASK) != 0);
 	RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
-			  CACHE_LINE_MASK) != 0);
+			  RTE_CACHE_LINE_MASK) != 0);
 #endif

 	/* init the ring structure */
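rte_ring_get_memsize returns the ring header plus one pointer slot per entry, rounded up to a whole cache line so the ring never shares its last line with a neighboring allocation. A sketch of the same computation; the 128-byte header and the RTE_ALIGN definition here are local stand-ins, not the real sizeof(struct rte_ring) or the DPDK macro:

#include <stddef.h>
#include <stdio.h>

#define RTE_CACHE_LINE_SIZE 64
/* stand-in for RTE_ALIGN: round v up to a multiple of a (power of two) */
#define RTE_ALIGN(v, a) (((v) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t header = 128;   /* stand-in for sizeof(struct rte_ring) */
	unsigned count = 1024; /* ring slots; rte_ring requires a power of two */
	size_t sz = header + count * sizeof(void *);
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
	printf("%zu bytes\n", sz); /* 128 + 8192 = 8320, already a multiple of 64 */
	return 0;
}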
@ -83,7 +83,7 @@ extern "C" {
 #define RTE_BITMAP_SLAB_BIT_MASK                 (RTE_BITMAP_SLAB_BIT_SIZE - 1)

 /* Cache line (CL) */
-#define RTE_BITMAP_CL_BIT_SIZE                   (CACHE_LINE_SIZE * 8)
+#define RTE_BITMAP_CL_BIT_SIZE                   (RTE_CACHE_LINE_SIZE * 8)
 #define RTE_BITMAP_CL_BIT_SIZE_LOG2              9
 #define RTE_BITMAP_CL_BIT_MASK                   (RTE_BITMAP_CL_BIT_SIZE - 1)

@ -178,7 +178,7 @@ __rte_bitmap_get_memory_footprint(uint32_t n_bits,
 	n_slabs_array1 = rte_align32pow2(n_slabs_array1);
 	n_slabs_context = (sizeof(struct rte_bitmap) + (RTE_BITMAP_SLAB_BIT_SIZE / 8) - 1) / (RTE_BITMAP_SLAB_BIT_SIZE / 8);
 	n_cache_lines_context_and_array1 = (n_slabs_context + n_slabs_array1 + RTE_BITMAP_CL_SLAB_SIZE - 1) / RTE_BITMAP_CL_SLAB_SIZE;
-	n_bytes_total = (n_cache_lines_context_and_array1 + n_cache_lines_array2) * CACHE_LINE_SIZE;
+	n_bytes_total = (n_cache_lines_context_and_array1 + n_cache_lines_array2) * RTE_CACHE_LINE_SIZE;

 	if (array1_byte_offset) {
 		*array1_byte_offset = n_slabs_context * (RTE_BITMAP_SLAB_BIT_SIZE / 8);
@ -187,7 +187,7 @@ __rte_bitmap_get_memory_footprint(uint32_t n_bits,
 		*array1_slabs = n_slabs_array1;
 	}
 	if (array2_byte_offset) {
-		*array2_byte_offset = n_cache_lines_context_and_array1 * CACHE_LINE_SIZE;
+		*array2_byte_offset = n_cache_lines_context_and_array1 * RTE_CACHE_LINE_SIZE;
 	}
 	if (array2_slabs) {
 		*array2_slabs = n_cache_lines_array2 * RTE_BITMAP_CL_SLAB_SIZE;
@ -249,7 +249,7 @@ rte_bitmap_init(uint32_t n_bits, uint8_t *mem, uint32_t mem_size)
 		return NULL;
 	}

-	if ((mem == NULL) || (((uintptr_t) mem) & CACHE_LINE_MASK)) {
+	if ((mem == NULL) || (((uintptr_t) mem) & RTE_CACHE_LINE_MASK)) {
 		return NULL;
 	}

@ -417,25 +417,25 @@ rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sch
 	base = 0;

 	if (array == e_RTE_SCHED_PORT_ARRAY_SUBPORT) return base;
-	base += CACHE_LINE_ROUNDUP(size_subport);
+	base += RTE_CACHE_LINE_ROUNDUP(size_subport);

 	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE) return base;
-	base += CACHE_LINE_ROUNDUP(size_pipe);
+	base += RTE_CACHE_LINE_ROUNDUP(size_pipe);

 	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE) return base;
-	base += CACHE_LINE_ROUNDUP(size_queue);
+	base += RTE_CACHE_LINE_ROUNDUP(size_queue);

 	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA) return base;
-	base += CACHE_LINE_ROUNDUP(size_queue_extra);
+	base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);

 	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES) return base;
-	base += CACHE_LINE_ROUNDUP(size_pipe_profiles);
+	base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);

 	if (array == e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY) return base;
-	base += CACHE_LINE_ROUNDUP(size_bmp_array);
+	base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);

 	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY) return base;
-	base += CACHE_LINE_ROUNDUP(size_queue_array);
+	base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);

 	return base;
 }
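rte_sched_port_get_array_base lays the per-port arrays out back to back inside one allocation, rounding each array's size to a whole number of cache lines so the next array starts aligned. A reduced sketch with two hypothetical arrays; the enum and the sizes are illustrative, not the real subport/pipe structures:

#include <stddef.h>
#include <stdio.h>

#define RTE_CACHE_LINE_SIZE 64
#define RTE_CACHE_LINE_ROUNDUP(size) \
	(RTE_CACHE_LINE_SIZE * \
	 (((size) + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE))

enum array_id { ARRAY_SUBPORT, ARRAY_PIPE, ARRAY_TOTAL };

/* Offset of each array inside one contiguous allocation. */
static size_t array_base(enum array_id which,
			 size_t size_subport, size_t size_pipe)
{
	size_t base = 0;
	if (which == ARRAY_SUBPORT)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_subport);
	if (which == ARRAY_PIPE)
		return base;
	base += RTE_CACHE_LINE_ROUNDUP(size_pipe);
	return base; /* ARRAY_TOTAL: the whole footprint */
}

int main(void)
{
	/* 100 rounds up to 128, so the pipe array starts at offset 128 */
	printf("pipe array at %zu\n", array_base(ARRAY_PIPE, 100, 200));
	return 0;
}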
@ -617,7 +617,7 @@ rte_sched_port_config(struct rte_sched_port_params *params)
 	}

 	/* Allocate memory to store the data structures */
-	port = rte_zmalloc("qos_params", mem_size, CACHE_LINE_SIZE);
+	port = rte_zmalloc("qos_params", mem_size, RTE_CACHE_LINE_SIZE);
 	if (port == NULL) {
 		return NULL;
 	}
@ -75,7 +75,7 @@ rte_table_acl_create(
 	uint32_t action_table_size, acl_rule_list_size, acl_rule_memory_size;
 	uint32_t total_size;

-	RTE_BUILD_BUG_ON(((sizeof(struct rte_table_acl) % CACHE_LINE_SIZE)
+	RTE_BUILD_BUG_ON(((sizeof(struct rte_table_acl) % RTE_CACHE_LINE_SIZE)
 		!= 0));

 	/* Check input parameters */
@ -102,15 +102,15 @@ rte_table_acl_create(
 	entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));

 	/* Memory allocation */
-	action_table_size = CACHE_LINE_ROUNDUP(p->n_rules * entry_size);
+	action_table_size = RTE_CACHE_LINE_ROUNDUP(p->n_rules * entry_size);
 	acl_rule_list_size =
-		CACHE_LINE_ROUNDUP(p->n_rules * sizeof(struct rte_acl_rule *));
-	acl_rule_memory_size = CACHE_LINE_ROUNDUP(p->n_rules *
+		RTE_CACHE_LINE_ROUNDUP(p->n_rules * sizeof(struct rte_acl_rule *));
+	acl_rule_memory_size = RTE_CACHE_LINE_ROUNDUP(p->n_rules *
 		RTE_ACL_RULE_SZ(p->n_rule_fields));
 	total_size = sizeof(struct rte_table_acl) + action_table_size +
 		acl_rule_list_size + acl_rule_memory_size;

-	acl = rte_zmalloc_socket("TABLE", total_size, CACHE_LINE_SIZE,
+	acl = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
 		socket_id);
 	if (acl == NULL) {
 		RTE_LOG(ERR, TABLE,
@ -72,11 +72,11 @@ rte_table_array_create(void *params, int socket_id, uint32_t entry_size)

 	/* Memory allocation */
 	total_cl_size = (sizeof(struct rte_table_array) +
-			CACHE_LINE_SIZE) / CACHE_LINE_SIZE;
+			RTE_CACHE_LINE_SIZE) / RTE_CACHE_LINE_SIZE;
 	total_cl_size += (p->n_entries * entry_size +
-			CACHE_LINE_SIZE) / CACHE_LINE_SIZE;
-	total_size = total_cl_size * CACHE_LINE_SIZE;
-	t = rte_zmalloc_socket("TABLE", total_size, CACHE_LINE_SIZE, socket_id);
+			RTE_CACHE_LINE_SIZE) / RTE_CACHE_LINE_SIZE;
+	total_size = total_cl_size * RTE_CACHE_LINE_SIZE;
+	t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
 	if (t == NULL) {
 		RTE_LOG(ERR, TABLE,
 			"%s: Cannot allocate %u bytes for array table\n",
@ -180,24 +180,24 @@ rte_table_hash_ext_create(void *params, int socket_id, uint32_t entry_size)
 	/* Check input parameters */
 	if ((check_params_create(p) != 0) ||
 		(!rte_is_power_of_2(entry_size)) ||
-		((sizeof(struct rte_table_hash) % CACHE_LINE_SIZE) != 0) ||
-		(sizeof(struct bucket) != (CACHE_LINE_SIZE / 2)))
+		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
+		(sizeof(struct bucket) != (RTE_CACHE_LINE_SIZE / 2)))
 		return NULL;

 	/* Memory allocation */
-	table_meta_sz = CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
-	bucket_sz = CACHE_LINE_ROUNDUP(p->n_buckets * sizeof(struct bucket));
-	bucket_ext_sz =
-		CACHE_LINE_ROUNDUP(p->n_buckets_ext * sizeof(struct bucket));
-	key_sz = CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
-	key_stack_sz = CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
-	bkt_ext_stack_sz =
-		CACHE_LINE_ROUNDUP(p->n_buckets_ext * sizeof(uint32_t));
-	data_sz = CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
+	table_meta_sz = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
+	bucket_sz = RTE_CACHE_LINE_ROUNDUP(p->n_buckets * sizeof(struct bucket));
+	bucket_ext_sz =
+		RTE_CACHE_LINE_ROUNDUP(p->n_buckets_ext * sizeof(struct bucket));
+	key_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
+	key_stack_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
+	bkt_ext_stack_sz =
+		RTE_CACHE_LINE_ROUNDUP(p->n_buckets_ext * sizeof(uint32_t));
+	data_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
 	total_size = table_meta_sz + bucket_sz + bucket_ext_sz + key_sz +
 		key_stack_sz + bkt_ext_stack_sz + data_sz;

-	t = rte_zmalloc_socket("TABLE", total_size, CACHE_LINE_SIZE, socket_id);
+	t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
 	if (t == NULL) {
 		RTE_LOG(ERR, TABLE,
 			"%s: Cannot allocate %u bytes for hash table\n",
@ -123,8 +123,8 @@ rte_table_hash_create_key16_lru(void *params,
|
|||||||
|
|
||||||
/* Check input parameters */
|
/* Check input parameters */
|
||||||
if ((check_params_create_lru(p) != 0) ||
|
if ((check_params_create_lru(p) != 0) ||
|
||||||
((sizeof(struct rte_table_hash) % CACHE_LINE_SIZE) != 0) ||
|
((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
|
||||||
((sizeof(struct rte_bucket_4_16) % CACHE_LINE_SIZE) != 0))
|
((sizeof(struct rte_bucket_4_16) % RTE_CACHE_LINE_SIZE) != 0))
|
||||||
return NULL;
|
return NULL;
|
||||||
n_entries_per_bucket = 4;
|
n_entries_per_bucket = 4;
|
||||||
key_size = 16;
|
key_size = 16;
|
||||||
@ -133,11 +133,11 @@ rte_table_hash_create_key16_lru(void *params,
|
|||||||
n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
|
n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
|
||||||
n_entries_per_bucket);
|
n_entries_per_bucket);
|
||||||
bucket_size_cl = (sizeof(struct rte_bucket_4_16) + n_entries_per_bucket
|
bucket_size_cl = (sizeof(struct rte_bucket_4_16) + n_entries_per_bucket
|
||||||
* entry_size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE;
|
* entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
|
||||||
total_size = sizeof(struct rte_table_hash) + n_buckets *
|
total_size = sizeof(struct rte_table_hash) + n_buckets *
|
||||||
bucket_size_cl * CACHE_LINE_SIZE;
|
bucket_size_cl * RTE_CACHE_LINE_SIZE;
|
||||||
|
|
||||||
f = rte_zmalloc_socket("TABLE", total_size, CACHE_LINE_SIZE, socket_id);
|
f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
|
||||||
if (f == NULL) {
|
if (f == NULL) {
|
||||||
RTE_LOG(ERR, TABLE,
|
RTE_LOG(ERR, TABLE,
|
||||||
"%s: Cannot allocate %u bytes for hash table\n",
|
"%s: Cannot allocate %u bytes for hash table\n",
|
||||||
@@ -153,7 +153,7 @@ rte_table_hash_create_key16_lru(void *params,
     f->n_entries_per_bucket = n_entries_per_bucket;
     f->key_size = key_size;
     f->entry_size = entry_size;
-    f->bucket_size = bucket_size_cl * CACHE_LINE_SIZE;
+    f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
     f->signature_offset = p->signature_offset;
     f->key_offset = p->key_offset;
     f->f_hash = p->f_hash;
@@ -341,8 +341,8 @@ rte_table_hash_create_key16_ext(void *params,

     /* Check input parameters */
     if ((check_params_create_ext(p) != 0) ||
-        ((sizeof(struct rte_table_hash) % CACHE_LINE_SIZE) != 0) ||
-        ((sizeof(struct rte_bucket_4_16) % CACHE_LINE_SIZE) != 0))
+        ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
+        ((sizeof(struct rte_bucket_4_16) % RTE_CACHE_LINE_SIZE) != 0))
         return NULL;

     n_entries_per_bucket = 4;
@@ -354,14 +354,14 @@ rte_table_hash_create_key16_ext(void *params,
     n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
         n_entries_per_bucket;
     bucket_size_cl = (sizeof(struct rte_bucket_4_16) + n_entries_per_bucket
-        * entry_size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE;
-    stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + CACHE_LINE_SIZE - 1)
-        / CACHE_LINE_SIZE;
+        * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
+    stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
+        / RTE_CACHE_LINE_SIZE;
     total_size = sizeof(struct rte_table_hash) +
         ((n_buckets + n_buckets_ext) * bucket_size_cl + stack_size_cl) *
-        CACHE_LINE_SIZE;
+        RTE_CACHE_LINE_SIZE;

-    f = rte_zmalloc_socket("TABLE", total_size, CACHE_LINE_SIZE, socket_id);
+    f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
     if (f == NULL) {
         RTE_LOG(ERR, TABLE,
             "%s: Cannot allocate %u bytes for hash table\n",
@@ -377,7 +377,7 @@ rte_table_hash_create_key16_ext(void *params,
     f->n_entries_per_bucket = n_entries_per_bucket;
     f->key_size = key_size;
     f->entry_size = entry_size;
-    f->bucket_size = bucket_size_cl * CACHE_LINE_SIZE;
+    f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
     f->signature_offset = p->signature_offset;
     f->key_offset = p->key_offset;
     f->f_hash = p->f_hash;
@@ -608,7 +608,7 @@ rte_table_hash_entry_delete_key16_ext(
     bucket1 = (struct rte_bucket_4_16 *) \
         &f->memory[bucket_index * f->bucket_size]; \
     rte_prefetch0(bucket1); \
-    rte_prefetch0((void *)(((uintptr_t) bucket1) + CACHE_LINE_SIZE));\
+    rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE));\
 }

 #define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
@@ -684,7 +684,7 @@ rte_table_hash_entry_delete_key16_ext(
     buckets_mask |= bucket_mask; \
     bucket_next = bucket->next; \
     rte_prefetch0(bucket_next); \
-    rte_prefetch0((void *)(((uintptr_t) bucket_next) + CACHE_LINE_SIZE));\
+    rte_prefetch0((void *)(((uintptr_t) bucket_next) + RTE_CACHE_LINE_SIZE));\
     buckets[pkt_index] = bucket_next; \
     keys[pkt_index] = key; \
 }
@@ -741,14 +741,14 @@ rte_table_hash_entry_delete_key16_ext(
     bucket10 = (struct rte_bucket_4_16 *) \
         &f->memory[bucket10_index * f->bucket_size]; \
     rte_prefetch0(bucket10); \
-    rte_prefetch0((void *)(((uintptr_t) bucket10) + CACHE_LINE_SIZE));\
+    rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
 \
     signature11 = RTE_MBUF_METADATA_UINT32(mbuf11, f->signature_offset);\
     bucket11_index = signature11 & (f->n_buckets - 1); \
     bucket11 = (struct rte_bucket_4_16 *) \
         &f->memory[bucket11_index * f->bucket_size]; \
     rte_prefetch0(bucket11); \
-    rte_prefetch0((void *)(((uintptr_t) bucket11) + CACHE_LINE_SIZE));\
+    rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE));\
 }

 #define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,\
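A struct rte_bucket_4_16 spans two cache lines, which is why the lookup macros above issue a second rte_prefetch0 one line past the bucket base. The same pattern, folded into a hypothetical helper (a sketch, not part of the patch):

#include <stdint.h>
#include <rte_memory.h>
#include <rte_prefetch.h>

/* Hypothetical helper: warm every cache line of a multi-line bucket. */
static inline void
bucket_prefetch(void *bucket, unsigned int n_lines)
{
    unsigned int i;

    for (i = 0; i < n_lines; i++)
        rte_prefetch0((void *)((uintptr_t)bucket +
            i * RTE_CACHE_LINE_SIZE));
}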
@@ -123,8 +123,8 @@ rte_table_hash_create_key32_lru(void *params,

     /* Check input parameters */
     if ((check_params_create_lru(p) != 0) ||
-        ((sizeof(struct rte_table_hash) % CACHE_LINE_SIZE) != 0) ||
-        ((sizeof(struct rte_bucket_4_32) % CACHE_LINE_SIZE) != 0)) {
+        ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
+        ((sizeof(struct rte_bucket_4_32) % RTE_CACHE_LINE_SIZE) != 0)) {
         return NULL;
     }
     n_entries_per_bucket = 4;
@@ -134,11 +134,11 @@ rte_table_hash_create_key32_lru(void *params,
     n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
         n_entries_per_bucket);
     bucket_size_cl = (sizeof(struct rte_bucket_4_32) + n_entries_per_bucket
-        * entry_size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE;
+        * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
     total_size = sizeof(struct rte_table_hash) + n_buckets *
-        bucket_size_cl * CACHE_LINE_SIZE;
+        bucket_size_cl * RTE_CACHE_LINE_SIZE;

-    f = rte_zmalloc_socket("TABLE", total_size, CACHE_LINE_SIZE, socket_id);
+    f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
     if (f == NULL) {
         RTE_LOG(ERR, TABLE,
             "%s: Cannot allocate %u bytes for hash table\n",
@@ -154,7 +154,7 @@ rte_table_hash_create_key32_lru(void *params,
     f->n_entries_per_bucket = n_entries_per_bucket;
     f->key_size = key_size;
     f->entry_size = entry_size;
-    f->bucket_size = bucket_size_cl * CACHE_LINE_SIZE;
+    f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
     f->signature_offset = p->signature_offset;
     f->key_offset = p->key_offset;
     f->f_hash = p->f_hash;
@@ -343,8 +343,8 @@ rte_table_hash_create_key32_ext(void *params,

     /* Check input parameters */
     if ((check_params_create_ext(p) != 0) ||
-        ((sizeof(struct rte_table_hash) % CACHE_LINE_SIZE) != 0) ||
-        ((sizeof(struct rte_bucket_4_32) % CACHE_LINE_SIZE) != 0))
+        ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
+        ((sizeof(struct rte_bucket_4_32) % RTE_CACHE_LINE_SIZE) != 0))
         return NULL;

     n_entries_per_bucket = 4;
@@ -356,14 +356,14 @@ rte_table_hash_create_key32_ext(void *params,
     n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
         n_entries_per_bucket;
     bucket_size_cl = (sizeof(struct rte_bucket_4_32) + n_entries_per_bucket
-        * entry_size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE;
-    stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + CACHE_LINE_SIZE - 1)
-        / CACHE_LINE_SIZE;
+        * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
+    stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
+        / RTE_CACHE_LINE_SIZE;
     total_size = sizeof(struct rte_table_hash) +
         ((n_buckets + n_buckets_ext) * bucket_size_cl + stack_size_cl) *
-        CACHE_LINE_SIZE;
+        RTE_CACHE_LINE_SIZE;

-    f = rte_zmalloc_socket("TABLE", total_size, CACHE_LINE_SIZE, socket_id);
+    f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
     if (f == NULL) {
         RTE_LOG(ERR, TABLE,
             "%s: Cannot allocate %u bytes for hash table\n",
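stack_size_cl above sizes a LIFO of free extension-bucket indices, one uint32_t per bucket, again rounded to whole cache lines. A sketch of such a stack with hypothetical names:

#include <stdint.h>

/* Hypothetical free-index stack backing the _ext table variants. */
struct bkt_stack {
    uint32_t *slots; /* n_buckets_ext entries */
    uint32_t top;    /* count of free indices currently stacked */
};

static inline uint32_t
bkt_stack_pop(struct bkt_stack *s)
{
    /* Caller must verify s->top > 0 before popping. */
    return s->slots[--s->top];
}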
@@ -379,7 +379,7 @@ rte_table_hash_create_key32_ext(void *params,
     f->n_entries_per_bucket = n_entries_per_bucket;
     f->key_size = key_size;
     f->entry_size = entry_size;
-    f->bucket_size = bucket_size_cl * CACHE_LINE_SIZE;
+    f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
     f->signature_offset = p->signature_offset;
     f->key_offset = p->key_offset;
     f->f_hash = p->f_hash;
@@ -621,8 +621,8 @@ rte_table_hash_entry_delete_key32_ext(
     bucket1 = (struct rte_bucket_4_32 *) \
         &f->memory[bucket_index * f->bucket_size]; \
     rte_prefetch0(bucket1); \
-    rte_prefetch0((void *)(((uintptr_t) bucket1) + CACHE_LINE_SIZE));\
-    rte_prefetch0((void *)(((uintptr_t) bucket1) + 2 * CACHE_LINE_SIZE));\
+    rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE));\
+    rte_prefetch0((void *)(((uintptr_t) bucket1) + 2 * RTE_CACHE_LINE_SIZE));\
 }

 #define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
@@ -698,9 +698,9 @@ rte_table_hash_entry_delete_key32_ext(
     buckets_mask |= bucket_mask; \
     bucket_next = bucket->next; \
     rte_prefetch0(bucket_next); \
-    rte_prefetch0((void *)(((uintptr_t) bucket_next) + CACHE_LINE_SIZE));\
+    rte_prefetch0((void *)(((uintptr_t) bucket_next) + RTE_CACHE_LINE_SIZE));\
     rte_prefetch0((void *)(((uintptr_t) bucket_next) + \
-        2 * CACHE_LINE_SIZE)); \
+        2 * RTE_CACHE_LINE_SIZE)); \
     buckets[pkt_index] = bucket_next; \
     keys[pkt_index] = key; \
 }
@@ -758,16 +758,16 @@ rte_table_hash_entry_delete_key32_ext(
     bucket10 = (struct rte_bucket_4_32 *) \
         &f->memory[bucket10_index * f->bucket_size]; \
     rte_prefetch0(bucket10); \
-    rte_prefetch0((void *)(((uintptr_t) bucket10) + CACHE_LINE_SIZE));\
-    rte_prefetch0((void *)(((uintptr_t) bucket10) + 2 * CACHE_LINE_SIZE));\
+    rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
+    rte_prefetch0((void *)(((uintptr_t) bucket10) + 2 * RTE_CACHE_LINE_SIZE));\
 \
     signature11 = RTE_MBUF_METADATA_UINT32(mbuf11, f->signature_offset);\
     bucket11_index = signature11 & (f->n_buckets - 1); \
     bucket11 = (struct rte_bucket_4_32 *) \
         &f->memory[bucket11_index * f->bucket_size]; \
     rte_prefetch0(bucket11); \
-    rte_prefetch0((void *)(((uintptr_t) bucket11) + CACHE_LINE_SIZE));\
-    rte_prefetch0((void *)(((uintptr_t) bucket11) + 2 * CACHE_LINE_SIZE));\
+    rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE));\
+    rte_prefetch0((void *)(((uintptr_t) bucket11) + 2 * RTE_CACHE_LINE_SIZE));\
 }

 #define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,\
@@ -118,8 +118,8 @@ rte_table_hash_create_key8_lru(void *params, int socket_id, uint32_t entry_size)

     /* Check input parameters */
     if ((check_params_create_lru(p) != 0) ||
-        ((sizeof(struct rte_table_hash) % CACHE_LINE_SIZE) != 0) ||
-        ((sizeof(struct rte_bucket_4_8) % CACHE_LINE_SIZE) != 0)) {
+        ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
+        ((sizeof(struct rte_bucket_4_8) % RTE_CACHE_LINE_SIZE) != 0)) {
         return NULL;
     }
     n_entries_per_bucket = 4;
@@ -129,11 +129,11 @@ rte_table_hash_create_key8_lru(void *params, int socket_id, uint32_t entry_size)
     n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
         n_entries_per_bucket);
     bucket_size_cl = (sizeof(struct rte_bucket_4_8) + n_entries_per_bucket *
-        entry_size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE;
+        entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
     total_size = sizeof(struct rte_table_hash) + n_buckets *
-        bucket_size_cl * CACHE_LINE_SIZE;
+        bucket_size_cl * RTE_CACHE_LINE_SIZE;

-    f = rte_zmalloc_socket("TABLE", total_size, CACHE_LINE_SIZE, socket_id);
+    f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
     if (f == NULL) {
         RTE_LOG(ERR, TABLE,
             "%s: Cannot allocate %u bytes for hash table\n",
@@ -149,7 +149,7 @@ rte_table_hash_create_key8_lru(void *params, int socket_id, uint32_t entry_size)
     f->n_entries_per_bucket = n_entries_per_bucket;
     f->key_size = key_size;
     f->entry_size = entry_size;
-    f->bucket_size = bucket_size_cl * CACHE_LINE_SIZE;
+    f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
     f->signature_offset = p->signature_offset;
     f->key_offset = p->key_offset;
     f->f_hash = p->f_hash;
@@ -332,8 +332,8 @@ rte_table_hash_create_key8_ext(void *params, int socket_id, uint32_t entry_size)

     /* Check input parameters */
     if ((check_params_create_ext(p) != 0) ||
-        ((sizeof(struct rte_table_hash) % CACHE_LINE_SIZE) != 0) ||
-        ((sizeof(struct rte_bucket_4_8) % CACHE_LINE_SIZE) != 0))
+        ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
+        ((sizeof(struct rte_bucket_4_8) % RTE_CACHE_LINE_SIZE) != 0))
         return NULL;

     n_entries_per_bucket = 4;
@@ -345,14 +345,14 @@ rte_table_hash_create_key8_ext(void *params, int socket_id, uint32_t entry_size)
     n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
         n_entries_per_bucket;
     bucket_size_cl = (sizeof(struct rte_bucket_4_8) + n_entries_per_bucket *
-        entry_size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE;
-    stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + CACHE_LINE_SIZE - 1)
-        / CACHE_LINE_SIZE;
+        entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
+    stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
+        / RTE_CACHE_LINE_SIZE;
     total_size = sizeof(struct rte_table_hash) + ((n_buckets +
         n_buckets_ext) * bucket_size_cl + stack_size_cl) *
-        CACHE_LINE_SIZE;
+        RTE_CACHE_LINE_SIZE;

-    f = rte_zmalloc_socket("TABLE", total_size, CACHE_LINE_SIZE, socket_id);
+    f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
     if (f == NULL) {
         RTE_LOG(ERR, TABLE,
             "%s: Cannot allocate %u bytes for hash table\n",
@@ -368,7 +368,7 @@ rte_table_hash_create_key8_ext(void *params, int socket_id, uint32_t entry_size)
     f->n_entries_per_bucket = n_entries_per_bucket;
     f->key_size = key_size;
     f->entry_size = entry_size;
-    f->bucket_size = bucket_size_cl * CACHE_LINE_SIZE;
+    f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
     f->signature_offset = p->signature_offset;
     f->key_offset = p->key_offset;
     f->f_hash = p->f_hash;
@@ -155,21 +155,21 @@ rte_table_hash_lru_create(void *params, int socket_id, uint32_t entry_size)
     /* Check input parameters */
     if ((check_params_create(p) != 0) ||
         (!rte_is_power_of_2(entry_size)) ||
-        ((sizeof(struct rte_table_hash) % CACHE_LINE_SIZE) != 0) ||
-        (sizeof(struct bucket) != (CACHE_LINE_SIZE / 2))) {
+        ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
+        (sizeof(struct bucket) != (RTE_CACHE_LINE_SIZE / 2))) {
         return NULL;
     }

     /* Memory allocation */
-    table_meta_sz = CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
-    bucket_sz = CACHE_LINE_ROUNDUP(p->n_buckets * sizeof(struct bucket));
-    key_sz = CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
-    key_stack_sz = CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
-    data_sz = CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
+    table_meta_sz = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
+    bucket_sz = RTE_CACHE_LINE_ROUNDUP(p->n_buckets * sizeof(struct bucket));
+    key_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
+    key_stack_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
+    data_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
     total_size = table_meta_sz + bucket_sz + key_sz + key_stack_sz +
         data_sz;

-    t = rte_zmalloc_socket("TABLE", total_size, CACHE_LINE_SIZE, socket_id);
+    t = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
     if (t == NULL) {
         RTE_LOG(ERR, TABLE,
             "%s: Cannot allocate %u bytes for hash table\n",
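The lru_create path also insists that struct bucket fill exactly half a cache line; that is checked at run time above, but the same invariant can be pinned at build time with DPDK's RTE_BUILD_BUG_ON. A sketch assuming 64-byte lines and a hypothetical layout:

#include <stdint.h>
#include <rte_common.h>  /* RTE_BUILD_BUG_ON */
#include <rte_memory.h>  /* RTE_CACHE_LINE_SIZE */

/* Hypothetical bucket packed into half of a 64-byte cache line. */
struct bucket_sketch {
    uint64_t sig;
    uint64_t key_pos;
    uint64_t lru;
    uint64_t next;
};

static inline void
bucket_layout_check(void)
{
    /* Breaks the build instead of failing the create call at run time. */
    RTE_BUILD_BUG_ON(sizeof(struct bucket_sketch) !=
        (RTE_CACHE_LINE_SIZE / 2));
}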
@@ -96,7 +96,7 @@ rte_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
     /* Memory allocation */
     nht_size = RTE_TABLE_LPM_MAX_NEXT_HOPS * entry_size;
     total_size = sizeof(struct rte_table_lpm) + nht_size;
-    lpm = rte_zmalloc_socket("TABLE", total_size, CACHE_LINE_SIZE,
+    lpm = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
         socket_id);
     if (lpm == NULL) {
         RTE_LOG(ERR, TABLE,
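Both LPM create paths use the same allocation shape: a zeroed, cache-line-aligned block placed on the caller's NUMA socket. Reduced to an illustrative wrapper with assumed parameter names:

#include <stddef.h>
#include <rte_malloc.h>
#include <rte_memory.h>

/* Illustrative: NUMA-local, zeroed, cache-line-aligned table allocation. */
static void *
table_alloc(size_t total_size, int socket_id)
{
    return rte_zmalloc_socket("TABLE", total_size,
        RTE_CACHE_LINE_SIZE, socket_id);
}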
@@ -102,7 +102,7 @@ rte_table_lpm_ipv6_create(void *params, int socket_id, uint32_t entry_size)
     /* Memory allocation */
     nht_size = RTE_TABLE_LPM_MAX_NEXT_HOPS * entry_size;
     total_size = sizeof(struct rte_table_lpm_ipv6) + nht_size;
-    lpm = rte_zmalloc_socket("TABLE", total_size, CACHE_LINE_SIZE,
+    lpm = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
         socket_id);
     if (lpm == NULL) {
         RTE_LOG(ERR, TABLE,
@@ -32,7 +32,7 @@
 ARCH ?= powerpc
 CROSS ?=

-CPU_CFLAGS ?= -m64 -DCACHE_LINE_SIZE=128
+CPU_CFLAGS ?= -m64 -DRTE_CACHE_LINE_SIZE=128
 CPU_LDFLAGS ?=
 CPU_ASFLAGS ?= -felf64
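The PPC toolchain config is where the 128-byte line size enters the build, so the prefixed macro still arrives on the command line there. Portable code can keep a guarded fallback (a sketch, not the upstream header):

/* Sketch: honor -DRTE_CACHE_LINE_SIZE=... from the build, default to 64. */
#ifndef RTE_CACHE_LINE_SIZE
#define RTE_CACHE_LINE_SIZE 64
#endif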