mem: instrument allocator for ASan
This patch adds the necessary hooks in the memory allocator for ASan.
This feature is currently available in DPDK only on Linux x86_64.
If other OS/architectures want to support it, ASAN_SHADOW_OFFSET must be
defined and RTE_MALLOC_ASAN must be set accordingly in meson.

Signed-off-by: Xueqin Lin <xueqin.lin@intel.com>
Signed-off-by: Zhihong Peng <zhihongx.peng@intel.com>
Acked-by: Anatoly Burakov <anatoly.burakov@intel.com>
parent 6e0290250d
commit 6cc51b1293
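As background (not part of the patch itself): ASan is switched on at configure time with meson's generic sanitizer option, for example "meson setup -Dbuildtype=debug -Db_sanitize=address build"; the meson hunk below then defines RTE_MALLOC_ASAN on Linux x86_64 so that the allocator hooks are compiled in rather than stubbed out. A minimal sketch of that gating (illustrative C only, not code from the patch):

    #include <stdio.h>

    int main(void)
    {
    #ifdef RTE_MALLOC_ASAN
        /* instrumented builds get the shadow-poisoning helpers from malloc_elem.h */
        printf("malloc ASan instrumentation compiled in\n");
    #else
        /* otherwise the asan_*() helpers compile to empty inline stubs */
        printf("malloc ASan instrumentation compiled out\n");
    #endif
        return 0;
    }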
config/meson.build:

@@ -427,6 +427,10 @@ if get_option('b_sanitize') == 'address' or get_option('b_sanitize') == 'address,undefined'
         add_project_link_arguments('-lasan', language: 'c')
         dpdk_extra_ldflags += '-lasan'
     endif
+
+    if is_linux and arch_subdir == 'x86' and dpdk_conf.get('RTE_ARCH_64')
+        dpdk_conf.set10('RTE_MALLOC_ASAN', true)
+    endif
 endif

 if get_option('default_library') == 'both'
doc/guides/prog_guide/asan.rst:

@@ -31,3 +31,59 @@ Example::
 - The libasan package must be installed when compiling with gcc in Centos/RHEL.
 - If the program is tested using cmdline, you may need to execute the
   "stty echo" command when an error occurs.
+
+ASan is aware of DPDK memory allocations, thanks to added instrumentation.
+This is only enabled on x86_64 at the moment.
+Other architectures may have to define ASAN_SHADOW_OFFSET.
+
+Example heap-buffer-overflow error
+----------------------------------
+
+Add the unit test code below in examples/helloworld/main.c::
+
+    Add code to helloworld:
+        char *p = rte_zmalloc(NULL, 9, 0);
+        if (!p) {
+            printf("rte_zmalloc error.\n");
+            return -1;
+        }
+        p[9] = 'a';
+
+The above code results in a heap-buffer-overflow error if ASan is enabled,
+because 9 bytes of memory are allocated but the tenth byte is accessed.
+The detailed error log is shown below::
+
+    ==369953==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x7fb17f465809 at pc 0x5652e6707b84 bp 0x7ffea70eea20 sp 0x7ffea70eea10
+    WRITE of size 1 at 0x7fb17f465809 thread T0
+        #0 0x5652e6707b83 in main ../examples/helloworld/main.c:47
+        #1 0x7fb94953c0b2 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x270b2)
+        #2 0x5652e67079bd in _start (/home/pzh/asan_test/x86_64-native-linuxapp-gcc/examples/dpdk-helloworld+0x8329bd)
+
+    Address 0x7fb17f465809 is a wild pointer.
+    SUMMARY: AddressSanitizer: heap-buffer-overflow ../examples/helloworld/main.c:47 in main
+
+Note::
+
+    - Some ASan features (for example, reporting where the memory involved was
+      originally allocated; such addresses are currently reported as a wild
+      pointer) are not yet supported with DPDK allocations.
+
+Example use-after-free error
+----------------------------
+
+Add the unit test code below in examples/helloworld/main.c::
+
+    Add code to helloworld:
+        char *p = rte_zmalloc(NULL, 9, 0);
+        if (!p) {
+            printf("rte_zmalloc error.\n");
+            return -1;
+        }
+        rte_free(p);
+        *p = 'a';
+
+The above code results in a use-after-free error if ASan is enabled, because
+9 bytes of memory are allocated, freed, and then the first byte is accessed
+after the release. The detailed error log is shown below::
+
+    ==417048==ERROR: AddressSanitizer: heap-use-after-free on address 0x7fc83f465800 at pc 0x564308a39b89 bp 0x7ffc8c85bf50 sp 0x7ffc8c85bf40
+    WRITE of size 1 at 0x7fc83f465800 thread T0
+        #0 0x564308a39b88 in main ../examples/helloworld/main.c:48
+        #1 0x7fd0079c60b2 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x270b2)
+        #2 0x564308a399bd in _start (/home/pzh/asan_test/x86_64-native-linuxapp-gcc/examples/dpdk-helloworld+0x8329bd)
+
+    Address 0x7fc83f465800 is a wild pointer.
+    SUMMARY: AddressSanitizer: heap-use-after-free ../examples/helloworld/main.c:48 in main
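As background for the ASAN_SHADOW_OFFSET requirement mentioned in the guide text above, the allocator hooks rely on ASan's fixed address-to-shadow mapping: shadow = (addr >> 3) + offset, with one shadow byte covering 8 bytes of memory. A small self-contained sketch (not patch code; the constants are the Linux x86_64 values used in malloc_elem.h below):

    #include <stdio.h>

    /* Linux x86_64 values; another architecture must supply its own offset. */
    #define ASAN_SHADOW_OFFSET 0x00007fff8000ULL
    #define ASAN_SHADOW_SCALE  3   /* one shadow byte covers 2^3 = 8 bytes */

    static unsigned long long shadow_of(unsigned long long addr)
    {
        return (addr >> ASAN_SHADOW_SCALE) + ASAN_SHADOW_OFFSET;
    }

    int main(void)
    {
        /* heap address taken from the heap-buffer-overflow log above */
        unsigned long long p = 0x7fb17f465800ULL;

        printf("shadow byte for 0x%llx lives at 0x%llx\n", p, shadow_of(p));
        return 0;
    }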
lib/eal/common/malloc_elem.c:

@@ -446,6 +446,8 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
         struct malloc_elem *new_free_elem =
                 RTE_PTR_ADD(new_elem, size + MALLOC_ELEM_OVERHEAD);

+        asan_clear_split_alloczone(new_free_elem);
+
         split_elem(elem, new_free_elem);
         malloc_elem_free_list_insert(new_free_elem);

@@ -458,6 +460,8 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
         elem->state = ELEM_BUSY;
         elem->pad = old_elem_size;

+        asan_clear_alloczone(elem);
+
         /* put a dummy header in padding, to point to real element header */
         if (elem->pad > 0) { /* pad will be at least 64-bytes, as everything
                               * is cache-line aligned */
@@ -470,12 +474,18 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
         return new_elem;
     }

+    asan_clear_split_alloczone(new_elem);
+
     /* we are going to split the element in two. The original element
      * remains free, and the new element is the one allocated.
      * Re-insert original element, in case its new size makes it
      * belong on a different list.
      */
+
     split_elem(elem, new_elem);
+
+    asan_clear_alloczone(new_elem);
+
     new_elem->state = ELEM_BUSY;
     malloc_elem_free_list_insert(elem);

@@ -601,6 +611,8 @@ malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
     if (next && next_elem_is_adjacent(elem)) {
         len_after = RTE_PTR_DIFF(next, hide_end);
         if (len_after >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+            asan_clear_split_alloczone(hide_end);
+
             /* split after */
             split_elem(elem, hide_end);

@@ -615,6 +627,8 @@ malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
     if (prev && prev_elem_is_adjacent(elem)) {
         len_before = RTE_PTR_DIFF(hide_start, elem);
         if (len_before >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+            asan_clear_split_alloczone(hide_start);
+
             /* split before */
             split_elem(elem, hide_start);

@@ -628,6 +642,8 @@ malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
         }
     }

+    asan_clear_alloczone(elem);
+
     remove_elem(elem);
 }

@@ -641,8 +657,10 @@ malloc_elem_resize(struct malloc_elem *elem, size_t size)
     const size_t new_size = size + elem->pad + MALLOC_ELEM_OVERHEAD;

     /* if we request a smaller size, then always return ok */
-    if (elem->size >= new_size)
+    if (elem->size >= new_size) {
+        asan_clear_alloczone(elem);
         return 0;
+    }

     /* check if there is a next element, it's free and adjacent */
     if (!elem->next || elem->next->state != ELEM_FREE ||
@@ -661,9 +679,15 @@ malloc_elem_resize(struct malloc_elem *elem, size_t size)
         /* now we have a big block together. Lets cut it down a bit, by splitting */
         struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
         split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
+
+        asan_clear_split_alloczone(split_pt);
+
         split_elem(elem, split_pt);
         malloc_elem_free_list_insert(split_pt);
     }

+    asan_clear_alloczone(elem);
+
     return 0;
 }
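The ordering of the calls added above is deliberate: split_elem() writes a new struct malloc_elem header into memory whose shadow bytes may still be poisoned (typically marked as freed by an earlier asan_set_freezone()), so asan_clear_split_alloczone() unpoisons the header/trailer area first, and asan_clear_alloczone() unpoisons the whole element before it is handed back. A self-contained model of that invariant with a simulated shadow array (illustrative only; names, sizes and flags are simplified, not the DPDK implementation):

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    #define GRAIN     8          /* bytes of memory per shadow byte */
    #define MEM_SIZE  256
    #define FREE_FLAG 0xfd       /* "freed" poison value            */

    static unsigned char mem[MEM_SIZE];
    static unsigned char shadow[MEM_SIZE / GRAIN];

    static void set_zone(size_t off, size_t len, unsigned char val)
    {
        memset(&shadow[off / GRAIN], val, (len + GRAIN - 1) / GRAIN);
    }

    /* In this toy model a store is legal only if its granule is unpoisoned. */
    static void checked_store(size_t off, unsigned char v)
    {
        assert(shadow[off / GRAIN] == 0x00 && "ASan would report this store");
        mem[off] = v;
    }

    int main(void)
    {
        set_zone(0, MEM_SIZE, FREE_FLAG);   /* whole region starts poisoned           */

        size_t new_elem_off = 128;          /* where split_elem() writes a header     */
        set_zone(new_elem_off, 64, 0x00);   /* asan_clear_split_alloczone() analogue  */
        checked_store(new_elem_off, 0x42);  /* the header write now passes            */

        printf("header written at offset %zu with its shadow cleared\n", new_elem_off);
        return 0;
    }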
lib/eal/common/malloc_elem.h:

@@ -36,10 +36,20 @@ struct malloc_elem {
     uint64_t header_cookie;         /* Cookie marking start of data */
                                     /* trailer cookie at start + size */
 #endif
+#ifdef RTE_MALLOC_ASAN
+    size_t user_size;
+    uint64_t asan_cookie[2]; /* must be next to header_cookie */
+#endif
 } __rte_cache_aligned;

+static const unsigned int MALLOC_ELEM_HEADER_LEN = sizeof(struct malloc_elem);
+
 #ifndef RTE_MALLOC_DEBUG
-static const unsigned MALLOC_ELEM_TRAILER_LEN = 0;
+#ifdef RTE_MALLOC_ASAN
+static const unsigned int MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE;
+#else
+static const unsigned int MALLOC_ELEM_TRAILER_LEN;
+#endif

 /* dummy function - just check if pointer is non-null */
 static inline int
@@ -55,7 +65,7 @@ set_trailer(struct malloc_elem *elem __rte_unused){ }


 #else
-static const unsigned MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE;
+static const unsigned int MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE;

 #define MALLOC_HEADER_COOKIE 0xbadbadbadadd2e55ULL /**< Header cookie. */
 #define MALLOC_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie.*/
@@ -90,9 +100,193 @@ malloc_elem_cookies_ok(const struct malloc_elem *elem)

 #endif

-static const unsigned MALLOC_ELEM_HEADER_LEN = sizeof(struct malloc_elem);
 #define MALLOC_ELEM_OVERHEAD (MALLOC_ELEM_HEADER_LEN + MALLOC_ELEM_TRAILER_LEN)

+#ifdef RTE_MALLOC_ASAN
+
+#ifdef RTE_ARCH_X86_64
+#define ASAN_SHADOW_OFFSET    0x00007fff8000
+#endif
+
+#define ASAN_SHADOW_GRAIN_SIZE    8
+#define ASAN_MEM_FREE_FLAG        0xfd
+#define ASAN_MEM_REDZONE_FLAG     0xfa
+#define ASAN_SHADOW_SCALE         3
+
+#define ASAN_MEM_SHIFT(mem) ((void *)((uintptr_t)(mem) >> ASAN_SHADOW_SCALE))
+#define ASAN_MEM_TO_SHADOW(mem) \
+    RTE_PTR_ADD(ASAN_MEM_SHIFT(mem), ASAN_SHADOW_OFFSET)
+
+#if defined(__clang__)
+#define __rte_no_asan __attribute__((no_sanitize("address", "hwaddress")))
+#else
+#define __rte_no_asan __attribute__((no_sanitize_address))
+#endif
+
+__rte_no_asan
+static inline void
+asan_set_shadow(void *addr, char val)
+{
+    *(char *)addr = val;
+}
+
+static inline void
+asan_set_zone(void *ptr, size_t len, uint32_t val)
+{
+    size_t offset, i;
+    void *shadow;
+    size_t zone_len = len / ASAN_SHADOW_GRAIN_SIZE;
+    if (len % ASAN_SHADOW_GRAIN_SIZE != 0)
+        zone_len += 1;
+
+    for (i = 0; i < zone_len; i++) {
+        offset = i * ASAN_SHADOW_GRAIN_SIZE;
+        shadow = ASAN_MEM_TO_SHADOW((uintptr_t)ptr + offset);
+        asan_set_shadow(shadow, val);
+    }
+}
+
+/*
+ * When the memory is released, the release mark is
+ * set in the corresponding range of the shadow area.
+ */
+static inline void
+asan_set_freezone(void *ptr, size_t size)
+{
+    asan_set_zone(ptr, size, ASAN_MEM_FREE_FLAG);
+}
+
+/*
+ * When the memory is allocated, its state must be set as accessible.
+ */
+static inline void
+asan_clear_alloczone(struct malloc_elem *elem)
+{
+    asan_set_zone((void *)elem, elem->size, 0x0);
+}
+
+static inline void
+asan_clear_split_alloczone(struct malloc_elem *elem)
+{
+    void *ptr = RTE_PTR_SUB(elem, MALLOC_ELEM_TRAILER_LEN);
+    asan_set_zone(ptr, MALLOC_ELEM_OVERHEAD, 0x0);
+}
+
+/*
+ * When the memory is allocated, the memory boundary is
+ * marked in the corresponding range of the shadow area.
+ * Requirement: redzone >= 16, is a power of two.
+ */
+static inline void
+asan_set_redzone(struct malloc_elem *elem, size_t user_size)
+{
+    uintptr_t head_redzone;
+    uintptr_t tail_redzone;
+    void *front_shadow;
+    void *tail_shadow;
+    uint32_t val;
+
+    if (elem != NULL) {
+        if (elem->state != ELEM_PAD)
+            elem = RTE_PTR_ADD(elem, elem->pad);
+
+        elem->user_size = user_size;
+
+        /* Set mark before the start of the allocated memory */
+        head_redzone = (uintptr_t)RTE_PTR_ADD(elem,
+            MALLOC_ELEM_HEADER_LEN - ASAN_SHADOW_GRAIN_SIZE);
+        front_shadow = ASAN_MEM_TO_SHADOW(head_redzone);
+        asan_set_shadow(front_shadow, ASAN_MEM_REDZONE_FLAG);
+        front_shadow = ASAN_MEM_TO_SHADOW(head_redzone
+            - ASAN_SHADOW_GRAIN_SIZE);
+        asan_set_shadow(front_shadow, ASAN_MEM_REDZONE_FLAG);
+
+        /* Set mark after the end of the allocated memory */
+        tail_redzone = (uintptr_t)RTE_PTR_ADD(elem,
+            MALLOC_ELEM_HEADER_LEN + elem->user_size);
+        tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone);
+        val = (tail_redzone % ASAN_SHADOW_GRAIN_SIZE);
+        val = (val == 0) ? ASAN_MEM_REDZONE_FLAG : val;
+        asan_set_shadow(tail_shadow, val);
+        tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone
+            + ASAN_SHADOW_GRAIN_SIZE);
+        asan_set_shadow(tail_shadow, ASAN_MEM_REDZONE_FLAG);
+    }
+}
+
+/*
+ * When the memory is released, the mark of the memory boundary
+ * in the corresponding range of the shadow area is cleared.
+ * Requirement: redzone >= 16, is a power of two.
+ */
+static inline void
+asan_clear_redzone(struct malloc_elem *elem)
+{
+    uintptr_t head_redzone;
+    uintptr_t tail_redzone;
+    void *head_shadow;
+    void *tail_shadow;
+
+    if (elem != NULL) {
+        elem = RTE_PTR_ADD(elem, elem->pad);
+
+        /* Clear mark before the start of the allocated memory */
+        head_redzone = (uintptr_t)RTE_PTR_ADD(elem,
+            MALLOC_ELEM_HEADER_LEN - ASAN_SHADOW_GRAIN_SIZE);
+        head_shadow = ASAN_MEM_TO_SHADOW(head_redzone);
+        asan_set_shadow(head_shadow, 0x00);
+        head_shadow = ASAN_MEM_TO_SHADOW(head_redzone
+            - ASAN_SHADOW_GRAIN_SIZE);
+        asan_set_shadow(head_shadow, 0x00);
+
+        /* Clear mark after the end of the allocated memory */
+        tail_redzone = (uintptr_t)RTE_PTR_ADD(elem,
+            MALLOC_ELEM_HEADER_LEN + elem->user_size);
+        tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone);
+        asan_set_shadow(tail_shadow, 0x00);
+        tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone
+            + ASAN_SHADOW_GRAIN_SIZE);
+        asan_set_shadow(tail_shadow, 0x00);
+    }
+}
+
+static inline size_t
+old_malloc_size(struct malloc_elem *elem)
+{
+    if (elem->state != ELEM_PAD)
+        elem = RTE_PTR_ADD(elem, elem->pad);
+
+    return elem->user_size;
+}
+
+#else /* !RTE_MALLOC_ASAN */
+
+#define __rte_no_asan
+
+static inline void
+asan_set_freezone(void *ptr __rte_unused, size_t size __rte_unused) { }
+
+static inline void
+asan_clear_alloczone(struct malloc_elem *elem __rte_unused) { }
+
+static inline void
+asan_clear_split_alloczone(struct malloc_elem *elem __rte_unused) { }
+
+static inline void
+asan_set_redzone(struct malloc_elem *elem __rte_unused,
+    size_t user_size __rte_unused) { }
+
+static inline void
+asan_clear_redzone(struct malloc_elem *elem __rte_unused) { }
+
+static inline size_t
+old_malloc_size(struct malloc_elem *elem)
+{
+    return elem->size - elem->pad - MALLOC_ELEM_OVERHEAD;
+}
+#endif /* !RTE_MALLOC_ASAN */
+
 /*
  * Given a pointer to the start of a memory block returned by malloc, get
  * the actual malloc_elem header for that block.
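A worked example of the partial-poison arithmetic in asan_set_redzone() above, using the 9-byte rte_zmalloc() from the guide's heap-buffer-overflow example (a sketch, assuming the data area starts 8-byte aligned, which holds because struct malloc_elem is cache-line aligned): the tail shadow byte receives user_size % 8 = 1, ASan's encoding for "only the first byte of this granule is addressable", and the next shadow byte gets the 0xfa redzone flag, so p[9] is reported while p[0]..p[8] are not:

    #include <stdio.h>
    #include <stddef.h>

    #define GRAIN 8   /* ASAN_SHADOW_GRAIN_SIZE */

    int main(void)
    {
        size_t user_size = 9;   /* bytes requested by rte_zmalloc(NULL, 9, 0) */
        size_t granule;

        for (granule = 0; granule < 3; granule++) {
            size_t start = granule * GRAIN;
            size_t valid = user_size > start ? user_size - start : 0;

            if (valid >= GRAIN)
                printf("data granule %zu: shadow 0x00 (all 8 bytes addressable)\n",
                       granule);
            else if (valid > 0)
                printf("data granule %zu: shadow 0x%02zx (first %zu byte(s) only; p[9] trips here)\n",
                       granule, valid, valid);
            else
                printf("data granule %zu: shadow 0xfa (redzone)\n", granule);
        }
        return 0;
    }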
lib/eal/common/malloc_heap.c:

@@ -237,6 +237,7 @@ heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
         unsigned int flags, size_t align, size_t bound, bool contig)
 {
     struct malloc_elem *elem;
+    size_t user_size = size;

     size = RTE_CACHE_LINE_ROUNDUP(size);
     align = RTE_CACHE_LINE_ROUNDUP(align);
@@ -250,6 +251,8 @@ heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,

         /* increase heap's count of allocated elements */
         heap->alloc_count++;
+
+        asan_set_redzone(elem, user_size);
     }

     return elem == NULL ? NULL : (void *)(&elem[1]);
@@ -270,6 +273,8 @@ heap_alloc_biggest(struct malloc_heap *heap, const char *type __rte_unused,

         /* increase heap's count of allocated elements */
         heap->alloc_count++;
+
+        asan_set_redzone(elem, size);
     }

     return elem == NULL ? NULL : (void *)(&elem[1]);
@@ -841,6 +846,8 @@ malloc_heap_free(struct malloc_elem *elem)
     if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
         return -1;

+    asan_clear_redzone(elem);
+
     /* elem may be merged with previous element, so keep heap address */
     heap = elem->heap;
     msl = elem->msl;
@@ -848,6 +855,9 @@ malloc_heap_free(struct malloc_elem *elem)

     rte_spinlock_lock(&(heap->lock));

+    void *asan_ptr = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN + elem->pad);
+    size_t asan_data_len = elem->size - MALLOC_ELEM_OVERHEAD - elem->pad;
+
     /* mark element as free */
     elem->state = ELEM_FREE;

@@ -1001,6 +1011,8 @@ malloc_heap_free(struct malloc_elem *elem)

     rte_mcfg_mem_write_unlock();
 free_unlock:
+    asan_set_freezone(asan_ptr, asan_data_len);
+
     rte_spinlock_unlock(&(heap->lock));
     return ret;
 }
lib/eal/common/rte_malloc.c:

@@ -162,6 +162,8 @@ rte_calloc(const char *type, size_t num, size_t size, unsigned align)
 void *
 rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
 {
+    size_t user_size;
+
     if (ptr == NULL)
         return rte_malloc_socket(NULL, size, align, socket);

@@ -171,6 +173,8 @@ rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
         return NULL;
     }

+    user_size = size;
+
     size = RTE_CACHE_LINE_ROUNDUP(size), align = RTE_CACHE_LINE_ROUNDUP(align);

     /* check requested socket id and alignment matches first, and if ok,
@@ -181,6 +185,9 @@ rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
             RTE_PTR_ALIGN(ptr, align) == ptr &&
             malloc_heap_resize(elem, size) == 0) {
         rte_eal_trace_mem_realloc(size, align, socket, ptr);
+
+        asan_set_redzone(elem, user_size);
+
         return ptr;
     }

@@ -192,7 +199,7 @@ rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
     if (new_ptr == NULL)
         return NULL;
     /* elem: |pad|data_elem|data|trailer| */
-    const size_t old_size = elem->size - elem->pad - MALLOC_ELEM_OVERHEAD;
+    const size_t old_size = old_malloc_size(elem);
     rte_memcpy(new_ptr, ptr, old_size < size ? old_size : size);
     rte_free(ptr);

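The old_malloc_size() switch in the last hunk matters for correctness under ASan: the previous expression returned the cache-line-rounded usable size, so the rte_memcpy() below it could read past the caller's requested size into bytes that the new redzone logic marks as poisoned; with RTE_MALLOC_ASAN the helper returns the recorded user_size instead. A usage sketch (assumes an initialized EAL and rte_malloc.h included, as in the helloworld snippets in the guide above):

    char *p = rte_zmalloc(NULL, 9, 0);  /* caller asked for 9 usable bytes          */
    char *q = rte_realloc(p, 64, 0);    /* with ASan, only 9 bytes are copied over, */
                                        /* so no poisoned redzone bytes are read    */
    if (q != NULL)
        rte_free(q);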