numam-dpdk/lib/mempool/version.map
Dmitry Kozlyuk da2b9cb25e mempool: add event callbacks
Data path performance can benefit if the PMD knows in advance which memory
it will need to handle, before the first mbuf is sent to the PMD.
It is impractical, however, to consider all allocated memory for this
purpose. Most often mbuf memory comes from mempools, which can come and
go. A PMD can enumerate the existing mempools on device start, but it also
needs to track the creation and destruction of mempools after forwarding
starts but before an mbuf from a new mempool is sent to the device.

Add an API to register callbacks for mempool lifecycle events:
* rte_mempool_event_callback_register()
* rte_mempool_event_callback_unregister()
Currently tracked events are:
* RTE_MEMPOOL_EVENT_READY (after populating a mempool)
* RTE_MEMPOOL_EVENT_DESTROY (before freeing a mempool)
Provide a unit test for the new API.
The new API is internal because it is primarily needed by PMDs, which may
have to deal with any mempool without controlling its creation, whereas an
application knows which mempools it creates and does not care about the
internal mempools that PMDs might create.
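
A minimal sketch of how a PMD might use the new callbacks, following the
callback prototype described above (event, mempool pointer, user data);
struct pmd_priv, pmd_register_mempool_memory() and
pmd_unregister_mempool_memory() are hypothetical driver helpers:

    #include <rte_mempool.h>

    static void
    pmd_mempool_event_cb(enum rte_mempool_event event,
                         struct rte_mempool *mp, void *user_data)
    {
            struct pmd_priv *priv = user_data; /* hypothetical driver context */

            switch (event) {
            case RTE_MEMPOOL_EVENT_READY:
                    /* Mempool fully populated: learn its memory before
                     * any mbuf from it reaches the data path. */
                    pmd_register_mempool_memory(priv, mp);
                    break;
            case RTE_MEMPOOL_EVENT_DESTROY:
                    /* Mempool about to be freed: drop the per-pool state. */
                    pmd_unregister_mempool_memory(priv, mp);
                    break;
            }
    }

    static int
    pmd_dev_start(struct pmd_priv *priv)
    {
            /* Track mempools created after this point; mempools that
             * already exist can be enumerated with rte_mempool_walk(). */
            return rte_mempool_event_callback_register(pmd_mempool_event_cb,
                                                       priv);
    }

On device stop, the driver would call rte_mempool_event_callback_unregister()
with the same callback function and user data.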

Signed-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2021-10-19 16:35:16 +02:00


DPDK_22 {
	global:

	rte_mempool_audit;
	rte_mempool_avail_count;
	rte_mempool_cache_create;
	rte_mempool_cache_free;
	rte_mempool_calc_obj_size;
	rte_mempool_check_cookies;
	rte_mempool_contig_blocks_check_cookies;
	rte_mempool_create;
	rte_mempool_create_empty;
	rte_mempool_dump;
	rte_mempool_free;
	rte_mempool_get_page_size;
	rte_mempool_in_use_count;
	rte_mempool_list_dump;
	rte_mempool_lookup;
	rte_mempool_mem_iter;
	rte_mempool_obj_iter;
	rte_mempool_op_calc_mem_size_default;
	rte_mempool_op_calc_mem_size_helper;
	rte_mempool_op_populate_default;
	rte_mempool_op_populate_helper;
	rte_mempool_ops_get_info;
	rte_mempool_ops_table;
	rte_mempool_populate_anon;
	rte_mempool_populate_default;
	rte_mempool_populate_iova;
	rte_mempool_populate_virt;
	rte_mempool_register_ops;
	rte_mempool_set_ops_byname;
	rte_mempool_walk;

	local: *;
};
EXPERIMENTAL {
	global:

	# added in 20.05
	__rte_mempool_trace_ops_dequeue_bulk;
	__rte_mempool_trace_ops_dequeue_contig_blocks;
	__rte_mempool_trace_ops_enqueue_bulk;
	__rte_mempool_trace_generic_put;
	__rte_mempool_trace_put_bulk;
	__rte_mempool_trace_generic_get;
	__rte_mempool_trace_get_bulk;
	__rte_mempool_trace_get_contig_blocks;
	__rte_mempool_trace_create;
	__rte_mempool_trace_create_empty;
	__rte_mempool_trace_free;
	__rte_mempool_trace_populate_iova;
	__rte_mempool_trace_populate_virt;
	__rte_mempool_trace_populate_default;
	__rte_mempool_trace_populate_anon;
	__rte_mempool_trace_cache_create;
	__rte_mempool_trace_cache_free;
	__rte_mempool_trace_default_cache;
	__rte_mempool_trace_get_page_size;
	__rte_mempool_trace_cache_flush;
	__rte_mempool_trace_ops_populate;
	__rte_mempool_trace_ops_alloc;
	__rte_mempool_trace_ops_free;
	__rte_mempool_trace_set_ops_byname;
};
INTERNAL {
	global:

	# added in 21.11
	rte_mempool_event_callback_register;
	rte_mempool_event_callback_unregister;
};