DPDK_21 {
global:
rte_eth_add_first_rx_callback;
rte_eth_add_rx_callback;
rte_eth_add_tx_callback;
rte_eth_allmulticast_disable;
rte_eth_allmulticast_enable;
rte_eth_allmulticast_get;
rte_eth_dev_adjust_nb_rx_tx_desc;
rte_eth_dev_callback_register;
rte_eth_dev_callback_unregister;
rte_eth_dev_close;
rte_eth_dev_configure;
rte_eth_dev_count_avail;
rte_eth_dev_count_total;
rte_eth_dev_default_mac_addr_set;
rte_eth_dev_flow_ctrl_get;
rte_eth_dev_flow_ctrl_set;
rte_eth_dev_fw_version_get;
rte_eth_dev_get_dcb_info;
rte_eth_dev_get_eeprom;
rte_eth_dev_get_eeprom_length;
rte_eth_dev_get_mtu;
rte_eth_dev_get_name_by_port;
rte_eth_dev_get_port_by_name;
rte_eth_dev_get_reg_info;
rte_eth_dev_get_sec_ctx;
rte_eth_dev_get_supported_ptypes;
rte_eth_dev_get_vlan_offload;
rte_eth_dev_info_get;
rte_eth_dev_is_valid_port;
rte_eth_dev_logtype;
rte_eth_dev_mac_addr_add;
rte_eth_dev_mac_addr_remove;
rte_eth_dev_pool_ops_supported;
rte_eth_dev_priority_flow_ctrl_set;
rte_eth_dev_reset;
rte_eth_dev_rss_hash_conf_get;
rte_eth_dev_rss_hash_update;
rte_eth_dev_rss_reta_query;
rte_eth_dev_rss_reta_update;
rte_eth_dev_rx_intr_ctl;
rte_eth_dev_rx_intr_ctl_q;
rte_eth_dev_rx_intr_disable;
rte_eth_dev_rx_intr_enable;
rte_eth_dev_rx_offload_name;
rte_eth_dev_rx_queue_start;
rte_eth_dev_rx_queue_stop;
rte_eth_dev_set_eeprom;
rte_eth_dev_set_link_down;
rte_eth_dev_set_link_up;
rte_eth_dev_set_mc_addr_list;
rte_eth_dev_set_mtu;
rte_eth_dev_set_rx_queue_stats_mapping;
rte_eth_dev_set_tx_queue_stats_mapping;
rte_eth_dev_set_vlan_ether_type;
rte_eth_dev_set_vlan_offload;
rte_eth_dev_set_vlan_pvid;
rte_eth_dev_set_vlan_strip_on_queue;
rte_eth_dev_socket_id;
rte_eth_dev_start;
rte_eth_dev_stop;
rte_eth_dev_tx_offload_name;
rte_eth_dev_tx_queue_start;
rte_eth_dev_tx_queue_stop;
rte_eth_dev_uc_all_hash_table_set;
rte_eth_dev_uc_hash_table_set;
rte_eth_dev_udp_tunnel_port_add;
rte_eth_dev_udp_tunnel_port_delete;
rte_eth_dev_vlan_filter;
rte_eth_devices;
rte_eth_find_next;
rte_eth_find_next_owned_by;
rte_eth_iterator_cleanup;
rte_eth_iterator_init;
rte_eth_iterator_next;
rte_eth_led_off;
rte_eth_led_on;
rte_eth_link_get;
rte_eth_link_get_nowait;
rte_eth_macaddr_get;
rte_eth_mirror_rule_reset;
rte_eth_mirror_rule_set;
rte_eth_promiscuous_disable;
rte_eth_promiscuous_enable;
rte_eth_promiscuous_get;
rte_eth_remove_rx_callback;
rte_eth_remove_tx_callback;
rte_eth_rx_queue_info_get;
rte_eth_rx_queue_setup;
rte_eth_set_queue_rate_limit;
rte_eth_speed_bitflag;
rte_eth_stats_get;
rte_eth_stats_reset;
rte_eth_timesync_adjust_time;
rte_eth_timesync_disable;
rte_eth_timesync_enable;
rte_eth_timesync_read_rx_timestamp;
rte_eth_timesync_read_time;
rte_eth_timesync_read_tx_timestamp;
rte_eth_timesync_write_time;
rte_eth_tx_buffer_count_callback;
rte_eth_tx_buffer_drop_callback;
rte_eth_tx_buffer_init;
rte_eth_tx_buffer_set_err_callback;
rte_eth_tx_done_cleanup;
rte_eth_tx_queue_info_get;
rte_eth_tx_queue_setup;
rte_eth_xstats_get;
rte_eth_xstats_get_by_id;
rte_eth_xstats_get_id_by_name;
rte_eth_xstats_get_names;
rte_eth_xstats_get_names_by_id;
rte_eth_xstats_reset;
rte_flow_copy;
rte_flow_create;
rte_flow_destroy;
rte_flow_error_set;
rte_flow_flush;
rte_flow_isolate;
rte_flow_query;
rte_flow_validate;
local: *;
};

EXPERIMENTAL {
global:
# added in 17.11
rte_mtr_capabilities_get;
rte_mtr_create;
rte_mtr_destroy;
rte_mtr_meter_disable;
rte_mtr_meter_dscp_table_update;
rte_mtr_meter_enable;
rte_mtr_meter_profile_add;
rte_mtr_meter_profile_delete;
rte_mtr_meter_profile_update;
rte_mtr_policer_actions_update;
rte_mtr_stats_read;
rte_mtr_stats_update;
# added in 18.02
rte_eth_dev_is_removed;
rte_eth_dev_owner_delete;
rte_eth_dev_owner_get;
rte_eth_dev_owner_new;
rte_eth_dev_owner_set;
rte_eth_dev_owner_unset;
# added in 18.05
rte_eth_dev_get_module_eeprom;
rte_eth_dev_get_module_info;
# added in 18.11
rte_eth_dev_rx_intr_ctl_q_get_fd;
rte_flow_conv;
# added in 19.05
rte_eth_find_next_of;
rte_eth_find_next_sibling;
# added in 19.08
rte_eth_read_clock;
# added in 19.11
rte_eth_dev_hairpin_capability_get;
rte_eth_rx_burst_mode_get;
rte_eth_rx_hairpin_queue_setup;
rte_eth_tx_burst_mode_get;
rte_eth_tx_hairpin_queue_setup;
ethdev: extend flow metadata

Currently, metadata can be set on the egress path via the mbuf tx_metadata field with the PKT_TX_METADATA flag, and RTE_FLOW_ITEM_TYPE_META matches metadata. This patch extends the usability of the metadata feature.

1) RTE_FLOW_ACTION_TYPE_SET_META

When multiple tables are supported, Tx metadata can also be set by one rule and matched by another. This new action allows metadata to be set as a result of a flow match.

2) Metadata on ingress

There is also a need to support metadata on ingress. Metadata can be set by the SET_META action and matched by the META item, as on Tx. The final value set by the action is delivered to the application via the metadata dynamic field of the mbuf, which can be accessed through the RTE_FLOW_DYNF_METADATA() macro or the rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. The PKT_RX_DYNF_METADATA flag is set along with the data.

The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior to using the SET_META action. The availability of the dynamic mbuf metadata field can be checked with the rte_flow_dynf_metadata_avail() routine. If an application is going to engage the metadata feature, it registers the metadata dynamic fields; the PMD then checks the field availability and handles the appropriate fields in the datapath.

For loopback/hairpin packets, metadata set on Rx/Tx may or may not be propagated to the other path, depending on hardware capability.

MARK and METADATA look similar and may operate in similar ways, but they do not interact. Initially, two metadata-related actions were proposed:

- RTE_FLOW_ACTION_TYPE_FLAG
- RTE_FLOW_ACTION_TYPE_MARK

These actions set a special flag in the packet metadata; the MARK action stores a specified value in the metadata storage, and on packet reception the PMD puts the flag and value into the mbuf, so applications can see that the packet was treated inside the flow engine according to the appropriate RTE flow(s). MARK and FLAG are a kind of gateway for transferring per-packet information from the flow engine to the application via the receiving datapath. There is also an item of type RTE_FLOW_ITEM_TYPE_MARK, which extends the flow match pattern with the capability to match metadata values set by MARK/FLAG actions in other flows.

From the datapath point of view, MARK and FLAG relate to the receiving side only. It would be useful to have the same gateway on the transmitting side, so an item of type RTE_FLOW_ITEM_TYPE_META was proposed. The application fills the field in the mbuf, and this value is transferred to some field in the packet metadata inside the flow engine. It did not matter whether these metadata fields were shared, because the MARK and META items belonged to different domains (receiving and transmitting) and could be vendor-specific.

So far, so good: DPDK provides entities to control metadata inside the flow engine and gateways to exchange these values on a per-packet basis via the datapaths. But the MARK and META means are not symmetric; there was no action that would allow setting the META value on the transmitting path. So the action of type:

- RTE_FLOW_ACTION_TYPE_SET_META

was proposed.

Next, applications raised new requirements for packet metadata. Flow engines are getting more complex, internal switches are introduced, and multiple ports may be supported within the same flow engine namespace. From the DPDK point of view, this means packets might be sent on one eth_dev port and received on another, while the packet path inside the flow engine belongs entirely to the same hardware device. The simplest example is SR-IOV with a PF, VFs, and their representors. This is an excellent opportunity to provide an out-of-band channel for transferring extra data from one port to another besides the packet data itself, and applications would like to use it.

Applications are expected to use trial calls (with rte_flow_validate) to detect which metadata features (FLAG, MARK, META) are actually supported by the PMD and the underlying hardware. This may depend on PMD configuration, system software, hardware settings, etc., and should be detected at run time.

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
Acked-by: Ori Kam <orika@mellanox.com>
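Below is a minimal sketch (not part of the patch; the `mbuf` variable and the error handling are illustrative) of how an application engages the ingress metadata feature described above: register the dynamic field once at init time, before any flow with a SET_META action is created, then read the value on the Rx path:

/* Init time, before rte_flow_create() with a SET_META action. */
if (rte_flow_dynf_metadata_register() < 0) {
        /* Registration failed: the metadata feature cannot be used. */
}

/* Rx path: the PMD sets PKT_RX_DYNF_METADATA along with the data. */
if (mbuf->ol_flags & PKT_RX_DYNF_METADATA) {
        uint32_t md = rte_flow_dynf_metadata_get(mbuf);
        /* Dispatch the packet according to md... */
}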
rte_flow_dynf_metadata_offs;
rte_flow_dynf_metadata_mask;
rte_flow_dynf_metadata_register;
rte_eth_dev_set_ptypes;
# added in 20.02
rte_flow_dev_dump;
# added in 20.05
__rte_ethdev_trace_configure;
__rte_ethdev_trace_rxq_setup;
__rte_ethdev_trace_txq_setup;
__rte_ethdev_trace_start;
__rte_ethdev_trace_stop;
__rte_ethdev_trace_close;
__rte_ethdev_trace_rx_burst;
__rte_ethdev_trace_tx_burst;
rte_flow_get_aged_flows;
# Marked as experimental in 20.11
rte_tm_capabilities_get;
rte_tm_get_number_of_leaf_nodes;
rte_tm_hierarchy_commit;
rte_tm_level_capabilities_get;
rte_tm_mark_ip_dscp;
rte_tm_mark_ip_ecn;
rte_tm_mark_vlan_dei;
rte_tm_node_add;
rte_tm_node_capabilities_get;
rte_tm_node_cman_update;
rte_tm_node_delete;
rte_tm_node_parent_update;
rte_tm_node_resume;
rte_tm_node_shaper_update;
rte_tm_node_shared_shaper_update;
rte_tm_node_shared_wred_context_update;
rte_tm_node_stats_read;
rte_tm_node_stats_update;
rte_tm_node_suspend;
rte_tm_node_type_get;
rte_tm_node_wfq_weight_mode_update;
rte_tm_node_wred_context_update;
rte_tm_shaper_profile_add;
rte_tm_shaper_profile_delete;
rte_tm_shared_shaper_add_update;
rte_tm_shared_shaper_delete;
rte_tm_shared_wred_context_add_update;
rte_tm_shared_wred_context_delete;
rte_tm_wred_profile_add;
rte_tm_wred_profile_delete;
# added in 20.11
ethdev: add hairpin bind and unbind API

In single-port hairpin mode, all the hairpin Tx and Rx queues belong to the same device. After the queues are set up properly, there is no other dependency between a Tx queue and its Rx peer queue. The binding process that connects the Tx and Rx queues together at the hardware level is done automatically during the device start procedure; everything required for the binding is already configured and initialized.

In two-port hairpin mode, however, there are cross-dependencies between the two ports. The ports are usually initialized serially by the main thread, not in parallel, so the earlier port cannot complete the bind while the peer port has not yet been configured with hardware resources. What's more, if one port is detached/attached dynamically, the hairpin binding becomes even more troublesome.

To overcome this, new APIs for binding and unbinding are added. During startup, only the hairpin Tx and Rx peer queues are set up; nothing is done when starting the device if the queues do not have the auto-bind attribute. Only after the required pair of ports has been started can the rte_eth_hairpin_bind() API be called to bind all the Tx queues of the egress port to the Rx queues of the peer port, establishing the connection between the egress and ingress ports. The rte_eth_hairpin_unbind() API can be used to disconnect the egress port from its peer ingress port; if needed, it should be called only before the device is closed. During cleanup, all the egress and ingress pairs related to a single port should be taken into consideration, especially in the hot-unplug case.

Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
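A sketch of the two-port sequence described above (assumed usage, not from the patch), for ports `tx_port` and `rx_port` whose hairpin queues were set up without the auto-bind attribute; error handling is trimmed:

/* Start both ports first; with manual bind, device start alone
 * does not establish the hairpin connection. */
rte_eth_dev_start(tx_port);
rte_eth_dev_start(rx_port);

/* Bind all hairpin Tx queues of tx_port to their peer Rx queues
 * on rx_port; for traffic in the reverse direction, call again
 * with the arguments swapped. */
rte_eth_hairpin_bind(tx_port, rx_port);

/* ... hairpin traffic is forwarded in hardware ... */

/* Disconnect the pair before closing the devices. */
rte_eth_hairpin_unbind(tx_port, rx_port);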
rte_eth_hairpin_bind;
rte_eth_hairpin_get_peer_ports;
rte_eth_hairpin_unbind;
rte_eth_link_speed_to_str;
rte_eth_link_to_str;
rte_eth_fec_get_capability;
rte_eth_fec_get;
rte_eth_fec_set;
ethdev: add shared actions to flow API

Introduce an extension of the flow action API enabling sharing of a single rte_flow_action among multiple flows. The API targets PMDs where multiple hardware-offloaded flows can reuse the same HW essence/object representing a flow action, so that modifying that essence/object affects all the rules using it.

Motivation and example
===
Adding or removing one or more queues to an RSS action used by multiple flow rules imposes a per-rule toll with the current DPDK flow API; the scenario requires, for each flow sharing the cloned RSS action:
- a call to `rte_flow_destroy()`
- a call to `rte_flow_create()` with the modified RSS action

The benefits of an API for sharing an action and updating it in place:
- reduced overhead when reconfiguring multiple RSS flow rules
- better resource utilization by sharing an action across multiple flows

Change description
===

Shared action
===
To represent a flow action shared by multiple flows, the new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). The introduced API decouples an action from any specific flow and enables sharing of a single action, via its handle, across multiple flows.

Shared action create/use/destroy
===
A shared action may be reused by some flow rules, or by none, at any given moment; i.e., a shared action resides outside the context of any flow. A shared action represents the HW resources/objects used to implement action offloading.

The API for shared action creation (see `rte_flow_shared_action_create()`):
- should allocate HW resources and perform the related initializations required for the shared action implementation;
- makes the necessary preparations to maintain shared access to the action's resources, configuration, and state.

The API for shared action destruction (see `rte_flow_shared_action_destroy()`) should release the HW resources and perform the related cleanups required by the shared action implementation.

To share a flow action, reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as the `conf` field of a `struct rte_flow_action` (see the "Example" section). If a shared action is not used by any flow rule, all resources allocated by it can be released with rte_flow_shared_action_destroy() (see the "Example" section). The shared action handle passed as an argument to the destroy API must not be used any further; the result of such usage is undefined.

Shared action re-configuration
===
Shared action behavior, defined by its configuration, can be updated via rte_flow_shared_action_update() (see the "Example" section). The update operation modifies the HW-related resources/objects allocated at action creation. The number of operations performed by the update should not depend on the number of flows sharing the action. On return from the update API, the action behaves according to the updated configuration for all flows sharing it.

Shared action query
===
A separate API is provided to query shared action state (see rte_flow_shared_action_query()). Taking a counter as an example: the query returns a value aggregating all counter increments across all flow rules sharing the counter. This API does not query the shared action configuration, since that is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() and is not supposed to change by other means.

Example
===
struct rte_flow_action actions[2];
struct rte_flow_shared_action_conf conf;
struct rte_flow_action action;
/* skipped: initialize conf and action */

struct rte_flow_shared_action *handle =
        rte_flow_shared_action_create(port_id, &conf, &action, &error);

actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED;
actions[0].conf = handle;
actions[1].type = RTE_FLOW_ACTION_TYPE_END;
/* skipped: init attr0 & pattern0 args */
struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0,
                                         actions, error);
/* create more rules reusing shared action */
struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1,
                                         actions, error);
/* skipped: for flows 2 till N */
struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN,
                                         actions, error);

/* update shared action */
struct rte_flow_action updated_action;
/*
 * skipped: initialize updated_action according to desired action
 * configuration change
 */
rte_flow_shared_action_update(port_id, handle, &updated_action, error);
/*
 * from now on all flows 1 till N will act according to configuration of
 * updated_action
 */

/* skipped: destroy all flows 1 till N */
rte_flow_shared_action_destroy(port_id, handle, error);

Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
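The example above covers create, update, and destroy; query follows the same pattern. A sketch (assumed usage, not from the patch) of reading aggregated statistics through a handle created from a COUNT action, on a PMD that supports sharing it:

struct rte_flow_query_count counter = { .reset = 0 };
struct rte_flow_error error;

/* The query returns data aggregated across all flow rules
 * currently sharing the action. */
if (rte_flow_shared_action_query(port_id, handle, &counter, &error) == 0) {
        /* counter.hits and counter.bytes hold the aggregate totals. */
}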
rte_flow_shared_action_create;
rte_flow_shared_action_destroy;
rte_flow_shared_action_query;
rte_flow_shared_action_update;
ethdev: add tunnel offload model

The rte_flow API provides the building blocks for vendor-agnostic flow classification offloads. The rte_flow "patterns" and "actions" primitives are fine-grained, giving DPDK applications the flexibility to offload network stacks and complex pipelines. Applications wishing to offload tunneled traffic are required to use rte_flow primitives, such as group, meta, mark, tag, and others, to model their high-level objects.

The hardware model design for high-level software objects is not trivial; furthermore, an optimal design is often vendor-specific. When hardware offloads tunneled traffic in multi-group logic, partially offloaded packets may arrive at the application after they were modified in hardware. In this case, the application may need to restore the original packet headers. Consider the following sequence: the application decaps a packet in one group and jumps to a second group, where it tries to match on a 5-tuple; the match misses, and the packet is sent to the application. The application then receives not the original packet but a modified one. Also, in this case, the application cannot match on the outer header fields, such as the VXLAN vni and 5-tuple.

There are several possible ways to use rte_flow "patterns" and "actions" to resolve the issues above. For example:
1 Map headers to hardware registers using the rte_flow_action_mark/rte_flow_action_tag/rte_flow_set_meta objects.
2 Apply the decap only at the last offload stage, after all the "patterns" have been matched and the packet will be fully offloaded.

Every approach has its pros and cons and is highly dependent on the hardware vendor. For example, some hardware may have a limited number of registers, while other hardware may not support inner actions and must decap before accessing inner headers.

The tunnel offload model resolves these issues. The model's goals are:
1 Provide a unified application API to offload tunneled traffic that is capable of matching on outer headers after decap.
2 Allow the application to restore the outer header of partially offloaded packets.

The tunnel offload model does not introduce new elements into the existing RTE flow model and is implemented as a set of helper functions. For an application to work with the tunnel offload API, it has to adjust its flow rules for multi-table tunnel offload in the following way:
1 Remove the explicit call to the decap action and replace it with the PMD actions obtained from the rte_flow_tunnel_decap_set() helper.
2 Add the PMD items obtained from the rte_flow_tunnel_match() helper to all other rules in the tunnel offload sequence.

VXLAN code example:

Assume the application needs to do inner NAT on a VXLAN packet. The first rule in group 0:

flow create <port id> ingress group 0
  pattern eth / ipv4 / udp dst is 4789 / vxlan / end
  actions {pmd actions} / jump group 3 / end

The first VXLAN packet that arrives matches the rule in group 0 and jumps to group 3. In group 3 the packet misses, since there is no flow to match, and is sent to the application. The application calls rte_flow_get_restore_info() to get the packet's outer header.

The application then inserts a new rule in group 3 to match the outer and inner headers:

flow create <port id> ingress group 3
  pattern {pmd items} / eth / ipv4 dst is 172.10.10.1 /
          udp dst 4789 / vxlan vni is 10 /
          ipv4 dst is 184.1.2.3 / end
  actions set_ipv4_dst 186.1.1.1 / queue index 3 / end

The result of these rules is that a VXLAN packet with vni=10, outer IPv4 dst=172.10.10.1, and inner IPv4 dst=184.1.2.3 will be received decapped on queue 3 with IPv4 dst=186.1.1.1.

Note: the packet in group 3 is considered decapped. All actions in that group are applied to the header that was the inner header before decap. The application may specify an outer header to be matched on; it is the PMD's responsibility to translate these items to outer metadata.

API usage:

/**
 * 1. Initiate RTE flow tunnel object
 */
struct rte_flow_tunnel tunnel = {
        .type = RTE_FLOW_ITEM_TYPE_VXLAN,
        .tun_id = 10,
};

/**
 * 2. Obtain PMD tunnel actions
 *
 * pmd_actions is an intermediate variable application uses to
 * compile actions array
 */
struct rte_flow_action *pmd_actions;
rte_flow_tunnel_decap_set(port_id, &tunnel, &pmd_actions,
                          &num_pmd_actions, &error);

/**
 * 3. offload the first rule
 * matching on VXLAN traffic and jumps to group 3
 * (implicitly decaps packet)
 */
app_actions  = jump group 3
rule_items   = app_items;  /** eth / ipv4 / udp / vxlan */
rule_actions = { pmd_actions, app_actions };
attr.group   = 0;
flow_1 = rte_flow_create(port_id, &attr, rule_items,
                         rule_actions, &error);

/**
 * 4. after flow creation application does not need to keep the
 * tunnel action resources.
 */
rte_flow_tunnel_action_decap_release(port_id, pmd_actions,
                                     num_pmd_actions, &error);

/**
 * 5. After partially offloaded packet miss because there was no
 * matching rule handle miss on group 3
 */
struct rte_flow_restore_info info;
rte_flow_get_restore_info(port_id, mbuf, &info, &error);

/**
 * 6. Offload NAT rule:
 */
app_items = { eth / ipv4 dst is 172.10.10.1 / udp dst 4789 /
              vxlan vni is 10 / ipv4 dst is 184.1.2.3 }
app_actions = { set_ipv4_dst 186.1.1.1 / queue index 3 }

rte_flow_tunnel_match(port_id, &info.tunnel, &pmd_items,
                      &num_pmd_items, &error);
rule_items   = {pmd_items, app_items};
rule_actions = app_actions;
attr.group   = info.group_id;
flow_2 = rte_flow_create(port_id, &attr, rule_items,
                         rule_actions, &error);

/**
 * 7. Release PMD items after rule creation
 */
rte_flow_tunnel_item_release(port_id, pmd_items,
                             num_pmd_items, &error);

References
1. https://mails.dpdk.org/archives/dev/2020-June/index.html

Signed-off-by: Eli Britstein <elibr@mellanox.com>
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
rte_flow_tunnel_decap_set;
rte_flow_tunnel_match;
rte_flow_get_restore_info;
rte_flow_tunnel_action_decap_release;
rte_flow_tunnel_item_release;
# added in 21.02
rte_eth_get_monitor_addr;
};

INTERNAL {
global:
rte_eth_dev_allocate;
rte_eth_dev_allocated;
rte_eth_dev_attach_secondary;
rte_eth_dev_callback_process;
rte_eth_dev_create;
rte_eth_dev_destroy;
rte_eth_dev_is_rx_hairpin_queue;
rte_eth_dev_is_tx_hairpin_queue;
rte_eth_dev_probing_finish;
rte_eth_dev_release_port;
rte_eth_dev_internal_reset;
rte_eth_devargs_parse;
rte_eth_dma_zone_free;
rte_eth_dma_zone_reserve;
rte_eth_hairpin_queue_peer_bind;
rte_eth_hairpin_queue_peer_unbind;
rte_eth_hairpin_queue_peer_update;
rte_eth_switch_domain_alloc;
rte_eth_switch_domain_free;
};