DPDK_21 {
	global:

	rte_eth_add_first_rx_callback;
	rte_eth_add_rx_callback;
	rte_eth_add_tx_callback;
	rte_eth_allmulticast_disable;
	rte_eth_allmulticast_enable;
	rte_eth_allmulticast_get;
	rte_eth_dev_adjust_nb_rx_tx_desc;
	rte_eth_dev_callback_register;
	rte_eth_dev_callback_unregister;
	rte_eth_dev_close;
	rte_eth_dev_configure;
	rte_eth_dev_count_avail;
	rte_eth_dev_count_total;
	rte_eth_dev_default_mac_addr_set;
	rte_eth_dev_flow_ctrl_get;
	rte_eth_dev_flow_ctrl_set;
	rte_eth_dev_fw_version_get;
	rte_eth_dev_get_dcb_info;
	rte_eth_dev_get_eeprom;
	rte_eth_dev_get_eeprom_length;
	rte_eth_dev_get_mtu;
	rte_eth_dev_get_name_by_port;
	rte_eth_dev_get_port_by_name;
	rte_eth_dev_get_reg_info;
	rte_eth_dev_get_sec_ctx;
	rte_eth_dev_get_supported_ptypes;
	rte_eth_dev_get_vlan_offload;
	rte_eth_dev_info_get;
	rte_eth_dev_is_valid_port;
	rte_eth_dev_logtype;
	rte_eth_dev_mac_addr_add;
	rte_eth_dev_mac_addr_remove;
	rte_eth_dev_pool_ops_supported;
	rte_eth_dev_priority_flow_ctrl_set;
	rte_eth_dev_reset;
	rte_eth_dev_rss_hash_conf_get;
	rte_eth_dev_rss_hash_update;
	rte_eth_dev_rss_reta_query;
	rte_eth_dev_rss_reta_update;
	rte_eth_dev_rx_intr_ctl;
	rte_eth_dev_rx_intr_ctl_q;
	rte_eth_dev_rx_intr_disable;
	rte_eth_dev_rx_intr_enable;
	rte_eth_dev_rx_offload_name;
	rte_eth_dev_rx_queue_start;
	rte_eth_dev_rx_queue_stop;
	rte_eth_dev_set_eeprom;
	rte_eth_dev_set_link_down;
	rte_eth_dev_set_link_up;
	rte_eth_dev_set_mc_addr_list;
	rte_eth_dev_set_mtu;
	rte_eth_dev_set_rx_queue_stats_mapping;
	rte_eth_dev_set_tx_queue_stats_mapping;
	rte_eth_dev_set_vlan_ether_type;
	rte_eth_dev_set_vlan_offload;
	rte_eth_dev_set_vlan_pvid;
	rte_eth_dev_set_vlan_strip_on_queue;
	rte_eth_dev_socket_id;
	rte_eth_dev_start;
	rte_eth_dev_stop;
	rte_eth_dev_tx_offload_name;
	rte_eth_dev_tx_queue_start;
	rte_eth_dev_tx_queue_stop;
	rte_eth_dev_uc_all_hash_table_set;
	rte_eth_dev_uc_hash_table_set;
	rte_eth_dev_udp_tunnel_port_add;
	rte_eth_dev_udp_tunnel_port_delete;
	rte_eth_dev_vlan_filter;
	rte_eth_devices;
	rte_eth_find_next;
	rte_eth_find_next_owned_by;
	rte_eth_iterator_cleanup;
	rte_eth_iterator_init;
	rte_eth_iterator_next;
	rte_eth_led_off;
	rte_eth_led_on;
	rte_eth_link_get;
	rte_eth_link_get_nowait;
	rte_eth_macaddr_get;
	rte_eth_mirror_rule_reset;
	rte_eth_mirror_rule_set;
	rte_eth_promiscuous_disable;
	rte_eth_promiscuous_enable;
	rte_eth_promiscuous_get;
	rte_eth_remove_rx_callback;
	rte_eth_remove_tx_callback;
	rte_eth_rx_queue_info_get;
	rte_eth_rx_queue_setup;
	rte_eth_set_queue_rate_limit;
	rte_eth_speed_bitflag;
	rte_eth_stats_get;
	rte_eth_stats_reset;
	rte_eth_timesync_adjust_time;
	rte_eth_timesync_disable;
	rte_eth_timesync_enable;
	rte_eth_timesync_read_rx_timestamp;
	rte_eth_timesync_read_time;
	rte_eth_timesync_read_tx_timestamp;
	rte_eth_timesync_write_time;
	rte_eth_tx_buffer_count_callback;
	rte_eth_tx_buffer_drop_callback;
	rte_eth_tx_buffer_init;
	rte_eth_tx_buffer_set_err_callback;
	rte_eth_tx_done_cleanup;
	rte_eth_tx_queue_info_get;
	rte_eth_tx_queue_setup;
	rte_eth_xstats_get;
	rte_eth_xstats_get_by_id;
	rte_eth_xstats_get_id_by_name;
	rte_eth_xstats_get_names;
	rte_eth_xstats_get_names_by_id;
	rte_eth_xstats_reset;
	rte_flow_copy;
	rte_flow_create;
	rte_flow_destroy;
	rte_flow_error_set;
	rte_flow_flush;
	rte_flow_isolate;
	rte_flow_query;
	rte_flow_validate;

	local: *;
};

EXPERIMENTAL {
	global:

	# added in 17.11
	rte_mtr_capabilities_get;
	rte_mtr_create;
	rte_mtr_destroy;
	rte_mtr_meter_disable;
	rte_mtr_meter_dscp_table_update;
	rte_mtr_meter_enable;
	rte_mtr_meter_profile_add;
	rte_mtr_meter_profile_delete;
	rte_mtr_meter_profile_update;
	rte_mtr_policer_actions_update;
	rte_mtr_stats_read;
	rte_mtr_stats_update;

	# added in 18.02
	rte_eth_dev_is_removed;
	rte_eth_dev_owner_delete;
	rte_eth_dev_owner_get;
	rte_eth_dev_owner_new;
	rte_eth_dev_owner_set;
	rte_eth_dev_owner_unset;

	# added in 18.05
	rte_eth_dev_get_module_eeprom;
	rte_eth_dev_get_module_info;

	# added in 18.11
	rte_eth_dev_rx_intr_ctl_q_get_fd;
	rte_flow_conv;

	# added in 19.05
	rte_eth_find_next_of;
	rte_eth_find_next_sibling;

	# added in 19.08
	rte_eth_read_clock;

	# added in 19.11
	rte_eth_dev_hairpin_capability_get;
	rte_eth_rx_burst_mode_get;
	rte_eth_rx_hairpin_queue_setup;
	rte_eth_tx_burst_mode_get;
	rte_eth_tx_hairpin_queue_setup;
	rte_flow_dynf_metadata_offs;
	rte_flow_dynf_metadata_mask;
	rte_flow_dynf_metadata_register;
	rte_eth_dev_set_ptypes;

	# added in 20.02
	rte_flow_dev_dump;

	# added in 20.05
	__rte_ethdev_trace_configure;
	__rte_ethdev_trace_rxq_setup;
	__rte_ethdev_trace_txq_setup;
	__rte_ethdev_trace_start;
	__rte_ethdev_trace_stop;
	__rte_ethdev_trace_close;
	__rte_ethdev_trace_rx_burst;
	__rte_ethdev_trace_tx_burst;
	rte_flow_get_aged_flows;

	# Marked as experimental in 20.11
	rte_tm_capabilities_get;
	rte_tm_get_number_of_leaf_nodes;
	rte_tm_hierarchy_commit;
	rte_tm_level_capabilities_get;
	rte_tm_mark_ip_dscp;
	rte_tm_mark_ip_ecn;
	rte_tm_mark_vlan_dei;
	rte_tm_node_add;
	rte_tm_node_capabilities_get;
	rte_tm_node_cman_update;
	rte_tm_node_delete;
	rte_tm_node_parent_update;
	rte_tm_node_resume;
	rte_tm_node_shaper_update;
	rte_tm_node_shared_shaper_update;
	rte_tm_node_shared_wred_context_update;
	rte_tm_node_stats_read;
	rte_tm_node_stats_update;
	rte_tm_node_suspend;
	rte_tm_node_type_get;
	rte_tm_node_wfq_weight_mode_update;
	rte_tm_node_wred_context_update;
	rte_tm_shaper_profile_add;
	rte_tm_shaper_profile_delete;
	rte_tm_shared_shaper_add_update;
	rte_tm_shared_shaper_delete;
	rte_tm_shared_wred_context_add_update;
	rte_tm_shared_wred_context_delete;
	rte_tm_wred_profile_add;
	rte_tm_wred_profile_delete;

	# added in 20.11
	rte_eth_hairpin_bind;
	rte_eth_hairpin_get_peer_ports;
	rte_eth_hairpin_unbind;
	rte_eth_link_speed_to_str;
	rte_eth_link_to_str;
	rte_eth_fec_get_capability;
	rte_eth_fec_get;
	rte_eth_fec_set;
	rte_flow_shared_action_create;
	rte_flow_shared_action_destroy;
	rte_flow_shared_action_query;
	rte_flow_shared_action_update;
	rte_flow_tunnel_decap_set;
	rte_flow_tunnel_match;
	rte_flow_get_restore_info;
	rte_flow_tunnel_action_decap_release;
	rte_flow_tunnel_item_release;

	# added in 21.02
	rte_eth_get_monitor_addr;
};

INTERNAL {
	global:

	rte_eth_dev_allocate;
	rte_eth_dev_allocated;
	rte_eth_dev_attach_secondary;
	rte_eth_dev_callback_process;
	rte_eth_dev_create;
	rte_eth_dev_destroy;
	rte_eth_dev_is_rx_hairpin_queue;
	rte_eth_dev_is_tx_hairpin_queue;
	rte_eth_dev_probing_finish;
	rte_eth_dev_release_port;
	rte_eth_dev_internal_reset;
	rte_eth_devargs_parse;
	rte_eth_dma_zone_free;
	rte_eth_dma_zone_reserve;
	rte_eth_hairpin_queue_peer_bind;
	rte_eth_hairpin_queue_peer_unbind;
	rte_eth_hairpin_queue_peer_update;
	rte_eth_switch_domain_alloc;
	rte_eth_switch_domain_free;
};