numam-dpdk/lib/ethdev/rte_flow.h

4266 lines
117 KiB
C
Raw Normal View History

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016 6WIND S.A.
* Copyright 2016 Mellanox Technologies, Ltd
*/
#ifndef RTE_FLOW_H_
#define RTE_FLOW_H_
/**
* @file
* RTE generic flow API
*
* This interface provides the ability to program packet matching and
* associated actions in hardware through flow rules.
*/
#include <stddef.h>
#include <stdint.h>
#include <rte_arp.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_icmp.h>
#include <rte_ip.h>
#include <rte_sctp.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_vxlan.h>
#include <rte_byteorder.h>
#include <rte_esp.h>
#include <rte_higig.h>
#include <rte_ecpri.h>
ethdev: introduce conntrack flow action and item This commit introduces the conntrack action and item. Usually the HW offloading is stateless. For some stateful offloading like a TCP connection, HW module will help provide the ability of a full offloading w/o SW participation after the connection was established. The basic usage is that in the first flow rule the application should add the conntrack action and jump to the next flow table. In the following flow rule(s) of the next table, the application should use the conntrack item to match on the result. A TCP connection has two directions traffic. To set a conntrack action context correctly, the information of packets from both directions are required. The conntrack action should be created on one ethdev port and supply the peer ethdev port as a parameter to the action. After context created, it could only be used between these two ethdev ports (dual-port mode) or a single port. The application should modify the action via the API "rte_action_handle_update" only when before using it to create a flow rule with conntrack for the opposite direction. This will help the driver to recognize the direction of the flow to be created, especially in the single-port mode, in which case the traffic from both directions will go through the same ethdev port if the application works as an "forwarding engine" but not an end point. There is no need to call the update interface if the subsequent flow rules have nothing to be changed. Query will be supported via "rte_action_handle_query" interface, about the current packets information and connection status. The fields query capabilities depends on the HW. For the packets received during the conntrack setup, it is suggested to re-inject the packets in order to make sure the conntrack module works correctly without missing any packet. Only the valid packets should pass the conntrack, packets with invalid TCP information, like out of window, or with invalid header, like malformed, should not pass. Naming and definition: https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/ netfilter/nf_conntrack_tcp.h https://elixir.bootlin.com/linux/latest/source/net/netfilter/ nf_conntrack_proto_tcp.c Other reference: https://www.usenix.org/legacy/events/sec01/invitedtalks/rooij.pdf Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 17:51:30 +00:00
#include <rte_bitops.h>
ethdev: extend flow metadata Currently, metadata can be set on egress path via mbuf tx_metadata field with PKT_TX_METADATA flag and RTE_FLOW_ITEM_TYPE_META matches metadata. This patch extends the metadata feature usability. 1) RTE_FLOW_ACTION_TYPE_SET_META When supporting multiple tables, Tx metadata can also be set by a rule and matched by another rule. This new action allows metadata to be set as a result of flow match. 2) Metadata on ingress There's also need to support metadata on ingress. Metadata can be set by SET_META action and matched by META item like Tx. The final value set by the action will be delivered to application via metadata dynamic field of mbuf which can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior to use SET_META action. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. If application is going to engage the metadata feature it registers the metadata dynamic fields, then PMD checks the metadata field availability and handles the appropriate fields in datapath. For loopback/hairpin packet, metadata set on Rx/Tx may or may not be propagated to the other path depending on hardware capability. MARK and METADATA look similar and might operate in similar way, but not interacting. Initially, there were proposed two metadata related actions: - RTE_FLOW_ACTION_TYPE_FLAG - RTE_FLOW_ACTION_TYPE_MARK These actions set the special flag in the packet metadata, MARK action stores some specified value in the metadata storage, and, on the packet receiving PMD puts the flag and value to the mbuf and applications can see the packet was threated inside flow engine according to the appropriate RTE flow(s). MARK and FLAG are like some kind of gateway to transfer some per-packet information from the flow engine to the application via receiving datapath. Also, there is the item of type RTE_FLOW_ITEM_TYPE_MARK provided. It allows us to extend the flow match pattern with the capability to match the metadata values set by MARK/FLAG actions on other flows. From the datapath point of view, the MARK and FLAG are related to the receiving side only. It would useful to have the same gateway on the transmitting side and there was the feature of type RTE_FLOW_ITEM_TYPE_META was proposed. The application can fill the field in mbuf and this value will be transferred to some field in the packet metadata inside the flow engine. It did not matter whether these metadata fields are shared because of MARK and META items belonged to different domains (receiving and transmitting) and could be vendor-specific. So far, so good, DPDK proposes some entities to control metadata inside the flow engine and gateways to exchange these values on a per-packet basis via datapaths. As we can see, the MARK and META means are not symmetric, there is absent action which would allow us to set META value on the transmitting path. So, the action of type: - RTE_FLOW_ACTION_TYPE_SET_META was proposed. The next, applications raise the new requirements for packet metadata. The flow ngines are getting more complex, internal switches are introduced, multiple ports might be supported within the same flow engine namespace. From the DPDK points of view, it means the packets might be sent on one eth_dev port and received on the other one, and the packet path inside the flow engine entirely belongs to the same hardware device. The simplest example is SR-IOV with PF, VFs and the representors. And there is a brilliant opportunity to provide some out-of-band channel to transfer some extra data from one port to another one, besides the packet data itself. And applications would like to use this opportunity. It is supposed for application to use trials (with rte_flow_validate) to detect which metadata features (FLAG, MARK, META) actually supported by PMD and underlying hardware. It might depend on PMD configuration, system software, hardware settings, etc., and should be detected in run time. Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:30 +00:00
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* Flow rule attributes.
*
* Priorities are set on a per rule based within groups.
*
* Lower values denote higher priority, the highest priority for a flow rule
* is 0, so that a flow that matches for than one rule, the rule with the
* lowest priority value will always be matched.
*
* Although optional, applications are encouraged to group similar rules as
* much as possible to fully take advantage of hardware capabilities
* (e.g. optimized matching) and work around limitations (e.g. a single
* pattern type possibly allowed in a given group). Applications should be
* aware that groups are not linked by default, and that they must be
* explicitly linked by the application using the JUMP action.
*
* Priority levels are arbitrary and up to the application, they
* do not need to be contiguous nor start from 0, however the maximum number
* varies between devices and may be affected by existing flow rules.
*
* If a packet is matched by several rules of a given group for a given
* priority level, the outcome is undefined. It can take any path, may be
* duplicated or even cause unrecoverable errors.
*
* Note that support for more than a single group and priority level is not
* guaranteed.
*
* Flow rules can apply to inbound and/or outbound traffic (ingress/egress).
*
* Several pattern items and actions are valid and can be used in both
* directions. Those valid for only one direction are described as such.
*
* At least one direction must be specified.
*
* Specifying both directions at once for a given rule is not recommended
* but may be valid in a few cases (e.g. shared counter).
*/
struct rte_flow_attr {
uint32_t group; /**< Priority group. */
uint32_t priority; /**< Rule priority level within group. */
uint32_t ingress:1; /**< Rule applies to ingress traffic. */
uint32_t egress:1; /**< Rule applies to egress traffic. */
/**
* Instead of simply matching the properties of traffic as it would
* appear on a given DPDK port ID, enabling this attribute transfers
* a flow rule to the lowest possible level of any device endpoints
* found in the pattern.
*
* When supported, this effectively enables an application to
* re-route traffic not necessarily intended for it (e.g. coming
* from or addressed to different physical ports, VFs or
* applications) at the device level.
*
* It complements the behavior of some pattern items such as
* RTE_FLOW_ITEM_TYPE_PHY_PORT and is meaningless without them.
*
* When transferring flow rules, ingress and egress attributes keep
* their original meaning, as if processing traffic emitted or
* received by the application.
*/
uint32_t transfer:1;
uint32_t reserved:29; /**< Reserved, must be zero. */
};
/**
* Matching pattern item types.
*
* Pattern items fall in two categories:
*
* - Matching protocol headers and packet data, usually associated with a
* specification structure. These must be stacked in the same order as the
* protocol layers to match inside packets, starting from the lowest.
*
* - Matching meta-data or affecting pattern processing, often without a
* specification structure. Since they do not match packet contents, their
* position in the list is usually not relevant.
*
* See the description of individual types for more information. Those
* marked with [META] fall into the second category.
*/
enum rte_flow_item_type {
/**
* [META]
*
* End marker for item lists. Prevents further processing of items,
* thereby ending the pattern.
*
* No associated specification structure.
*/
RTE_FLOW_ITEM_TYPE_END,
/**
* [META]
*
* Used as a placeholder for convenience. It is ignored and simply
* discarded by PMDs.
*
* No associated specification structure.
*/
RTE_FLOW_ITEM_TYPE_VOID,
/**
* [META]
*
* Inverted matching, i.e. process packets that do not match the
* pattern.
*
* No associated specification structure.
*/
RTE_FLOW_ITEM_TYPE_INVERT,
/**
* Matches any protocol in place of the current layer, a single ANY
* may also stand for several protocol layers.
*
* See struct rte_flow_item_any.
*/
RTE_FLOW_ITEM_TYPE_ANY,
/**
* [META]
*
* Matches traffic originating from (ingress) or going to (egress)
* the physical function of the current device.
*
* No associated specification structure.
*/
RTE_FLOW_ITEM_TYPE_PF,
/**
* [META]
*
* Matches traffic originating from (ingress) or going to (egress) a
* given virtual function of the current device.
*
* See struct rte_flow_item_vf.
*/
RTE_FLOW_ITEM_TYPE_VF,
/**
* [META]
*
* Matches traffic originating from (ingress) or going to (egress) a
* physical port of the underlying device.
*
* See struct rte_flow_item_phy_port.
*/
RTE_FLOW_ITEM_TYPE_PHY_PORT,
/**
* [META]
*
* Matches traffic originating from (ingress) or going to (egress) a
* given DPDK port ID.
*
* See struct rte_flow_item_port_id.
*/
RTE_FLOW_ITEM_TYPE_PORT_ID,
/**
* Matches a byte string of a given length at a given offset.
*
* See struct rte_flow_item_raw.
*/
RTE_FLOW_ITEM_TYPE_RAW,
/**
* Matches an Ethernet header.
*
* See struct rte_flow_item_eth.
*/
RTE_FLOW_ITEM_TYPE_ETH,
/**
* Matches an 802.1Q/ad VLAN tag.
*
* See struct rte_flow_item_vlan.
*/
RTE_FLOW_ITEM_TYPE_VLAN,
/**
* Matches an IPv4 header.
*
* See struct rte_flow_item_ipv4.
*/
RTE_FLOW_ITEM_TYPE_IPV4,
/**
* Matches an IPv6 header.
*
* See struct rte_flow_item_ipv6.
*/
RTE_FLOW_ITEM_TYPE_IPV6,
/**
* Matches an ICMP header.
*
* See struct rte_flow_item_icmp.
*/
RTE_FLOW_ITEM_TYPE_ICMP,
/**
* Matches a UDP header.
*
* See struct rte_flow_item_udp.
*/
RTE_FLOW_ITEM_TYPE_UDP,
/**
* Matches a TCP header.
*
* See struct rte_flow_item_tcp.
*/
RTE_FLOW_ITEM_TYPE_TCP,
/**
* Matches a SCTP header.
*
* See struct rte_flow_item_sctp.
*/
RTE_FLOW_ITEM_TYPE_SCTP,
/**
* Matches a VXLAN header.
*
* See struct rte_flow_item_vxlan.
*/
RTE_FLOW_ITEM_TYPE_VXLAN,
/**
* Matches a E_TAG header.
*
* See struct rte_flow_item_e_tag.
*/
RTE_FLOW_ITEM_TYPE_E_TAG,
/**
* Matches a NVGRE header.
*
* See struct rte_flow_item_nvgre.
*/
RTE_FLOW_ITEM_TYPE_NVGRE,
/**
* Matches a MPLS header.
*
* See struct rte_flow_item_mpls.
*/
RTE_FLOW_ITEM_TYPE_MPLS,
/**
* Matches a GRE header.
*
* See struct rte_flow_item_gre.
*/
RTE_FLOW_ITEM_TYPE_GRE,
/**
* [META]
*
* Fuzzy pattern match, expect faster than default.
*
* This is for device that support fuzzy matching option.
* Usually a fuzzy matching is fast but the cost is accuracy.
*
* See struct rte_flow_item_fuzzy.
*/
RTE_FLOW_ITEM_TYPE_FUZZY,
/**
* Matches a GTP header.
*
* Configure flow for GTP packets.
*
* See struct rte_flow_item_gtp.
*/
RTE_FLOW_ITEM_TYPE_GTP,
/**
* Matches a GTP header.
*
* Configure flow for GTP-C packets.
*
* See struct rte_flow_item_gtp.
*/
RTE_FLOW_ITEM_TYPE_GTPC,
/**
* Matches a GTP header.
*
* Configure flow for GTP-U packets.
*
* See struct rte_flow_item_gtp.
*/
RTE_FLOW_ITEM_TYPE_GTPU,
/**
* Matches a ESP header.
*
* See struct rte_flow_item_esp.
*/
RTE_FLOW_ITEM_TYPE_ESP,
/**
* Matches a GENEVE header.
*
* See struct rte_flow_item_geneve.
*/
RTE_FLOW_ITEM_TYPE_GENEVE,
/**
* Matches a VXLAN-GPE header.
*
* See struct rte_flow_item_vxlan_gpe.
*/
RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
/**
* Matches an ARP header for Ethernet/IPv4.
*
* See struct rte_flow_item_arp_eth_ipv4.
*/
RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4,
/**
* Matches the presence of any IPv6 extension header.
*
* See struct rte_flow_item_ipv6_ext.
*/
RTE_FLOW_ITEM_TYPE_IPV6_EXT,
/**
* Matches any ICMPv6 header.
*
* See struct rte_flow_item_icmp6.
*/
RTE_FLOW_ITEM_TYPE_ICMP6,
/**
* Matches an ICMPv6 neighbor discovery solicitation.
*
* See struct rte_flow_item_icmp6_nd_ns.
*/
RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS,
/**
* Matches an ICMPv6 neighbor discovery advertisement.
*
* See struct rte_flow_item_icmp6_nd_na.
*/
RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA,
/**
* Matches the presence of any ICMPv6 neighbor discovery option.
*
* See struct rte_flow_item_icmp6_nd_opt.
*/
RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT,
/**
* Matches an ICMPv6 neighbor discovery source Ethernet link-layer
* address option.
*
* See struct rte_flow_item_icmp6_nd_opt_sla_eth.
*/
RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH,
/**
* Matches an ICMPv6 neighbor discovery target Ethernet link-layer
* address option.
*
* See struct rte_flow_item_icmp6_nd_opt_tla_eth.
*/
RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH,
/**
* Matches specified mark field.
*
* See struct rte_flow_item_mark.
*/
RTE_FLOW_ITEM_TYPE_MARK,
/**
* [META]
*
ethdev: extend flow metadata Currently, metadata can be set on egress path via mbuf tx_metadata field with PKT_TX_METADATA flag and RTE_FLOW_ITEM_TYPE_META matches metadata. This patch extends the metadata feature usability. 1) RTE_FLOW_ACTION_TYPE_SET_META When supporting multiple tables, Tx metadata can also be set by a rule and matched by another rule. This new action allows metadata to be set as a result of flow match. 2) Metadata on ingress There's also need to support metadata on ingress. Metadata can be set by SET_META action and matched by META item like Tx. The final value set by the action will be delivered to application via metadata dynamic field of mbuf which can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior to use SET_META action. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. If application is going to engage the metadata feature it registers the metadata dynamic fields, then PMD checks the metadata field availability and handles the appropriate fields in datapath. For loopback/hairpin packet, metadata set on Rx/Tx may or may not be propagated to the other path depending on hardware capability. MARK and METADATA look similar and might operate in similar way, but not interacting. Initially, there were proposed two metadata related actions: - RTE_FLOW_ACTION_TYPE_FLAG - RTE_FLOW_ACTION_TYPE_MARK These actions set the special flag in the packet metadata, MARK action stores some specified value in the metadata storage, and, on the packet receiving PMD puts the flag and value to the mbuf and applications can see the packet was threated inside flow engine according to the appropriate RTE flow(s). MARK and FLAG are like some kind of gateway to transfer some per-packet information from the flow engine to the application via receiving datapath. Also, there is the item of type RTE_FLOW_ITEM_TYPE_MARK provided. It allows us to extend the flow match pattern with the capability to match the metadata values set by MARK/FLAG actions on other flows. From the datapath point of view, the MARK and FLAG are related to the receiving side only. It would useful to have the same gateway on the transmitting side and there was the feature of type RTE_FLOW_ITEM_TYPE_META was proposed. The application can fill the field in mbuf and this value will be transferred to some field in the packet metadata inside the flow engine. It did not matter whether these metadata fields are shared because of MARK and META items belonged to different domains (receiving and transmitting) and could be vendor-specific. So far, so good, DPDK proposes some entities to control metadata inside the flow engine and gateways to exchange these values on a per-packet basis via datapaths. As we can see, the MARK and META means are not symmetric, there is absent action which would allow us to set META value on the transmitting path. So, the action of type: - RTE_FLOW_ACTION_TYPE_SET_META was proposed. The next, applications raise the new requirements for packet metadata. The flow ngines are getting more complex, internal switches are introduced, multiple ports might be supported within the same flow engine namespace. From the DPDK points of view, it means the packets might be sent on one eth_dev port and received on the other one, and the packet path inside the flow engine entirely belongs to the same hardware device. The simplest example is SR-IOV with PF, VFs and the representors. And there is a brilliant opportunity to provide some out-of-band channel to transfer some extra data from one port to another one, besides the packet data itself. And applications would like to use this opportunity. It is supposed for application to use trials (with rte_flow_validate) to detect which metadata features (FLAG, MARK, META) actually supported by PMD and underlying hardware. It might depend on PMD configuration, system software, hardware settings, etc., and should be detected in run time. Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:30 +00:00
* Matches a metadata value.
*
* See struct rte_flow_item_meta.
*/
RTE_FLOW_ITEM_TYPE_META,
/**
* Matches a GRE optional key field.
*
* The value should a big-endian 32bit integer.
*
* When this item present the K bit is implicitly matched as "1"
* in the default mask.
*
* @p spec/mask type:
* @code rte_be32_t * @endcode
*/
RTE_FLOW_ITEM_TYPE_GRE_KEY,
/**
* Matches a GTP extension header: PDU session container.
*
* Configure flow for GTP packets with extension header type 0x85.
*
* See struct rte_flow_item_gtp_psc.
*/
RTE_FLOW_ITEM_TYPE_GTP_PSC,
/**
* Matches a PPPoE header.
*
* Configure flow for PPPoE session packets.
*
* See struct rte_flow_item_pppoe.
*/
RTE_FLOW_ITEM_TYPE_PPPOES,
/**
* Matches a PPPoE header.
*
* Configure flow for PPPoE discovery packets.
*
* See struct rte_flow_item_pppoe.
*/
RTE_FLOW_ITEM_TYPE_PPPOED,
/**
* Matches a PPPoE optional proto_id field.
*
* It only applies to PPPoE session packets.
*
* See struct rte_flow_item_pppoe_proto_id.
*/
RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
/**
* Matches Network service header (NSH).
* See struct rte_flow_item_nsh.
*
*/
RTE_FLOW_ITEM_TYPE_NSH,
/**
* Matches Internet Group Management Protocol (IGMP).
* See struct rte_flow_item_igmp.
*
*/
RTE_FLOW_ITEM_TYPE_IGMP,
/**
* Matches IP Authentication Header (AH).
* See struct rte_flow_item_ah.
*
*/
RTE_FLOW_ITEM_TYPE_AH,
/**
* Matches a HIGIG header.
* see struct rte_flow_item_higig2_hdr.
*/
RTE_FLOW_ITEM_TYPE_HIGIG2,
ethdev: add flow tag A tag is a transient data which can be used during flow match. This can be used to store match result from a previous table so that the same pattern need not be matched again on the next table. Even if outer header is decapsulated on the previous match, the match result can be kept. Some device expose internal registers of its flow processing pipeline and those registers are quite useful for stateful connection tracking as it keeps status of flow matching. Multiple tags are supported by specifying index. Example testpmd commands are: flow create 0 ingress pattern ... / end actions set_tag index 2 value 0xaa00bb mask 0xffff00ff / set_tag index 3 value 0x123456 mask 0xffffff / vxlan_decap / jump group 1 / end flow create 0 ingress pattern ... / end actions set_tag index 2 value 0xcc00 mask 0xff00 / set_tag index 3 value 0x123456 mask 0xffffff / vxlan_decap / jump group 1 / end flow create 0 ingress group 1 pattern tag index is 2 value spec 0xaa00bb value mask 0xffff00ff / eth ... / end actions ... jump group 2 / end flow create 0 ingress group 1 pattern tag index is 2 value spec 0xcc00 value mask 0xff00 / tag index is 3 value spec 0x123456 value mask 0xffffff / eth ... / end actions ... / end flow create 0 ingress group 2 pattern tag index is 3 value spec 0x123456 value mask 0xffffff / eth ... / end actions ... / end Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-10-27 18:42:28 +00:00
/**
ethdev: add flow tag A tag is a transient data which can be used during flow match. This can be used to store match result from a previous table so that the same pattern need not be matched again on the next table. Even if outer header is decapsulated on the previous match, the match result can be kept. Some device expose internal registers of its flow processing pipeline and those registers are quite useful for stateful connection tracking as it keeps status of flow matching. Multiple tags are supported by specifying index. Example testpmd commands are: flow create 0 ingress pattern ... / end actions set_tag index 2 value 0xaa00bb mask 0xffff00ff / set_tag index 3 value 0x123456 mask 0xffffff / vxlan_decap / jump group 1 / end flow create 0 ingress pattern ... / end actions set_tag index 2 value 0xcc00 mask 0xff00 / set_tag index 3 value 0x123456 mask 0xffffff / vxlan_decap / jump group 1 / end flow create 0 ingress group 1 pattern tag index is 2 value spec 0xaa00bb value mask 0xffff00ff / eth ... / end actions ... jump group 2 / end flow create 0 ingress group 1 pattern tag index is 2 value spec 0xcc00 value mask 0xff00 / tag index is 3 value spec 0x123456 value mask 0xffffff / eth ... / end actions ... / end flow create 0 ingress group 2 pattern tag index is 3 value spec 0x123456 value mask 0xffffff / eth ... / end actions ... / end Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-10-27 18:42:28 +00:00
* [META]
*
* Matches a tag value.
*
* See struct rte_flow_item_tag.
*/
RTE_FLOW_ITEM_TYPE_TAG,
/**
* Matches a L2TPv3 over IP header.
*
* Configure flow for L2TPv3 over IP packets.
*
* See struct rte_flow_item_l2tpv3oip.
*/
RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
/**
* Matches PFCP Header.
* See struct rte_flow_item_pfcp.
*
*/
RTE_FLOW_ITEM_TYPE_PFCP,
/**
* Matches eCPRI Header.
*
* Configure flow for eCPRI over ETH or UDP packets.
*
* See struct rte_flow_item_ecpri.
*/
RTE_FLOW_ITEM_TYPE_ECPRI,
/**
* Matches the presence of IPv6 fragment extension header.
*
* See struct rte_flow_item_ipv6_frag_ext.
*/
RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
/**
* Matches Geneve Variable Length Option
*
* See struct rte_flow_item_geneve_opt
*/
RTE_FLOW_ITEM_TYPE_GENEVE_OPT,
ethdev: add packet integrity check flow rules Currently, DPDK application can offload the checksum check, and report it in the mbuf. However, as more and more applications are offloading some or all logic and action to the HW, there is a need to check the packet integrity so the right decision can be taken. The application logic can be positive meaning if the packet is valid jump / do actions, or negative if packet is not valid jump to SW / do actions (like drop) and add default flow (match all in low priority) that will direct the miss packet to the miss path. Since currently rte_flow works in positive way the assumption is that the positive way will be the common way in this case also. When thinking what is the best API to implement such feature, we need to consider the following (in no specific order): 1. API breakage. 2. Simplicity. 3. Performance. 4. HW capabilities. 5. rte_flow limitation. 6. Flexibility. First option: Add integrity flags to each of the items. For example add checksum_ok to IPv4 item. Pros: 1. No new rte_flow item. 2. Simple in the way that on each item the app can see what checks are available. Cons: 1. API breakage. 2. Increase number of flows, since app can't add global rule and must have dedicated flow for each of the flow combinations, for example matching on ICMP traffic or UDP/TCP traffic with IPv4 / IPv6 will result in 5 flows. Second option: dedicated item Pros: 1. No API breakage, and there will be no for some time due to having extra space. (by using bits) 2. Just one flow to support the ICMP or UDP/TCP traffic with IPv4 / IPv6. 3. Simplicity application can just look at one place to see all possible checks. 4. Allow future support for more tests. Cons: 1. New item, that holds number of fields from different items. For starter the following bits are suggested: 1. packet_ok - means that all HW checks depending on packet layer have passed. This may mean that in some HW such flow should be split to number of flows or fail. 2. l2_ok - all check for layer 2 have passed. 3. l3_ok - all check for layer 3 have passed. If packet doesn't have L3 layer this check should fail. 4. l4_ok - all check for layer 4 have passed. If packet doesn't have L4 layer this check should fail. 5. l2_crc_ok - the layer 2 CRC is O.K. 6. ipv4_csum_ok - IPv4 checksum is O.K. It is possible that the IPv4 checksum will be O.K. but the l3_ok will be 0. It is not possible that checksum will be 0 and the l3_ok will be 1. 7. l4_csum_ok - layer 4 checksum is O.K. 8. l3_len_OK - check that the reported layer 3 length is smaller than the frame length. Example of usage: 1. Check packets from all possible layers for integrity. flow create integrity spec packet_ok = 1 mask packet_ok = 1 ..... 2. Check only packet with layer 4 (UDP / TCP) flow create integrity spec l3_ok = 1, l4_ok = 1 mask l3_ok = 1 l4_ok = 1 Signed-off-by: Ori Kam <orika@nvidia.com> Acked-by: Ferruh Yigit <ferruh.yigit@intel.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 12:44:30 +00:00
/**
* [META]
*
* Matches on packet integrity.
* For some devices application needs to enable integration checks in HW
* before using this item.
*
* @see struct rte_flow_item_integrity.
*/
RTE_FLOW_ITEM_TYPE_INTEGRITY,
ethdev: introduce conntrack flow action and item This commit introduces the conntrack action and item. Usually the HW offloading is stateless. For some stateful offloading like a TCP connection, HW module will help provide the ability of a full offloading w/o SW participation after the connection was established. The basic usage is that in the first flow rule the application should add the conntrack action and jump to the next flow table. In the following flow rule(s) of the next table, the application should use the conntrack item to match on the result. A TCP connection has two directions traffic. To set a conntrack action context correctly, the information of packets from both directions are required. The conntrack action should be created on one ethdev port and supply the peer ethdev port as a parameter to the action. After context created, it could only be used between these two ethdev ports (dual-port mode) or a single port. The application should modify the action via the API "rte_action_handle_update" only when before using it to create a flow rule with conntrack for the opposite direction. This will help the driver to recognize the direction of the flow to be created, especially in the single-port mode, in which case the traffic from both directions will go through the same ethdev port if the application works as an "forwarding engine" but not an end point. There is no need to call the update interface if the subsequent flow rules have nothing to be changed. Query will be supported via "rte_action_handle_query" interface, about the current packets information and connection status. The fields query capabilities depends on the HW. For the packets received during the conntrack setup, it is suggested to re-inject the packets in order to make sure the conntrack module works correctly without missing any packet. Only the valid packets should pass the conntrack, packets with invalid TCP information, like out of window, or with invalid header, like malformed, should not pass. Naming and definition: https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/ netfilter/nf_conntrack_tcp.h https://elixir.bootlin.com/linux/latest/source/net/netfilter/ nf_conntrack_proto_tcp.c Other reference: https://www.usenix.org/legacy/events/sec01/invitedtalks/rooij.pdf Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 17:51:30 +00:00
/**
* [META]
*
* Matches conntrack state.
*
* @see struct rte_flow_item_conntrack.
*/
RTE_FLOW_ITEM_TYPE_CONNTRACK,
};
/**
*
* RTE_FLOW_ITEM_TYPE_HIGIG2
* Matches higig2 header
*/
RTE_STD_C11
struct rte_flow_item_higig2_hdr {
struct rte_higig2_hdr hdr;
};
/** Default mask for RTE_FLOW_ITEM_TYPE_HIGIG2. */
#ifndef __cplusplus
static const struct rte_flow_item_higig2_hdr rte_flow_item_higig2_hdr_mask = {
.hdr = {
.ppt1 = {
.classification = 0xffff,
.vid = 0xfff,
},
},
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_ANY
*
* Matches any protocol in place of the current layer, a single ANY may also
* stand for several protocol layers.
*
* This is usually specified as the first pattern item when looking for a
* protocol anywhere in a packet.
*
* A zeroed mask stands for any number of layers.
*/
struct rte_flow_item_any {
uint32_t num; /**< Number of layers covered. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ANY. */
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#ifndef __cplusplus
static const struct rte_flow_item_any rte_flow_item_any_mask = {
.num = 0x00000000,
};
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#endif
/**
* RTE_FLOW_ITEM_TYPE_VF
*
* Matches traffic originating from (ingress) or going to (egress) a given
* virtual function of the current device.
*
* If supported, should work even if the virtual function is not managed by
* the application and thus not associated with a DPDK port ID.
*
* Note this pattern item does not match VF representors traffic which, as
* separate entities, should be addressed through their own DPDK port IDs.
*
* - Can be specified multiple times to match traffic addressed to several
* VF IDs.
* - Can be combined with a PF item to match both PF and VF traffic.
*
* A zeroed mask can be used to match any VF ID.
*/
struct rte_flow_item_vf {
uint32_t id; /**< VF ID. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_VF. */
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#ifndef __cplusplus
static const struct rte_flow_item_vf rte_flow_item_vf_mask = {
.id = 0x00000000,
};
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#endif
/**
* RTE_FLOW_ITEM_TYPE_PHY_PORT
*
* Matches traffic originating from (ingress) or going to (egress) a
* physical port of the underlying device.
*
* The first PHY_PORT item overrides the physical port normally associated
* with the specified DPDK input port (port_id). This item can be provided
* several times to match additional physical ports.
*
* Note that physical ports are not necessarily tied to DPDK input ports
* (port_id) when those are not under DPDK control. Possible values are
* specific to each device, they are not necessarily indexed from zero and
* may not be contiguous.
*
* As a device property, the list of allowed values as well as the value
* associated with a port_id should be retrieved by other means.
*
* A zeroed mask can be used to match any port index.
*/
struct rte_flow_item_phy_port {
uint32_t index; /**< Physical port index. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_PHY_PORT. */
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#ifndef __cplusplus
static const struct rte_flow_item_phy_port rte_flow_item_phy_port_mask = {
.index = 0x00000000,
};
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#endif
/**
* RTE_FLOW_ITEM_TYPE_PORT_ID
*
* Matches traffic originating from (ingress) or going to (egress) a given
* DPDK port ID.
*
* Normally only supported if the port ID in question is known by the
* underlying PMD and related to the device the flow rule is created
* against.
*
* This must not be confused with @p PHY_PORT which refers to the physical
* port of a device, whereas @p PORT_ID refers to a struct rte_eth_dev
* object on the application side (also known as "port representor"
* depending on the kind of underlying device).
*/
struct rte_flow_item_port_id {
uint32_t id; /**< DPDK port ID. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_PORT_ID. */
#ifndef __cplusplus
static const struct rte_flow_item_port_id rte_flow_item_port_id_mask = {
.id = 0xffffffff,
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_RAW
*
* Matches a byte string of a given length at a given offset.
*
* Offset is either absolute (using the start of the packet) or relative to
* the end of the previous matched item in the stack, in which case negative
* values are allowed.
*
* If search is enabled, offset is used as the starting point. The search
* area can be delimited by setting limit to a nonzero value, which is the
* maximum number of bytes after offset where the pattern may start.
*
* Matching a zero-length pattern is allowed, doing so resets the relative
* offset for subsequent items.
*
* This type does not support ranges (struct rte_flow_item.last).
*/
struct rte_flow_item_raw {
uint32_t relative:1; /**< Look for pattern after the previous item. */
uint32_t search:1; /**< Search pattern from offset (see also limit). */
uint32_t reserved:30; /**< Reserved, must be set to zero. */
int32_t offset; /**< Absolute or relative offset for pattern. */
uint16_t limit; /**< Search area limit for start of pattern. */
uint16_t length; /**< Pattern length. */
const uint8_t *pattern; /**< Byte string to look for. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_RAW. */
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#ifndef __cplusplus
static const struct rte_flow_item_raw rte_flow_item_raw_mask = {
.relative = 1,
.search = 1,
.reserved = 0x3fffffff,
.offset = 0xffffffff,
.limit = 0xffff,
.length = 0xffff,
.pattern = NULL,
};
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#endif
/**
* RTE_FLOW_ITEM_TYPE_ETH
*
* Matches an Ethernet header.
ethdev: fix TPID handling in flow API TPID handling in rte_flow VLAN and E_TAG pattern item definitions is not consistent with the normal stacking order of pattern items, which is confusing to applications. Problem is that when followed by one of these layers, the EtherType field of the preceding layer keeps its "inner" definition, and the "outer" TPID is provided by the subsequent layer, the reverse of how a packet looks like on the wire: Wire: [ ETH TPID = A | VLAN EtherType = B | B DATA ] rte_flow: [ ETH EtherType = B | VLAN TPID = A | B DATA ] Worse, when QinQ is involved, the stacking order of VLAN layers is unspecified. It is unclear whether it should be reversed (innermost to outermost) as well given TPID applies to the previous layer: Wire: [ ETH TPID = A | VLAN TPID = B | VLAN EtherType = C | C DATA ] rte_flow 1: [ ETH EtherType = C | VLAN TPID = B | VLAN TPID = A | C DATA ] rte_flow 2: [ ETH EtherType = C | VLAN TPID = A | VLAN TPID = B | C DATA ] While specifying EtherType/TPID is hopefully rarely necessary, the stacking order in case of QinQ and the lack of documentation remain an issue. This patch replaces TPID in the VLAN pattern item with an inner EtherType/TPID as is usually done everywhere else (e.g. struct vlan_hdr), clarifies documentation and updates all relevant code. It breaks ABI compatibility for the following public functions: - rte_flow_copy() - rte_flow_create() - rte_flow_query() - rte_flow_validate() Summary of changes for PMDs that implement ETH, VLAN or E_TAG pattern items: - bnxt: EtherType matching is supported with and without VLAN, but TPID matching is not and triggers an error. - e1000: EtherType matching is only supported with the ETHERTYPE filter, which does not support VLAN matching, therefore no impact. - enic: same as bnxt. - i40e: same as bnxt with existing FDIR limitations on allowed EtherType values. The remaining filter types (VXLAN, NVGRE, QINQ) do not support EtherType matching. - ixgbe: same as e1000, with additional minor change to rely on the new E-Tag macro definition. - mlx4: EtherType/TPID matching is not supported, no impact. - mlx5: same as bnxt. - mvpp2: same as bnxt. - sfc: same as bnxt. - tap: same as bnxt. Fixes: b1a4b4cbc0a8 ("ethdev: introduce generic flow API") Fixes: 99e7003831c3 ("net/ixgbe: parse L2 tunnel filter") Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
2018-04-25 15:27:56 +00:00
*
* Inside @p hdr field, the sub-field @p ether_type stands either for EtherType
* or TPID, depending on whether the item is followed by a VLAN item or not. If
* two VLAN items follow, the sub-field refers to the outer one, which, in turn,
* contains the inner TPID in the similar header field. The innermost VLAN item
* contains a layer-3 EtherType. All of that follows the order seen on the wire.
*
* If the field in question contains a TPID value, only tagged packets with the
* specified TPID will match the pattern. Alternatively, it's possible to match
* any type of tagged packets by means of the field @p has_vlan rather than use
* the EtherType/TPID field. Also, it's possible to leave the two fields unused.
* If this is the case, both tagged and untagged packets will match the pattern.
*/
RTE_STD_C11
struct rte_flow_item_eth {
union {
struct {
/*
* These fields are retained for compatibility.
* Please switch to the new header field below.
*/
struct rte_ether_addr dst; /**< Destination MAC. */
struct rte_ether_addr src; /**< Source MAC. */
rte_be16_t type; /**< EtherType or TPID. */
};
struct rte_ether_hdr hdr;
};
uint32_t has_vlan:1; /**< Packet header contains at least one VLAN. */
uint32_t reserved:31; /**< Reserved, must be zero. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ETH. */
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#ifndef __cplusplus
static const struct rte_flow_item_eth rte_flow_item_eth_mask = {
.hdr.d_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.hdr.s_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.hdr.ether_type = RTE_BE16(0x0000),
};
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#endif
/**
* RTE_FLOW_ITEM_TYPE_VLAN
*
* Matches an 802.1Q/ad VLAN tag.
*
ethdev: fix TPID handling in flow API TPID handling in rte_flow VLAN and E_TAG pattern item definitions is not consistent with the normal stacking order of pattern items, which is confusing to applications. Problem is that when followed by one of these layers, the EtherType field of the preceding layer keeps its "inner" definition, and the "outer" TPID is provided by the subsequent layer, the reverse of how a packet looks like on the wire: Wire: [ ETH TPID = A | VLAN EtherType = B | B DATA ] rte_flow: [ ETH EtherType = B | VLAN TPID = A | B DATA ] Worse, when QinQ is involved, the stacking order of VLAN layers is unspecified. It is unclear whether it should be reversed (innermost to outermost) as well given TPID applies to the previous layer: Wire: [ ETH TPID = A | VLAN TPID = B | VLAN EtherType = C | C DATA ] rte_flow 1: [ ETH EtherType = C | VLAN TPID = B | VLAN TPID = A | C DATA ] rte_flow 2: [ ETH EtherType = C | VLAN TPID = A | VLAN TPID = B | C DATA ] While specifying EtherType/TPID is hopefully rarely necessary, the stacking order in case of QinQ and the lack of documentation remain an issue. This patch replaces TPID in the VLAN pattern item with an inner EtherType/TPID as is usually done everywhere else (e.g. struct vlan_hdr), clarifies documentation and updates all relevant code. It breaks ABI compatibility for the following public functions: - rte_flow_copy() - rte_flow_create() - rte_flow_query() - rte_flow_validate() Summary of changes for PMDs that implement ETH, VLAN or E_TAG pattern items: - bnxt: EtherType matching is supported with and without VLAN, but TPID matching is not and triggers an error. - e1000: EtherType matching is only supported with the ETHERTYPE filter, which does not support VLAN matching, therefore no impact. - enic: same as bnxt. - i40e: same as bnxt with existing FDIR limitations on allowed EtherType values. The remaining filter types (VXLAN, NVGRE, QINQ) do not support EtherType matching. - ixgbe: same as e1000, with additional minor change to rely on the new E-Tag macro definition. - mlx4: EtherType/TPID matching is not supported, no impact. - mlx5: same as bnxt. - mvpp2: same as bnxt. - sfc: same as bnxt. - tap: same as bnxt. Fixes: b1a4b4cbc0a8 ("ethdev: introduce generic flow API") Fixes: 99e7003831c3 ("net/ixgbe: parse L2 tunnel filter") Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
2018-04-25 15:27:56 +00:00
* The corresponding standard outer EtherType (TPID) values are
net: add rte prefix to ether defines Add 'RTE_' prefix to defines: - rename ETHER_ADDR_LEN as RTE_ETHER_ADDR_LEN. - rename ETHER_TYPE_LEN as RTE_ETHER_TYPE_LEN. - rename ETHER_CRC_LEN as RTE_ETHER_CRC_LEN. - rename ETHER_HDR_LEN as RTE_ETHER_HDR_LEN. - rename ETHER_MIN_LEN as RTE_ETHER_MIN_LEN. - rename ETHER_MAX_LEN as RTE_ETHER_MAX_LEN. - rename ETHER_MTU as RTE_ETHER_MTU. - rename ETHER_MAX_VLAN_FRAME_LEN as RTE_ETHER_MAX_VLAN_FRAME_LEN. - rename ETHER_MAX_VLAN_ID as RTE_ETHER_MAX_VLAN_ID. - rename ETHER_MAX_JUMBO_FRAME_LEN as RTE_ETHER_MAX_JUMBO_FRAME_LEN. - rename ETHER_MIN_MTU as RTE_ETHER_MIN_MTU. - rename ETHER_LOCAL_ADMIN_ADDR as RTE_ETHER_LOCAL_ADMIN_ADDR. - rename ETHER_GROUP_ADDR as RTE_ETHER_GROUP_ADDR. - rename ETHER_TYPE_IPv4 as RTE_ETHER_TYPE_IPv4. - rename ETHER_TYPE_IPv6 as RTE_ETHER_TYPE_IPv6. - rename ETHER_TYPE_ARP as RTE_ETHER_TYPE_ARP. - rename ETHER_TYPE_VLAN as RTE_ETHER_TYPE_VLAN. - rename ETHER_TYPE_RARP as RTE_ETHER_TYPE_RARP. - rename ETHER_TYPE_QINQ as RTE_ETHER_TYPE_QINQ. - rename ETHER_TYPE_ETAG as RTE_ETHER_TYPE_ETAG. - rename ETHER_TYPE_1588 as RTE_ETHER_TYPE_1588. - rename ETHER_TYPE_SLOW as RTE_ETHER_TYPE_SLOW. - rename ETHER_TYPE_TEB as RTE_ETHER_TYPE_TEB. - rename ETHER_TYPE_LLDP as RTE_ETHER_TYPE_LLDP. - rename ETHER_TYPE_MPLS as RTE_ETHER_TYPE_MPLS. - rename ETHER_TYPE_MPLSM as RTE_ETHER_TYPE_MPLSM. - rename ETHER_VXLAN_HLEN as RTE_ETHER_VXLAN_HLEN. - rename ETHER_ADDR_FMT_SIZE as RTE_ETHER_ADDR_FMT_SIZE. - rename VXLAN_GPE_TYPE_IPV4 as RTE_VXLAN_GPE_TYPE_IPV4. - rename VXLAN_GPE_TYPE_IPV6 as RTE_VXLAN_GPE_TYPE_IPV6. - rename VXLAN_GPE_TYPE_ETH as RTE_VXLAN_GPE_TYPE_ETH. - rename VXLAN_GPE_TYPE_NSH as RTE_VXLAN_GPE_TYPE_NSH. - rename VXLAN_GPE_TYPE_MPLS as RTE_VXLAN_GPE_TYPE_MPLS. - rename VXLAN_GPE_TYPE_GBP as RTE_VXLAN_GPE_TYPE_GBP. - rename VXLAN_GPE_TYPE_VBNG as RTE_VXLAN_GPE_TYPE_VBNG. - rename ETHER_VXLAN_GPE_HLEN as RTE_ETHER_VXLAN_GPE_HLEN. Do not update the command line library to avoid adding a dependency to librte_net. Signed-off-by: Olivier Matz <olivier.matz@6wind.com> Reviewed-by: Stephen Hemminger <stephen@networkplumber.org> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com> Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
2019-05-21 16:13:05 +00:00
* RTE_ETHER_TYPE_VLAN or RTE_ETHER_TYPE_QINQ. It can be overridden by
* the preceding pattern item.
* If a @p VLAN item is present in the pattern, then only tagged packets will
* match the pattern.
* The field @p has_more_vlan can be used to match any type of tagged packets,
* instead of using the @p eth_proto field of @p hdr.
* If the @p eth_proto of @p hdr and @p has_more_vlan fields are not specified,
* then any tagged packets will match the pattern.
*/
RTE_STD_C11
struct rte_flow_item_vlan {
union {
struct {
/*
* These fields are retained for compatibility.
* Please switch to the new header field below.
*/
rte_be16_t tci; /**< Tag control information. */
rte_be16_t inner_type; /**< Inner EtherType or TPID. */
};
struct rte_vlan_hdr hdr;
};
uint32_t has_more_vlan:1;
/**< Packet header contains at least one more VLAN, after this VLAN. */
uint32_t reserved:31; /**< Reserved, must be zero. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_VLAN. */
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#ifndef __cplusplus
static const struct rte_flow_item_vlan rte_flow_item_vlan_mask = {
.hdr.vlan_tci = RTE_BE16(0x0fff),
.hdr.eth_proto = RTE_BE16(0x0000),
};
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#endif
/**
* RTE_FLOW_ITEM_TYPE_IPV4
*
* Matches an IPv4 header.
*
* Note: IPv4 options are handled by dedicated pattern items.
*/
struct rte_flow_item_ipv4 {
struct rte_ipv4_hdr hdr; /**< IPv4 header definition. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_IPV4. */
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#ifndef __cplusplus
static const struct rte_flow_item_ipv4 rte_flow_item_ipv4_mask = {
.hdr = {
.src_addr = RTE_BE32(0xffffffff),
.dst_addr = RTE_BE32(0xffffffff),
},
};
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#endif
/**
* RTE_FLOW_ITEM_TYPE_IPV6.
*
* Matches an IPv6 header.
*
ethdev: add extensions attributes to IPv6 item Using the current implementation of DPDK, an application cannot match on IPv6 packets, based on the existing extension headers, in a simple way. Field 'Next Header' in IPv6 header indicates type of the first extension header only. Following extension headers can't be identified by inspecting the IPv6 header. As a result, the existence or absence of specific extension headers can't be used for packet matching. For example, fragmented IPv6 packets contain a dedicated extension header (which is implemented in a later patch of this series). Non-fragmented packets don't contain the fragment extension header. For an application to match on non-fragmented IPv6 packets, the current implementation doesn't provide a suitable solution. Matching on the Next Header field is not sufficient, since additional extension headers might be present in the same packet. To match on fragmented IPv6 packets, the same difficulty exists. This patch implements the update as detailed in RFC [1]. A set of additional values will be added to IPv6 header struct. These values will indicate the existence of every defined extension header type, providing simple means for identification of existing extensions in the packet header. Continuing the above example, fragmented packets can be identified using the specific value indicating existence of fragment extension header. To match on non-fragmented IPv6 packets, need to use has_frag_ext 0. To match on fragmented IPv6 packets, need to use has_frag_ext 1. To match on any IPv6 packets, the has_frag_ext field should not be specified for match. [1] https://mails.dpdk.org/archives/dev/2020-August/177257.html Signed-off-by: Dekel Peled <dekelp@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2020-10-14 16:35:47 +00:00
* Dedicated flags indicate if header contains specific extension headers.
*/
struct rte_flow_item_ipv6 {
struct rte_ipv6_hdr hdr; /**< IPv6 header definition. */
ethdev: add extensions attributes to IPv6 item Using the current implementation of DPDK, an application cannot match on IPv6 packets, based on the existing extension headers, in a simple way. Field 'Next Header' in IPv6 header indicates type of the first extension header only. Following extension headers can't be identified by inspecting the IPv6 header. As a result, the existence or absence of specific extension headers can't be used for packet matching. For example, fragmented IPv6 packets contain a dedicated extension header (which is implemented in a later patch of this series). Non-fragmented packets don't contain the fragment extension header. For an application to match on non-fragmented IPv6 packets, the current implementation doesn't provide a suitable solution. Matching on the Next Header field is not sufficient, since additional extension headers might be present in the same packet. To match on fragmented IPv6 packets, the same difficulty exists. This patch implements the update as detailed in RFC [1]. A set of additional values will be added to IPv6 header struct. These values will indicate the existence of every defined extension header type, providing simple means for identification of existing extensions in the packet header. Continuing the above example, fragmented packets can be identified using the specific value indicating existence of fragment extension header. To match on non-fragmented IPv6 packets, need to use has_frag_ext 0. To match on fragmented IPv6 packets, need to use has_frag_ext 1. To match on any IPv6 packets, the has_frag_ext field should not be specified for match. [1] https://mails.dpdk.org/archives/dev/2020-August/177257.html Signed-off-by: Dekel Peled <dekelp@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2020-10-14 16:35:47 +00:00
uint32_t has_hop_ext:1;
/**< Header contains Hop-by-Hop Options extension header. */
uint32_t has_route_ext:1;
/**< Header contains Routing extension header. */
uint32_t has_frag_ext:1;
/**< Header contains Fragment extension header. */
uint32_t has_auth_ext:1;
/**< Header contains Authentication extension header. */
uint32_t has_esp_ext:1;
/**< Header contains Encapsulation Security Payload extension header. */
uint32_t has_dest_ext:1;
/**< Header contains Destination Options extension header. */
uint32_t has_mobil_ext:1;
/**< Header contains Mobility extension header. */
uint32_t has_hip_ext:1;
/**< Header contains Host Identity Protocol extension header. */
uint32_t has_shim6_ext:1;
/**< Header contains Shim6 Protocol extension header. */
uint32_t reserved:23;
/**< Reserved for future extension headers, must be zero. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_IPV6. */
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#ifndef __cplusplus
static const struct rte_flow_item_ipv6 rte_flow_item_ipv6_mask = {
.hdr = {
.src_addr =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
.dst_addr =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
},
};
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#endif
/**
* RTE_FLOW_ITEM_TYPE_ICMP.
*
* Matches an ICMP header.
*/
struct rte_flow_item_icmp {
struct rte_icmp_hdr hdr; /**< ICMP header definition. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP. */
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#ifndef __cplusplus
static const struct rte_flow_item_icmp rte_flow_item_icmp_mask = {
.hdr = {
.icmp_type = 0xff,
.icmp_code = 0xff,
},
};
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#endif
/**
* RTE_FLOW_ITEM_TYPE_UDP.
*
* Matches a UDP header.
*/
struct rte_flow_item_udp {
struct rte_udp_hdr hdr; /**< UDP header definition. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_UDP. */
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#ifndef __cplusplus
static const struct rte_flow_item_udp rte_flow_item_udp_mask = {
.hdr = {
.src_port = RTE_BE16(0xffff),
.dst_port = RTE_BE16(0xffff),
},
};
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#endif
/**
* RTE_FLOW_ITEM_TYPE_TCP.
*
* Matches a TCP header.
*/
struct rte_flow_item_tcp {
struct rte_tcp_hdr hdr; /**< TCP header definition. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_TCP. */
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#ifndef __cplusplus
static const struct rte_flow_item_tcp rte_flow_item_tcp_mask = {
.hdr = {
.src_port = RTE_BE16(0xffff),
.dst_port = RTE_BE16(0xffff),
},
};
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#endif
/**
* RTE_FLOW_ITEM_TYPE_SCTP.
*
* Matches a SCTP header.
*/
struct rte_flow_item_sctp {
struct rte_sctp_hdr hdr; /**< SCTP header definition. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_SCTP. */
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#ifndef __cplusplus
static const struct rte_flow_item_sctp rte_flow_item_sctp_mask = {
.hdr = {
.src_port = RTE_BE16(0xffff),
.dst_port = RTE_BE16(0xffff),
},
};
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#endif
/**
* RTE_FLOW_ITEM_TYPE_VXLAN.
*
* Matches a VXLAN header (RFC 7348).
*/
RTE_STD_C11
struct rte_flow_item_vxlan {
union {
struct {
/*
* These fields are retained for compatibility.
* Please switch to the new header field below.
*/
uint8_t flags; /**< Normally 0x08 (I flag). */
uint8_t rsvd0[3]; /**< Reserved, normally 0x000000. */
uint8_t vni[3]; /**< VXLAN identifier. */
uint8_t rsvd1; /**< Reserved, normally 0x00. */
};
struct rte_vxlan_hdr hdr;
};
};
/** Default mask for RTE_FLOW_ITEM_TYPE_VXLAN. */
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#ifndef __cplusplus
static const struct rte_flow_item_vxlan rte_flow_item_vxlan_mask = {
.hdr.vx_vni = RTE_BE32(0xffffff00), /* (0xffffff << 8) */
};
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#endif
/**
* RTE_FLOW_ITEM_TYPE_E_TAG.
*
* Matches a E-tag header.
ethdev: fix TPID handling in flow API TPID handling in rte_flow VLAN and E_TAG pattern item definitions is not consistent with the normal stacking order of pattern items, which is confusing to applications. Problem is that when followed by one of these layers, the EtherType field of the preceding layer keeps its "inner" definition, and the "outer" TPID is provided by the subsequent layer, the reverse of how a packet looks like on the wire: Wire: [ ETH TPID = A | VLAN EtherType = B | B DATA ] rte_flow: [ ETH EtherType = B | VLAN TPID = A | B DATA ] Worse, when QinQ is involved, the stacking order of VLAN layers is unspecified. It is unclear whether it should be reversed (innermost to outermost) as well given TPID applies to the previous layer: Wire: [ ETH TPID = A | VLAN TPID = B | VLAN EtherType = C | C DATA ] rte_flow 1: [ ETH EtherType = C | VLAN TPID = B | VLAN TPID = A | C DATA ] rte_flow 2: [ ETH EtherType = C | VLAN TPID = A | VLAN TPID = B | C DATA ] While specifying EtherType/TPID is hopefully rarely necessary, the stacking order in case of QinQ and the lack of documentation remain an issue. This patch replaces TPID in the VLAN pattern item with an inner EtherType/TPID as is usually done everywhere else (e.g. struct vlan_hdr), clarifies documentation and updates all relevant code. It breaks ABI compatibility for the following public functions: - rte_flow_copy() - rte_flow_create() - rte_flow_query() - rte_flow_validate() Summary of changes for PMDs that implement ETH, VLAN or E_TAG pattern items: - bnxt: EtherType matching is supported with and without VLAN, but TPID matching is not and triggers an error. - e1000: EtherType matching is only supported with the ETHERTYPE filter, which does not support VLAN matching, therefore no impact. - enic: same as bnxt. - i40e: same as bnxt with existing FDIR limitations on allowed EtherType values. The remaining filter types (VXLAN, NVGRE, QINQ) do not support EtherType matching. - ixgbe: same as e1000, with additional minor change to rely on the new E-Tag macro definition. - mlx4: EtherType/TPID matching is not supported, no impact. - mlx5: same as bnxt. - mvpp2: same as bnxt. - sfc: same as bnxt. - tap: same as bnxt. Fixes: b1a4b4cbc0a8 ("ethdev: introduce generic flow API") Fixes: 99e7003831c3 ("net/ixgbe: parse L2 tunnel filter") Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
2018-04-25 15:27:56 +00:00
*
* The corresponding standard outer EtherType (TPID) value is
net: add rte prefix to ether defines Add 'RTE_' prefix to defines: - rename ETHER_ADDR_LEN as RTE_ETHER_ADDR_LEN. - rename ETHER_TYPE_LEN as RTE_ETHER_TYPE_LEN. - rename ETHER_CRC_LEN as RTE_ETHER_CRC_LEN. - rename ETHER_HDR_LEN as RTE_ETHER_HDR_LEN. - rename ETHER_MIN_LEN as RTE_ETHER_MIN_LEN. - rename ETHER_MAX_LEN as RTE_ETHER_MAX_LEN. - rename ETHER_MTU as RTE_ETHER_MTU. - rename ETHER_MAX_VLAN_FRAME_LEN as RTE_ETHER_MAX_VLAN_FRAME_LEN. - rename ETHER_MAX_VLAN_ID as RTE_ETHER_MAX_VLAN_ID. - rename ETHER_MAX_JUMBO_FRAME_LEN as RTE_ETHER_MAX_JUMBO_FRAME_LEN. - rename ETHER_MIN_MTU as RTE_ETHER_MIN_MTU. - rename ETHER_LOCAL_ADMIN_ADDR as RTE_ETHER_LOCAL_ADMIN_ADDR. - rename ETHER_GROUP_ADDR as RTE_ETHER_GROUP_ADDR. - rename ETHER_TYPE_IPv4 as RTE_ETHER_TYPE_IPv4. - rename ETHER_TYPE_IPv6 as RTE_ETHER_TYPE_IPv6. - rename ETHER_TYPE_ARP as RTE_ETHER_TYPE_ARP. - rename ETHER_TYPE_VLAN as RTE_ETHER_TYPE_VLAN. - rename ETHER_TYPE_RARP as RTE_ETHER_TYPE_RARP. - rename ETHER_TYPE_QINQ as RTE_ETHER_TYPE_QINQ. - rename ETHER_TYPE_ETAG as RTE_ETHER_TYPE_ETAG. - rename ETHER_TYPE_1588 as RTE_ETHER_TYPE_1588. - rename ETHER_TYPE_SLOW as RTE_ETHER_TYPE_SLOW. - rename ETHER_TYPE_TEB as RTE_ETHER_TYPE_TEB. - rename ETHER_TYPE_LLDP as RTE_ETHER_TYPE_LLDP. - rename ETHER_TYPE_MPLS as RTE_ETHER_TYPE_MPLS. - rename ETHER_TYPE_MPLSM as RTE_ETHER_TYPE_MPLSM. - rename ETHER_VXLAN_HLEN as RTE_ETHER_VXLAN_HLEN. - rename ETHER_ADDR_FMT_SIZE as RTE_ETHER_ADDR_FMT_SIZE. - rename VXLAN_GPE_TYPE_IPV4 as RTE_VXLAN_GPE_TYPE_IPV4. - rename VXLAN_GPE_TYPE_IPV6 as RTE_VXLAN_GPE_TYPE_IPV6. - rename VXLAN_GPE_TYPE_ETH as RTE_VXLAN_GPE_TYPE_ETH. - rename VXLAN_GPE_TYPE_NSH as RTE_VXLAN_GPE_TYPE_NSH. - rename VXLAN_GPE_TYPE_MPLS as RTE_VXLAN_GPE_TYPE_MPLS. - rename VXLAN_GPE_TYPE_GBP as RTE_VXLAN_GPE_TYPE_GBP. - rename VXLAN_GPE_TYPE_VBNG as RTE_VXLAN_GPE_TYPE_VBNG. - rename ETHER_VXLAN_GPE_HLEN as RTE_ETHER_VXLAN_GPE_HLEN. Do not update the command line library to avoid adding a dependency to librte_net. Signed-off-by: Olivier Matz <olivier.matz@6wind.com> Reviewed-by: Stephen Hemminger <stephen@networkplumber.org> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com> Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
2019-05-21 16:13:05 +00:00
* RTE_ETHER_TYPE_ETAG. It can be overridden by the preceding pattern item.
*/
struct rte_flow_item_e_tag {
/**
* E-Tag control information (E-TCI).
* E-PCP (3b), E-DEI (1b), ingress E-CID base (12b).
*/
rte_be16_t epcp_edei_in_ecid_b;
/** Reserved (2b), GRP (2b), E-CID base (12b). */
rte_be16_t rsvd_grp_ecid_b;
uint8_t in_ecid_e; /**< Ingress E-CID ext. */
uint8_t ecid_e; /**< E-CID ext. */
ethdev: fix TPID handling in flow API TPID handling in rte_flow VLAN and E_TAG pattern item definitions is not consistent with the normal stacking order of pattern items, which is confusing to applications. Problem is that when followed by one of these layers, the EtherType field of the preceding layer keeps its "inner" definition, and the "outer" TPID is provided by the subsequent layer, the reverse of how a packet looks like on the wire: Wire: [ ETH TPID = A | VLAN EtherType = B | B DATA ] rte_flow: [ ETH EtherType = B | VLAN TPID = A | B DATA ] Worse, when QinQ is involved, the stacking order of VLAN layers is unspecified. It is unclear whether it should be reversed (innermost to outermost) as well given TPID applies to the previous layer: Wire: [ ETH TPID = A | VLAN TPID = B | VLAN EtherType = C | C DATA ] rte_flow 1: [ ETH EtherType = C | VLAN TPID = B | VLAN TPID = A | C DATA ] rte_flow 2: [ ETH EtherType = C | VLAN TPID = A | VLAN TPID = B | C DATA ] While specifying EtherType/TPID is hopefully rarely necessary, the stacking order in case of QinQ and the lack of documentation remain an issue. This patch replaces TPID in the VLAN pattern item with an inner EtherType/TPID as is usually done everywhere else (e.g. struct vlan_hdr), clarifies documentation and updates all relevant code. It breaks ABI compatibility for the following public functions: - rte_flow_copy() - rte_flow_create() - rte_flow_query() - rte_flow_validate() Summary of changes for PMDs that implement ETH, VLAN or E_TAG pattern items: - bnxt: EtherType matching is supported with and without VLAN, but TPID matching is not and triggers an error. - e1000: EtherType matching is only supported with the ETHERTYPE filter, which does not support VLAN matching, therefore no impact. - enic: same as bnxt. - i40e: same as bnxt with existing FDIR limitations on allowed EtherType values. The remaining filter types (VXLAN, NVGRE, QINQ) do not support EtherType matching. - ixgbe: same as e1000, with additional minor change to rely on the new E-Tag macro definition. - mlx4: EtherType/TPID matching is not supported, no impact. - mlx5: same as bnxt. - mvpp2: same as bnxt. - sfc: same as bnxt. - tap: same as bnxt. Fixes: b1a4b4cbc0a8 ("ethdev: introduce generic flow API") Fixes: 99e7003831c3 ("net/ixgbe: parse L2 tunnel filter") Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
2018-04-25 15:27:56 +00:00
rte_be16_t inner_type; /**< Inner EtherType or TPID. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_E_TAG. */
#ifndef __cplusplus
static const struct rte_flow_item_e_tag rte_flow_item_e_tag_mask = {
.rsvd_grp_ecid_b = RTE_BE16(0x3fff),
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_NVGRE.
*
* Matches a NVGRE header.
*/
struct rte_flow_item_nvgre {
/**
* Checksum (1b), undefined (1b), key bit (1b), sequence number (1b),
* reserved 0 (9b), version (3b).
*
* c_k_s_rsvd0_ver must have value 0x2000 according to RFC 7637.
*/
rte_be16_t c_k_s_rsvd0_ver;
rte_be16_t protocol; /**< Protocol type (0x6558). */
uint8_t tni[3]; /**< Virtual subnet ID. */
uint8_t flow_id; /**< Flow ID. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_NVGRE. */
#ifndef __cplusplus
static const struct rte_flow_item_nvgre rte_flow_item_nvgre_mask = {
.tni = "\xff\xff\xff",
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_MPLS.
*
* Matches a MPLS header.
*/
struct rte_flow_item_mpls {
/**
* Label (20b), TC (3b), Bottom of Stack (1b).
*/
uint8_t label_tc_s[3];
uint8_t ttl; /** Time-to-Live. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_MPLS. */
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#ifndef __cplusplus
static const struct rte_flow_item_mpls rte_flow_item_mpls_mask = {
.label_tc_s = "\xff\xff\xf0",
};
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#endif
/**
* RTE_FLOW_ITEM_TYPE_GRE.
*
* Matches a GRE header.
*/
struct rte_flow_item_gre {
/**
* Checksum (1b), reserved 0 (12b), version (3b).
* Refer to RFC 2784.
*/
rte_be16_t c_rsvd0_ver;
rte_be16_t protocol; /**< Protocol type. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_GRE. */
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#ifndef __cplusplus
static const struct rte_flow_item_gre rte_flow_item_gre_mask = {
.protocol = RTE_BE16(0xffff),
};
ethdev: fix flow API for C++ This commit addresses the following compilation errors: In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:428:2: error: expected primary-expression before '.' token [...] build/include/rte_flow.h:469:1: sorry, unimplemented: non-trivial designated initializers not supported [...] In file included from build/include/rte_flow_driver.h:50:0, from /tmp/check-includes.sh.1397.cc:1: build/include/rte_flow.h:631:1: error: C99 designator 'label_tc_s' outside aggregate initializer [...] build/include/rte_flow.h:631:1: error: initializer-string for array of chars is too long [-fpermissive] [...] build/include/rte_flow.h:650:1: sorry, unimplemented: non-trivial designated initializers not supported [...] C++ does not support the C99-style designated initializers used in this file for the default item masks. While the resulting symbols are primarily useful to PMDs (written in C), they are exposed as part of the public API for documentation purposes and to assist application writers. Considering that: - using pre-C99 initialization style for compatibility with C++ would render them difficult to understand (all struct members must be initialized) - using both initialization styles would be needlessly verbose - not exposing them at all would defeat their purpose - applications do not normally need these symbols at run time This commit hides these symbols from C++ applications. Specific C++ initializers will be added later if necessary. Fixes: 6de5c0f1302c ("ethdev: define default item masks in flow API") Fixes: 7cd048321d1d ("ethdev: add MPLS and GRE flow API items") Cc: stable@dpdk.org Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Acked-by: Beilei Xing <beilei.xing@intel.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
2017-04-26 12:07:19 +00:00
#endif
/**
* RTE_FLOW_ITEM_TYPE_FUZZY
*
* Fuzzy pattern match, expect faster than default.
*
* This is for device that support fuzzy match option.
* Usually a fuzzy match is fast but the cost is accuracy.
* i.e. Signature Match only match pattern's hash value, but it is
* possible two different patterns have the same hash value.
*
* Matching accuracy level can be configure by threshold.
* Driver can divide the range of threshold and map to different
* accuracy levels that device support.
*
* Threshold 0 means perfect match (no fuzziness), while threshold
* 0xffffffff means fuzziest match.
*/
struct rte_flow_item_fuzzy {
uint32_t thresh; /**< Accuracy threshold. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_FUZZY. */
#ifndef __cplusplus
static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
.thresh = 0xffffffff,
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_GTP.
*
* Matches a GTPv1 header.
*/
struct rte_flow_item_gtp {
/**
* Version (3b), protocol type (1b), reserved (1b),
* Extension header flag (1b),
* Sequence number flag (1b),
* N-PDU number flag (1b).
*/
uint8_t v_pt_rsv_flags;
uint8_t msg_type; /**< Message type. */
rte_be16_t msg_len; /**< Message length. */
rte_be32_t teid; /**< Tunnel endpoint identifier. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_GTP. */
#ifndef __cplusplus
static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
.teid = RTE_BE32(0xffffffff),
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_ESP
*
* Matches an ESP header.
*/
struct rte_flow_item_esp {
struct rte_esp_hdr hdr; /**< ESP header definition. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ESP. */
#ifndef __cplusplus
static const struct rte_flow_item_esp rte_flow_item_esp_mask = {
.hdr = {
.spi = RTE_BE32(0xffffffff),
},
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_GENEVE.
*
* Matches a GENEVE header.
*/
struct rte_flow_item_geneve {
/**
* Version (2b), length of the options fields (6b), OAM packet (1b),
* critical options present (1b), reserved 0 (6b).
*/
rte_be16_t ver_opt_len_o_c_rsvd0;
rte_be16_t protocol; /**< Protocol type. */
uint8_t vni[3]; /**< Virtual Network Identifier. */
uint8_t rsvd1; /**< Reserved, normally 0x00. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_GENEVE. */
#ifndef __cplusplus
static const struct rte_flow_item_geneve rte_flow_item_geneve_mask = {
.vni = "\xff\xff\xff",
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_VXLAN_GPE (draft-ietf-nvo3-vxlan-gpe-05).
*
* Matches a VXLAN-GPE header.
*/
struct rte_flow_item_vxlan_gpe {
uint8_t flags; /**< Normally 0x0c (I and P flags). */
uint8_t rsvd0[2]; /**< Reserved, normally 0x0000. */
uint8_t protocol; /**< Protocol type. */
uint8_t vni[3]; /**< VXLAN identifier. */
uint8_t rsvd1; /**< Reserved, normally 0x00. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_VXLAN_GPE. */
#ifndef __cplusplus
static const struct rte_flow_item_vxlan_gpe rte_flow_item_vxlan_gpe_mask = {
.vni = "\xff\xff\xff",
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4
*
* Matches an ARP header for Ethernet/IPv4.
*/
struct rte_flow_item_arp_eth_ipv4 {
rte_be16_t hrd; /**< Hardware type, normally 1. */
rte_be16_t pro; /**< Protocol type, normally 0x0800. */
uint8_t hln; /**< Hardware address length, normally 6. */
uint8_t pln; /**< Protocol address length, normally 4. */
rte_be16_t op; /**< Opcode (1 for request, 2 for reply). */
struct rte_ether_addr sha; /**< Sender hardware address. */
rte_be32_t spa; /**< Sender IPv4 address. */
struct rte_ether_addr tha; /**< Target hardware address. */
rte_be32_t tpa; /**< Target IPv4 address. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4. */
#ifndef __cplusplus
static const struct rte_flow_item_arp_eth_ipv4
rte_flow_item_arp_eth_ipv4_mask = {
.sha.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.spa = RTE_BE32(0xffffffff),
.tha.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.tpa = RTE_BE32(0xffffffff),
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_IPV6_EXT
*
* Matches the presence of any IPv6 extension header.
*
* Normally preceded by any of:
*
* - RTE_FLOW_ITEM_TYPE_IPV6
* - RTE_FLOW_ITEM_TYPE_IPV6_EXT
*/
struct rte_flow_item_ipv6_ext {
uint8_t next_hdr; /**< Next header. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_IPV6_EXT. */
#ifndef __cplusplus
static const
struct rte_flow_item_ipv6_ext rte_flow_item_ipv6_ext_mask = {
.next_hdr = 0xff,
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT
*
* Matches the presence of IPv6 fragment extension header.
*
* Preceded by any of:
*
* - RTE_FLOW_ITEM_TYPE_IPV6
* - RTE_FLOW_ITEM_TYPE_IPV6_EXT
*/
struct rte_flow_item_ipv6_frag_ext {
struct rte_ipv6_fragment_ext hdr;
};
/**
* RTE_FLOW_ITEM_TYPE_ICMP6
*
* Matches any ICMPv6 header.
*/
struct rte_flow_item_icmp6 {
uint8_t type; /**< ICMPv6 type. */
uint8_t code; /**< ICMPv6 code. */
uint16_t checksum; /**< ICMPv6 checksum. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP6. */
#ifndef __cplusplus
static const struct rte_flow_item_icmp6 rte_flow_item_icmp6_mask = {
.type = 0xff,
.code = 0xff,
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS
*
* Matches an ICMPv6 neighbor discovery solicitation.
*/
struct rte_flow_item_icmp6_nd_ns {
uint8_t type; /**< ICMPv6 type, normally 135. */
uint8_t code; /**< ICMPv6 code, normally 0. */
rte_be16_t checksum; /**< ICMPv6 checksum. */
rte_be32_t reserved; /**< Reserved, normally 0. */
uint8_t target_addr[16]; /**< Target address. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS. */
#ifndef __cplusplus
static const
struct rte_flow_item_icmp6_nd_ns rte_flow_item_icmp6_nd_ns_mask = {
.target_addr =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA
*
* Matches an ICMPv6 neighbor discovery advertisement.
*/
struct rte_flow_item_icmp6_nd_na {
uint8_t type; /**< ICMPv6 type, normally 136. */
uint8_t code; /**< ICMPv6 code, normally 0. */
rte_be16_t checksum; /**< ICMPv6 checksum. */
/**
* Route flag (1b), solicited flag (1b), override flag (1b),
* reserved (29b).
*/
rte_be32_t rso_reserved;
uint8_t target_addr[16]; /**< Target address. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA. */
#ifndef __cplusplus
static const
struct rte_flow_item_icmp6_nd_na rte_flow_item_icmp6_nd_na_mask = {
.target_addr =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT
*
* Matches the presence of any ICMPv6 neighbor discovery option.
*
* Normally preceded by any of:
*
* - RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA
* - RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS
* - RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT
*/
struct rte_flow_item_icmp6_nd_opt {
uint8_t type; /**< ND option type. */
uint8_t length; /**< ND option length. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT. */
#ifndef __cplusplus
static const struct rte_flow_item_icmp6_nd_opt
rte_flow_item_icmp6_nd_opt_mask = {
.type = 0xff,
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH
*
* Matches an ICMPv6 neighbor discovery source Ethernet link-layer address
* option.
*
* Normally preceded by any of:
*
* - RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA
* - RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT
*/
struct rte_flow_item_icmp6_nd_opt_sla_eth {
uint8_t type; /**< ND option type, normally 1. */
uint8_t length; /**< ND option length, normally 1. */
struct rte_ether_addr sla; /**< Source Ethernet LLA. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH. */
#ifndef __cplusplus
static const struct rte_flow_item_icmp6_nd_opt_sla_eth
rte_flow_item_icmp6_nd_opt_sla_eth_mask = {
.sla.addr_bytes = "\xff\xff\xff\xff\xff\xff",
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH
*
* Matches an ICMPv6 neighbor discovery target Ethernet link-layer address
* option.
*
* Normally preceded by any of:
*
* - RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS
* - RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT
*/
struct rte_flow_item_icmp6_nd_opt_tla_eth {
uint8_t type; /**< ND option type, normally 2. */
uint8_t length; /**< ND option length, normally 1. */
struct rte_ether_addr tla; /**< Target Ethernet LLA. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH. */
#ifndef __cplusplus
static const struct rte_flow_item_icmp6_nd_opt_tla_eth
rte_flow_item_icmp6_nd_opt_tla_eth_mask = {
.tla.addr_bytes = "\xff\xff\xff\xff\xff\xff",
};
#endif
/**
ethdev: extend flow metadata Currently, metadata can be set on egress path via mbuf tx_metadata field with PKT_TX_METADATA flag and RTE_FLOW_ITEM_TYPE_META matches metadata. This patch extends the metadata feature usability. 1) RTE_FLOW_ACTION_TYPE_SET_META When supporting multiple tables, Tx metadata can also be set by a rule and matched by another rule. This new action allows metadata to be set as a result of flow match. 2) Metadata on ingress There's also need to support metadata on ingress. Metadata can be set by SET_META action and matched by META item like Tx. The final value set by the action will be delivered to application via metadata dynamic field of mbuf which can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior to use SET_META action. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. If application is going to engage the metadata feature it registers the metadata dynamic fields, then PMD checks the metadata field availability and handles the appropriate fields in datapath. For loopback/hairpin packet, metadata set on Rx/Tx may or may not be propagated to the other path depending on hardware capability. MARK and METADATA look similar and might operate in similar way, but not interacting. Initially, there were proposed two metadata related actions: - RTE_FLOW_ACTION_TYPE_FLAG - RTE_FLOW_ACTION_TYPE_MARK These actions set the special flag in the packet metadata, MARK action stores some specified value in the metadata storage, and, on the packet receiving PMD puts the flag and value to the mbuf and applications can see the packet was threated inside flow engine according to the appropriate RTE flow(s). MARK and FLAG are like some kind of gateway to transfer some per-packet information from the flow engine to the application via receiving datapath. Also, there is the item of type RTE_FLOW_ITEM_TYPE_MARK provided. It allows us to extend the flow match pattern with the capability to match the metadata values set by MARK/FLAG actions on other flows. From the datapath point of view, the MARK and FLAG are related to the receiving side only. It would useful to have the same gateway on the transmitting side and there was the feature of type RTE_FLOW_ITEM_TYPE_META was proposed. The application can fill the field in mbuf and this value will be transferred to some field in the packet metadata inside the flow engine. It did not matter whether these metadata fields are shared because of MARK and META items belonged to different domains (receiving and transmitting) and could be vendor-specific. So far, so good, DPDK proposes some entities to control metadata inside the flow engine and gateways to exchange these values on a per-packet basis via datapaths. As we can see, the MARK and META means are not symmetric, there is absent action which would allow us to set META value on the transmitting path. So, the action of type: - RTE_FLOW_ACTION_TYPE_SET_META was proposed. The next, applications raise the new requirements for packet metadata. The flow ngines are getting more complex, internal switches are introduced, multiple ports might be supported within the same flow engine namespace. From the DPDK points of view, it means the packets might be sent on one eth_dev port and received on the other one, and the packet path inside the flow engine entirely belongs to the same hardware device. The simplest example is SR-IOV with PF, VFs and the representors. And there is a brilliant opportunity to provide some out-of-band channel to transfer some extra data from one port to another one, besides the packet data itself. And applications would like to use this opportunity. It is supposed for application to use trials (with rte_flow_validate) to detect which metadata features (FLAG, MARK, META) actually supported by PMD and underlying hardware. It might depend on PMD configuration, system software, hardware settings, etc., and should be detected in run time. Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:30 +00:00
* RTE_FLOW_ITEM_TYPE_META
*
ethdev: move egress metadata to dynamic field The dynamic mbuf fields were introduced by [1]. The egress metadata is good candidate to be moved from statically allocated field tx_metadata to dynamic one. Because mbufs are used in half-duplex fashion only, it is safe to share this dynamic field with ingress metadata. The shared dynamic field contains either egress (if application going to transmit mbuf with tx_burst) or ingress (if mbuf is received with rx_burst) metadata and can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_TX_DYNF_METADATA/PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior accessing the data. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. DEV_TX_OFFLOAD_MATCH_METADATA offload and configuration flag is removed. The metadata support in PMDs is engaged on dynamic field registration. Metadata feature is getting complex. We might have some set of actions and items that might be supported by PMDs in multiple combinations, the supported values and masks are the subjects to query by perfroming trials (with rte_flow_validate). [1] http://patches.dpdk.org/patch/62040/ Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:31 +00:00
* Matches a specified metadata value. On egress, metadata can be set
* either by mbuf dynamic metadata field with PKT_TX_DYNF_METADATA flag or
* RTE_FLOW_ACTION_TYPE_SET_META. On ingress, RTE_FLOW_ACTION_TYPE_SET_META
* sets metadata for a packet and the metadata will be reported via mbuf
* metadata dynamic field with PKT_RX_DYNF_METADATA flag. The dynamic mbuf
* field must be registered in advance by rte_flow_dynf_metadata_register().
*/
struct rte_flow_item_meta {
ethdev: extend flow metadata Currently, metadata can be set on egress path via mbuf tx_metadata field with PKT_TX_METADATA flag and RTE_FLOW_ITEM_TYPE_META matches metadata. This patch extends the metadata feature usability. 1) RTE_FLOW_ACTION_TYPE_SET_META When supporting multiple tables, Tx metadata can also be set by a rule and matched by another rule. This new action allows metadata to be set as a result of flow match. 2) Metadata on ingress There's also need to support metadata on ingress. Metadata can be set by SET_META action and matched by META item like Tx. The final value set by the action will be delivered to application via metadata dynamic field of mbuf which can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior to use SET_META action. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. If application is going to engage the metadata feature it registers the metadata dynamic fields, then PMD checks the metadata field availability and handles the appropriate fields in datapath. For loopback/hairpin packet, metadata set on Rx/Tx may or may not be propagated to the other path depending on hardware capability. MARK and METADATA look similar and might operate in similar way, but not interacting. Initially, there were proposed two metadata related actions: - RTE_FLOW_ACTION_TYPE_FLAG - RTE_FLOW_ACTION_TYPE_MARK These actions set the special flag in the packet metadata, MARK action stores some specified value in the metadata storage, and, on the packet receiving PMD puts the flag and value to the mbuf and applications can see the packet was threated inside flow engine according to the appropriate RTE flow(s). MARK and FLAG are like some kind of gateway to transfer some per-packet information from the flow engine to the application via receiving datapath. Also, there is the item of type RTE_FLOW_ITEM_TYPE_MARK provided. It allows us to extend the flow match pattern with the capability to match the metadata values set by MARK/FLAG actions on other flows. From the datapath point of view, the MARK and FLAG are related to the receiving side only. It would useful to have the same gateway on the transmitting side and there was the feature of type RTE_FLOW_ITEM_TYPE_META was proposed. The application can fill the field in mbuf and this value will be transferred to some field in the packet metadata inside the flow engine. It did not matter whether these metadata fields are shared because of MARK and META items belonged to different domains (receiving and transmitting) and could be vendor-specific. So far, so good, DPDK proposes some entities to control metadata inside the flow engine and gateways to exchange these values on a per-packet basis via datapaths. As we can see, the MARK and META means are not symmetric, there is absent action which would allow us to set META value on the transmitting path. So, the action of type: - RTE_FLOW_ACTION_TYPE_SET_META was proposed. The next, applications raise the new requirements for packet metadata. The flow ngines are getting more complex, internal switches are introduced, multiple ports might be supported within the same flow engine namespace. From the DPDK points of view, it means the packets might be sent on one eth_dev port and received on the other one, and the packet path inside the flow engine entirely belongs to the same hardware device. The simplest example is SR-IOV with PF, VFs and the representors. And there is a brilliant opportunity to provide some out-of-band channel to transfer some extra data from one port to another one, besides the packet data itself. And applications would like to use this opportunity. It is supposed for application to use trials (with rte_flow_validate) to detect which metadata features (FLAG, MARK, META) actually supported by PMD and underlying hardware. It might depend on PMD configuration, system software, hardware settings, etc., and should be detected in run time. Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:30 +00:00
uint32_t data;
};
/** Default mask for RTE_FLOW_ITEM_TYPE_META. */
#ifndef __cplusplus
static const struct rte_flow_item_meta rte_flow_item_meta_mask = {
ethdev: extend flow metadata Currently, metadata can be set on egress path via mbuf tx_metadata field with PKT_TX_METADATA flag and RTE_FLOW_ITEM_TYPE_META matches metadata. This patch extends the metadata feature usability. 1) RTE_FLOW_ACTION_TYPE_SET_META When supporting multiple tables, Tx metadata can also be set by a rule and matched by another rule. This new action allows metadata to be set as a result of flow match. 2) Metadata on ingress There's also need to support metadata on ingress. Metadata can be set by SET_META action and matched by META item like Tx. The final value set by the action will be delivered to application via metadata dynamic field of mbuf which can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior to use SET_META action. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. If application is going to engage the metadata feature it registers the metadata dynamic fields, then PMD checks the metadata field availability and handles the appropriate fields in datapath. For loopback/hairpin packet, metadata set on Rx/Tx may or may not be propagated to the other path depending on hardware capability. MARK and METADATA look similar and might operate in similar way, but not interacting. Initially, there were proposed two metadata related actions: - RTE_FLOW_ACTION_TYPE_FLAG - RTE_FLOW_ACTION_TYPE_MARK These actions set the special flag in the packet metadata, MARK action stores some specified value in the metadata storage, and, on the packet receiving PMD puts the flag and value to the mbuf and applications can see the packet was threated inside flow engine according to the appropriate RTE flow(s). MARK and FLAG are like some kind of gateway to transfer some per-packet information from the flow engine to the application via receiving datapath. Also, there is the item of type RTE_FLOW_ITEM_TYPE_MARK provided. It allows us to extend the flow match pattern with the capability to match the metadata values set by MARK/FLAG actions on other flows. From the datapath point of view, the MARK and FLAG are related to the receiving side only. It would useful to have the same gateway on the transmitting side and there was the feature of type RTE_FLOW_ITEM_TYPE_META was proposed. The application can fill the field in mbuf and this value will be transferred to some field in the packet metadata inside the flow engine. It did not matter whether these metadata fields are shared because of MARK and META items belonged to different domains (receiving and transmitting) and could be vendor-specific. So far, so good, DPDK proposes some entities to control metadata inside the flow engine and gateways to exchange these values on a per-packet basis via datapaths. As we can see, the MARK and META means are not symmetric, there is absent action which would allow us to set META value on the transmitting path. So, the action of type: - RTE_FLOW_ACTION_TYPE_SET_META was proposed. The next, applications raise the new requirements for packet metadata. The flow ngines are getting more complex, internal switches are introduced, multiple ports might be supported within the same flow engine namespace. From the DPDK points of view, it means the packets might be sent on one eth_dev port and received on the other one, and the packet path inside the flow engine entirely belongs to the same hardware device. The simplest example is SR-IOV with PF, VFs and the representors. And there is a brilliant opportunity to provide some out-of-band channel to transfer some extra data from one port to another one, besides the packet data itself. And applications would like to use this opportunity. It is supposed for application to use trials (with rte_flow_validate) to detect which metadata features (FLAG, MARK, META) actually supported by PMD and underlying hardware. It might depend on PMD configuration, system software, hardware settings, etc., and should be detected in run time. Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:30 +00:00
.data = UINT32_MAX,
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_GTP_PSC.
*
* Matches a GTP PDU extension header with type 0x85.
*/
struct rte_flow_item_gtp_psc {
uint8_t pdu_type; /**< PDU type. */
uint8_t qfi; /**< PPP, RQI, QoS flow identifier. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_GTP_PSC. */
#ifndef __cplusplus
static const struct rte_flow_item_gtp_psc
rte_flow_item_gtp_psc_mask = {
.qfi = 0xff,
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_PPPOE.
*
* Matches a PPPoE header.
*/
struct rte_flow_item_pppoe {
/**
* Version (4b), type (4b).
*/
uint8_t version_type;
uint8_t code; /**< Message type. */
rte_be16_t session_id; /**< Session identifier. */
rte_be16_t length; /**< Payload length. */
};
/**
* RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID.
*
* Matches a PPPoE optional proto_id field.
*
* It only applies to PPPoE session packets.
*
* Normally preceded by any of:
*
* - RTE_FLOW_ITEM_TYPE_PPPOE
* - RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID
*/
struct rte_flow_item_pppoe_proto_id {
rte_be16_t proto_id; /**< PPP protocol identifier. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID. */
#ifndef __cplusplus
static const struct rte_flow_item_pppoe_proto_id
rte_flow_item_pppoe_proto_id_mask = {
.proto_id = RTE_BE16(0xffff),
};
#endif
ethdev: add flow tag A tag is a transient data which can be used during flow match. This can be used to store match result from a previous table so that the same pattern need not be matched again on the next table. Even if outer header is decapsulated on the previous match, the match result can be kept. Some device expose internal registers of its flow processing pipeline and those registers are quite useful for stateful connection tracking as it keeps status of flow matching. Multiple tags are supported by specifying index. Example testpmd commands are: flow create 0 ingress pattern ... / end actions set_tag index 2 value 0xaa00bb mask 0xffff00ff / set_tag index 3 value 0x123456 mask 0xffffff / vxlan_decap / jump group 1 / end flow create 0 ingress pattern ... / end actions set_tag index 2 value 0xcc00 mask 0xff00 / set_tag index 3 value 0x123456 mask 0xffffff / vxlan_decap / jump group 1 / end flow create 0 ingress group 1 pattern tag index is 2 value spec 0xaa00bb value mask 0xffff00ff / eth ... / end actions ... jump group 2 / end flow create 0 ingress group 1 pattern tag index is 2 value spec 0xcc00 value mask 0xff00 / tag index is 3 value spec 0x123456 value mask 0xffffff / eth ... / end actions ... / end flow create 0 ingress group 2 pattern tag index is 3 value spec 0x123456 value mask 0xffffff / eth ... / end actions ... / end Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-10-27 18:42:28 +00:00
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ITEM_TYPE_TAG
*
* Matches a specified tag value at the specified index.
*/
struct rte_flow_item_tag {
uint32_t data;
uint8_t index;
};
/** Default mask for RTE_FLOW_ITEM_TYPE_TAG. */
#ifndef __cplusplus
static const struct rte_flow_item_tag rte_flow_item_tag_mask = {
.data = 0xffffffff,
.index = 0xff,
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_L2TPV3OIP.
*
* Matches a L2TPv3 over IP header.
*/
struct rte_flow_item_l2tpv3oip {
rte_be32_t session_id; /**< Session ID. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_L2TPV3OIP. */
#ifndef __cplusplus
static const struct rte_flow_item_l2tpv3oip rte_flow_item_l2tpv3oip_mask = {
.session_id = RTE_BE32(UINT32_MAX),
};
#endif
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ITEM_TYPE_MARK
*
* Matches an arbitrary integer value which was set using the ``MARK`` action
* in a previously matched rule.
*
* This item can only be specified once as a match criteria as the ``MARK``
* action can only be specified once in a flow action.
*
* This value is arbitrary and application-defined. Maximum allowed value
* depends on the underlying implementation.
*
* Depending on the underlying implementation the MARK item may be supported on
* the physical device, with virtual groups in the PMD or not at all.
*/
struct rte_flow_item_mark {
uint32_t id; /**< Integer value to match against. */
};
ethdev: add flow tag A tag is a transient data which can be used during flow match. This can be used to store match result from a previous table so that the same pattern need not be matched again on the next table. Even if outer header is decapsulated on the previous match, the match result can be kept. Some device expose internal registers of its flow processing pipeline and those registers are quite useful for stateful connection tracking as it keeps status of flow matching. Multiple tags are supported by specifying index. Example testpmd commands are: flow create 0 ingress pattern ... / end actions set_tag index 2 value 0xaa00bb mask 0xffff00ff / set_tag index 3 value 0x123456 mask 0xffffff / vxlan_decap / jump group 1 / end flow create 0 ingress pattern ... / end actions set_tag index 2 value 0xcc00 mask 0xff00 / set_tag index 3 value 0x123456 mask 0xffffff / vxlan_decap / jump group 1 / end flow create 0 ingress group 1 pattern tag index is 2 value spec 0xaa00bb value mask 0xffff00ff / eth ... / end actions ... jump group 2 / end flow create 0 ingress group 1 pattern tag index is 2 value spec 0xcc00 value mask 0xff00 / tag index is 3 value spec 0x123456 value mask 0xffffff / eth ... / end actions ... / end flow create 0 ingress group 2 pattern tag index is 3 value spec 0x123456 value mask 0xffffff / eth ... / end actions ... / end Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-10-27 18:42:28 +00:00
/** Default mask for RTE_FLOW_ITEM_TYPE_MARK. */
#ifndef __cplusplus
static const struct rte_flow_item_mark rte_flow_item_mark_mask = {
.id = 0xffffffff,
};
#endif
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ITEM_TYPE_NSH
*
* Match network service header (NSH), RFC 8300
*
*/
struct rte_flow_item_nsh {
uint32_t version:2;
uint32_t oam_pkt:1;
uint32_t reserved:1;
uint32_t ttl:6;
uint32_t length:6;
uint32_t reserved1:4;
uint32_t mdtype:4;
uint32_t next_proto:8;
uint32_t spi:24;
uint32_t sindex:8;
};
/** Default mask for RTE_FLOW_ITEM_TYPE_NSH. */
#ifndef __cplusplus
static const struct rte_flow_item_nsh rte_flow_item_nsh_mask = {
.mdtype = 0xf,
.next_proto = 0xff,
.spi = 0xffffff,
.sindex = 0xff,
};
#endif
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ITEM_TYPE_IGMP
*
* Match Internet Group Management Protocol (IGMP), RFC 2236
*
*/
struct rte_flow_item_igmp {
uint32_t type:8;
uint32_t max_resp_time:8;
uint32_t checksum:16;
uint32_t group_addr;
};
/** Default mask for RTE_FLOW_ITEM_TYPE_IGMP. */
#ifndef __cplusplus
static const struct rte_flow_item_igmp rte_flow_item_igmp_mask = {
.group_addr = 0xffffffff,
};
#endif
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ITEM_TYPE_AH
*
* Match IP Authentication Header (AH), RFC 4302
*
*/
struct rte_flow_item_ah {
uint32_t next_hdr:8;
uint32_t payload_len:8;
uint32_t reserved:16;
uint32_t spi;
uint32_t seq_num;
};
/** Default mask for RTE_FLOW_ITEM_TYPE_AH. */
#ifndef __cplusplus
static const struct rte_flow_item_ah rte_flow_item_ah_mask = {
.spi = 0xffffffff,
};
#endif
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ITEM_TYPE_PFCP
*
* Match PFCP Header
*/
struct rte_flow_item_pfcp {
uint8_t s_field;
uint8_t msg_type;
rte_be16_t msg_len;
rte_be64_t seid;
};
/** Default mask for RTE_FLOW_ITEM_TYPE_PFCP. */
#ifndef __cplusplus
static const struct rte_flow_item_pfcp rte_flow_item_pfcp_mask = {
.s_field = 0x01,
.seid = RTE_BE64(UINT64_C(0xffffffffffffffff)),
};
#endif
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ITEM_TYPE_ECPRI
*
* Match eCPRI Header
*/
struct rte_flow_item_ecpri {
struct rte_ecpri_combined_msg_hdr hdr;
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ECPRI. */
#ifndef __cplusplus
static const struct rte_flow_item_ecpri rte_flow_item_ecpri_mask = {
.hdr = {
.common = {
.u32 = 0x0,
},
},
};
#endif
/**
* RTE_FLOW_ITEM_TYPE_GENEVE_OPT
*
* Matches a GENEVE Variable Length Option
*/
struct rte_flow_item_geneve_opt {
rte_be16_t option_class;
uint8_t option_type;
uint8_t option_len;
uint32_t *data;
};
/** Default mask for RTE_FLOW_ITEM_TYPE_GENEVE_OPT. */
#ifndef __cplusplus
static const struct rte_flow_item_geneve_opt
rte_flow_item_geneve_opt_mask = {
.option_type = 0xff,
};
#endif
ethdev: add packet integrity check flow rules Currently, DPDK application can offload the checksum check, and report it in the mbuf. However, as more and more applications are offloading some or all logic and action to the HW, there is a need to check the packet integrity so the right decision can be taken. The application logic can be positive meaning if the packet is valid jump / do actions, or negative if packet is not valid jump to SW / do actions (like drop) and add default flow (match all in low priority) that will direct the miss packet to the miss path. Since currently rte_flow works in positive way the assumption is that the positive way will be the common way in this case also. When thinking what is the best API to implement such feature, we need to consider the following (in no specific order): 1. API breakage. 2. Simplicity. 3. Performance. 4. HW capabilities. 5. rte_flow limitation. 6. Flexibility. First option: Add integrity flags to each of the items. For example add checksum_ok to IPv4 item. Pros: 1. No new rte_flow item. 2. Simple in the way that on each item the app can see what checks are available. Cons: 1. API breakage. 2. Increase number of flows, since app can't add global rule and must have dedicated flow for each of the flow combinations, for example matching on ICMP traffic or UDP/TCP traffic with IPv4 / IPv6 will result in 5 flows. Second option: dedicated item Pros: 1. No API breakage, and there will be no for some time due to having extra space. (by using bits) 2. Just one flow to support the ICMP or UDP/TCP traffic with IPv4 / IPv6. 3. Simplicity application can just look at one place to see all possible checks. 4. Allow future support for more tests. Cons: 1. New item, that holds number of fields from different items. For starter the following bits are suggested: 1. packet_ok - means that all HW checks depending on packet layer have passed. This may mean that in some HW such flow should be split to number of flows or fail. 2. l2_ok - all check for layer 2 have passed. 3. l3_ok - all check for layer 3 have passed. If packet doesn't have L3 layer this check should fail. 4. l4_ok - all check for layer 4 have passed. If packet doesn't have L4 layer this check should fail. 5. l2_crc_ok - the layer 2 CRC is O.K. 6. ipv4_csum_ok - IPv4 checksum is O.K. It is possible that the IPv4 checksum will be O.K. but the l3_ok will be 0. It is not possible that checksum will be 0 and the l3_ok will be 1. 7. l4_csum_ok - layer 4 checksum is O.K. 8. l3_len_OK - check that the reported layer 3 length is smaller than the frame length. Example of usage: 1. Check packets from all possible layers for integrity. flow create integrity spec packet_ok = 1 mask packet_ok = 1 ..... 2. Check only packet with layer 4 (UDP / TCP) flow create integrity spec l3_ok = 1, l4_ok = 1 mask l3_ok = 1 l4_ok = 1 Signed-off-by: Ori Kam <orika@nvidia.com> Acked-by: Ferruh Yigit <ferruh.yigit@intel.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 12:44:30 +00:00
struct rte_flow_item_integrity {
/**< Tunnel encapsulation level the item should apply to.
* @see rte_flow_action_rss
*/
uint32_t level;
RTE_STD_C11
union {
__extension__
struct {
/**< The packet is valid after passing all HW checks. */
uint64_t packet_ok:1;
/**< L2 layer is valid after passing all HW checks. */
uint64_t l2_ok:1;
/**< L3 layer is valid after passing all HW checks. */
uint64_t l3_ok:1;
/**< L4 layer is valid after passing all HW checks. */
uint64_t l4_ok:1;
/**< L2 layer CRC is valid. */
uint64_t l2_crc_ok:1;
/**< IPv4 layer checksum is valid. */
uint64_t ipv4_csum_ok:1;
/**< L4 layer checksum is valid. */
uint64_t l4_csum_ok:1;
/**< The l3 length is smaller than the frame length. */
uint64_t l3_len_ok:1;
uint64_t reserved:56;
};
uint64_t value;
};
};
#ifndef __cplusplus
static const struct rte_flow_item_integrity
rte_flow_item_integrity_mask = {
.level = 0,
.value = 0,
};
#endif
ethdev: introduce conntrack flow action and item This commit introduces the conntrack action and item. Usually the HW offloading is stateless. For some stateful offloading like a TCP connection, HW module will help provide the ability of a full offloading w/o SW participation after the connection was established. The basic usage is that in the first flow rule the application should add the conntrack action and jump to the next flow table. In the following flow rule(s) of the next table, the application should use the conntrack item to match on the result. A TCP connection has two directions traffic. To set a conntrack action context correctly, the information of packets from both directions are required. The conntrack action should be created on one ethdev port and supply the peer ethdev port as a parameter to the action. After context created, it could only be used between these two ethdev ports (dual-port mode) or a single port. The application should modify the action via the API "rte_action_handle_update" only when before using it to create a flow rule with conntrack for the opposite direction. This will help the driver to recognize the direction of the flow to be created, especially in the single-port mode, in which case the traffic from both directions will go through the same ethdev port if the application works as an "forwarding engine" but not an end point. There is no need to call the update interface if the subsequent flow rules have nothing to be changed. Query will be supported via "rte_action_handle_query" interface, about the current packets information and connection status. The fields query capabilities depends on the HW. For the packets received during the conntrack setup, it is suggested to re-inject the packets in order to make sure the conntrack module works correctly without missing any packet. Only the valid packets should pass the conntrack, packets with invalid TCP information, like out of window, or with invalid header, like malformed, should not pass. Naming and definition: https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/ netfilter/nf_conntrack_tcp.h https://elixir.bootlin.com/linux/latest/source/net/netfilter/ nf_conntrack_proto_tcp.c Other reference: https://www.usenix.org/legacy/events/sec01/invitedtalks/rooij.pdf Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 17:51:30 +00:00
/**
* The packet is valid after conntrack checking.
*/
#define RTE_FLOW_CONNTRACK_PKT_STATE_VALID RTE_BIT32(0)
/**
* The state of the connection is changed.
*/
#define RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED RTE_BIT32(1)
/**
* Error is detected on this packet for this connection and
* an invalid state is set.
*/
#define RTE_FLOW_CONNTRACK_PKT_STATE_INVALID RTE_BIT32(2)
/**
* The HW connection tracking module is disabled.
* It can be due to application command or an invalid state.
*/
#define RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED RTE_BIT32(3)
/**
* The packet contains some bad field(s) and cannot continue
* with the conntrack module checking.
*/
#define RTE_FLOW_CONNTRACK_PKT_STATE_BAD RTE_BIT32(4)
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ITEM_TYPE_CONNTRACK
*
* Matches the state of a packet after it passed the connection tracking
* examination. The state is a bitmap of one RTE_FLOW_CONNTRACK_PKT_STATE*
* or a reasonable combination of these bits.
*/
struct rte_flow_item_conntrack {
uint32_t flags;
};
/** Default mask for RTE_FLOW_ITEM_TYPE_CONNTRACK. */
#ifndef __cplusplus
static const struct rte_flow_item_conntrack rte_flow_item_conntrack_mask = {
.flags = 0xffffffff,
};
#endif
/**
* Matching pattern item definition.
*
* A pattern is formed by stacking items starting from the lowest protocol
* layer to match. This stacking restriction does not apply to meta items
* which can be placed anywhere in the stack without affecting the meaning
* of the resulting pattern.
*
* Patterns are terminated by END items.
*
* The spec field should be a valid pointer to a structure of the related
* item type. It may remain unspecified (NULL) in many cases to request
* broad (nonspecific) matching. In such cases, last and mask must also be
* set to NULL.
*
* Optionally, last can point to a structure of the same type to define an
* inclusive range. This is mostly supported by integer and address fields,
* may cause errors otherwise. Fields that do not support ranges must be set
* to 0 or to the same value as the corresponding fields in spec.
*
* Only the fields defined to nonzero values in the default masks (see
* rte_flow_item_{name}_mask constants) are considered relevant by
* default. This can be overridden by providing a mask structure of the
* same type with applicable bits set to one. It can also be used to
* partially filter out specific fields (e.g. as an alternate mean to match
* ranges of IP addresses).
*
* Mask is a simple bit-mask applied before interpreting the contents of
* spec and last, which may yield unexpected results if not used
* carefully. For example, if for an IPv4 address field, spec provides
* 10.1.2.3, last provides 10.3.4.5 and mask provides 255.255.0.0, the
* effective range becomes 10.1.0.0 to 10.3.255.255.
*/
struct rte_flow_item {
enum rte_flow_item_type type; /**< Item type. */
const void *spec; /**< Pointer to item specification structure. */
const void *last; /**< Defines an inclusive range (spec to last). */
const void *mask; /**< Bit-mask applied to spec and last. */
};
/**
* Action types.
*
* Each possible action is represented by a type.
* An action can have an associated configuration object.
* Several actions combined in a list can be assigned
* to a flow rule and are performed in order.
*
* They fall in three categories:
*
ethdev: alter behavior of flow API actions This patch makes the following changes to flow rule actions: - List order now matters, they are redefined as performed first to last instead of "all simultaneously". - Repeated actions are now supported (e.g. specifying QUEUE multiple times now duplicates traffic among them). Previously only the last action of any given kind was taken into account. - No more distinction between terminating/non-terminating/meta actions. Flow rules themselves are now defined as always terminating unless a PASSTHRU action is specified. These changes alter the behavior of flow rules in corner cases in order to prepare the flow API for actions that modify traffic contents or properties (e.g. encapsulation, compression) and for which order matter when combined. Previously one would have to do so through multiple flow rules by combining PASSTRHU with priority levels, however this proved overly complex to implement at the PMD level, hence this simpler approach. This breaks ABI compatibility for the following public functions: - rte_flow_create() - rte_flow_validate() PMDs with rte_flow support are modified accordingly: - bnxt: no change, implementation already forbids multiple actions and does not support PASSTHRU. - e1000: no change, same as bnxt. - enic: modified to forbid redundant actions, no support for default drop. - failsafe: no change needed. - i40e: no change, implementation already forbids multiple actions. - ixgbe: same as i40e. - mlx4: modified to forbid multiple fate-deciding actions and drop when unspecified. - mlx5: same as mlx4, with other redundant actions also forbidden. - sfc: same as mlx4. - tap: implementation already complies with the new behavior except for the default pass-through modified as a default drop. Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
2018-04-25 15:27:46 +00:00
* - Actions that modify the fate of matching traffic, for instance by
* dropping or assigning it a specific destination.
*
ethdev: alter behavior of flow API actions This patch makes the following changes to flow rule actions: - List order now matters, they are redefined as performed first to last instead of "all simultaneously". - Repeated actions are now supported (e.g. specifying QUEUE multiple times now duplicates traffic among them). Previously only the last action of any given kind was taken into account. - No more distinction between terminating/non-terminating/meta actions. Flow rules themselves are now defined as always terminating unless a PASSTHRU action is specified. These changes alter the behavior of flow rules in corner cases in order to prepare the flow API for actions that modify traffic contents or properties (e.g. encapsulation, compression) and for which order matter when combined. Previously one would have to do so through multiple flow rules by combining PASSTRHU with priority levels, however this proved overly complex to implement at the PMD level, hence this simpler approach. This breaks ABI compatibility for the following public functions: - rte_flow_create() - rte_flow_validate() PMDs with rte_flow support are modified accordingly: - bnxt: no change, implementation already forbids multiple actions and does not support PASSTHRU. - e1000: no change, same as bnxt. - enic: modified to forbid redundant actions, no support for default drop. - failsafe: no change needed. - i40e: no change, implementation already forbids multiple actions. - ixgbe: same as i40e. - mlx4: modified to forbid multiple fate-deciding actions and drop when unspecified. - mlx5: same as mlx4, with other redundant actions also forbidden. - sfc: same as mlx4. - tap: implementation already complies with the new behavior except for the default pass-through modified as a default drop. Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
2018-04-25 15:27:46 +00:00
* - Actions that modify matching traffic contents or its properties. This
* includes adding/removing encapsulation, encryption, compression and
* marks.
*
ethdev: alter behavior of flow API actions This patch makes the following changes to flow rule actions: - List order now matters, they are redefined as performed first to last instead of "all simultaneously". - Repeated actions are now supported (e.g. specifying QUEUE multiple times now duplicates traffic among them). Previously only the last action of any given kind was taken into account. - No more distinction between terminating/non-terminating/meta actions. Flow rules themselves are now defined as always terminating unless a PASSTHRU action is specified. These changes alter the behavior of flow rules in corner cases in order to prepare the flow API for actions that modify traffic contents or properties (e.g. encapsulation, compression) and for which order matter when combined. Previously one would have to do so through multiple flow rules by combining PASSTRHU with priority levels, however this proved overly complex to implement at the PMD level, hence this simpler approach. This breaks ABI compatibility for the following public functions: - rte_flow_create() - rte_flow_validate() PMDs with rte_flow support are modified accordingly: - bnxt: no change, implementation already forbids multiple actions and does not support PASSTHRU. - e1000: no change, same as bnxt. - enic: modified to forbid redundant actions, no support for default drop. - failsafe: no change needed. - i40e: no change, implementation already forbids multiple actions. - ixgbe: same as i40e. - mlx4: modified to forbid multiple fate-deciding actions and drop when unspecified. - mlx5: same as mlx4, with other redundant actions also forbidden. - sfc: same as mlx4. - tap: implementation already complies with the new behavior except for the default pass-through modified as a default drop. Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
2018-04-25 15:27:46 +00:00
* - Actions related to the flow rule itself, such as updating counters or
* making it non-terminating.
*
ethdev: alter behavior of flow API actions This patch makes the following changes to flow rule actions: - List order now matters, they are redefined as performed first to last instead of "all simultaneously". - Repeated actions are now supported (e.g. specifying QUEUE multiple times now duplicates traffic among them). Previously only the last action of any given kind was taken into account. - No more distinction between terminating/non-terminating/meta actions. Flow rules themselves are now defined as always terminating unless a PASSTHRU action is specified. These changes alter the behavior of flow rules in corner cases in order to prepare the flow API for actions that modify traffic contents or properties (e.g. encapsulation, compression) and for which order matter when combined. Previously one would have to do so through multiple flow rules by combining PASSTRHU with priority levels, however this proved overly complex to implement at the PMD level, hence this simpler approach. This breaks ABI compatibility for the following public functions: - rte_flow_create() - rte_flow_validate() PMDs with rte_flow support are modified accordingly: - bnxt: no change, implementation already forbids multiple actions and does not support PASSTHRU. - e1000: no change, same as bnxt. - enic: modified to forbid redundant actions, no support for default drop. - failsafe: no change needed. - i40e: no change, implementation already forbids multiple actions. - ixgbe: same as i40e. - mlx4: modified to forbid multiple fate-deciding actions and drop when unspecified. - mlx5: same as mlx4, with other redundant actions also forbidden. - sfc: same as mlx4. - tap: implementation already complies with the new behavior except for the default pass-through modified as a default drop. Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
2018-04-25 15:27:46 +00:00
* Flow rules being terminating by default, not specifying any action of the
* fate kind results in undefined behavior. This applies to both ingress and
* egress.
*
ethdev: alter behavior of flow API actions This patch makes the following changes to flow rule actions: - List order now matters, they are redefined as performed first to last instead of "all simultaneously". - Repeated actions are now supported (e.g. specifying QUEUE multiple times now duplicates traffic among them). Previously only the last action of any given kind was taken into account. - No more distinction between terminating/non-terminating/meta actions. Flow rules themselves are now defined as always terminating unless a PASSTHRU action is specified. These changes alter the behavior of flow rules in corner cases in order to prepare the flow API for actions that modify traffic contents or properties (e.g. encapsulation, compression) and for which order matter when combined. Previously one would have to do so through multiple flow rules by combining PASSTRHU with priority levels, however this proved overly complex to implement at the PMD level, hence this simpler approach. This breaks ABI compatibility for the following public functions: - rte_flow_create() - rte_flow_validate() PMDs with rte_flow support are modified accordingly: - bnxt: no change, implementation already forbids multiple actions and does not support PASSTHRU. - e1000: no change, same as bnxt. - enic: modified to forbid redundant actions, no support for default drop. - failsafe: no change needed. - i40e: no change, implementation already forbids multiple actions. - ixgbe: same as i40e. - mlx4: modified to forbid multiple fate-deciding actions and drop when unspecified. - mlx5: same as mlx4, with other redundant actions also forbidden. - sfc: same as mlx4. - tap: implementation already complies with the new behavior except for the default pass-through modified as a default drop. Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
2018-04-25 15:27:46 +00:00
* PASSTHRU, when supported, makes a flow rule non-terminating.
*/
enum rte_flow_action_type {
/**
* End marker for action lists. Prevents further processing of
* actions, thereby ending the list.
*
* No associated configuration structure.
*/
RTE_FLOW_ACTION_TYPE_END,
/**
* Used as a placeholder for convenience. It is ignored and simply
* discarded by PMDs.
*
* No associated configuration structure.
*/
RTE_FLOW_ACTION_TYPE_VOID,
/**
ethdev: alter behavior of flow API actions This patch makes the following changes to flow rule actions: - List order now matters, they are redefined as performed first to last instead of "all simultaneously". - Repeated actions are now supported (e.g. specifying QUEUE multiple times now duplicates traffic among them). Previously only the last action of any given kind was taken into account. - No more distinction between terminating/non-terminating/meta actions. Flow rules themselves are now defined as always terminating unless a PASSTHRU action is specified. These changes alter the behavior of flow rules in corner cases in order to prepare the flow API for actions that modify traffic contents or properties (e.g. encapsulation, compression) and for which order matter when combined. Previously one would have to do so through multiple flow rules by combining PASSTRHU with priority levels, however this proved overly complex to implement at the PMD level, hence this simpler approach. This breaks ABI compatibility for the following public functions: - rte_flow_create() - rte_flow_validate() PMDs with rte_flow support are modified accordingly: - bnxt: no change, implementation already forbids multiple actions and does not support PASSTHRU. - e1000: no change, same as bnxt. - enic: modified to forbid redundant actions, no support for default drop. - failsafe: no change needed. - i40e: no change, implementation already forbids multiple actions. - ixgbe: same as i40e. - mlx4: modified to forbid multiple fate-deciding actions and drop when unspecified. - mlx5: same as mlx4, with other redundant actions also forbidden. - sfc: same as mlx4. - tap: implementation already complies with the new behavior except for the default pass-through modified as a default drop. Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
2018-04-25 15:27:46 +00:00
* Leaves traffic up for additional processing by subsequent flow
* rules; makes a flow rule non-terminating.
*
* No associated configuration structure.
*/
RTE_FLOW_ACTION_TYPE_PASSTHRU,
/**
* RTE_FLOW_ACTION_TYPE_JUMP
*
* Redirects packets to a group on the current device.
*
* See struct rte_flow_action_jump.
*/
RTE_FLOW_ACTION_TYPE_JUMP,
/**
* Attaches an integer value to packets and sets PKT_RX_FDIR and
* PKT_RX_FDIR_ID mbuf flags.
*
* See struct rte_flow_action_mark.
*/
RTE_FLOW_ACTION_TYPE_MARK,
/**
* Flags packets. Similar to MARK without a specific value; only
* sets the PKT_RX_FDIR mbuf flag.
*
* No associated configuration structure.
*/
RTE_FLOW_ACTION_TYPE_FLAG,
/**
* Assigns packets to a given queue index.
*
* See struct rte_flow_action_queue.
*/
RTE_FLOW_ACTION_TYPE_QUEUE,
/**
* Drops packets.
*
* PASSTHRU overrides this action if both are specified.
*
* No associated configuration structure.
*/
RTE_FLOW_ACTION_TYPE_DROP,
/**
ethdev: alter behavior of flow API actions This patch makes the following changes to flow rule actions: - List order now matters, they are redefined as performed first to last instead of "all simultaneously". - Repeated actions are now supported (e.g. specifying QUEUE multiple times now duplicates traffic among them). Previously only the last action of any given kind was taken into account. - No more distinction between terminating/non-terminating/meta actions. Flow rules themselves are now defined as always terminating unless a PASSTHRU action is specified. These changes alter the behavior of flow rules in corner cases in order to prepare the flow API for actions that modify traffic contents or properties (e.g. encapsulation, compression) and for which order matter when combined. Previously one would have to do so through multiple flow rules by combining PASSTRHU with priority levels, however this proved overly complex to implement at the PMD level, hence this simpler approach. This breaks ABI compatibility for the following public functions: - rte_flow_create() - rte_flow_validate() PMDs with rte_flow support are modified accordingly: - bnxt: no change, implementation already forbids multiple actions and does not support PASSTHRU. - e1000: no change, same as bnxt. - enic: modified to forbid redundant actions, no support for default drop. - failsafe: no change needed. - i40e: no change, implementation already forbids multiple actions. - ixgbe: same as i40e. - mlx4: modified to forbid multiple fate-deciding actions and drop when unspecified. - mlx5: same as mlx4, with other redundant actions also forbidden. - sfc: same as mlx4. - tap: implementation already complies with the new behavior except for the default pass-through modified as a default drop. Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com> Reviewed-by: Andrew Rybchenko <arybchenko@solarflare.com>
2018-04-25 15:27:46 +00:00
* Enables counters for this flow rule.
*
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
* These counters can be retrieved and reset through rte_flow_query() or
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* rte_flow_action_handle_query() if the action provided via handle,
* see struct rte_flow_query_count.
*
* See struct rte_flow_action_count.
*/
RTE_FLOW_ACTION_TYPE_COUNT,
/**
* Similar to QUEUE, except RSS is additionally performed on packets
* to spread them among several queues according to the provided
* parameters.
*
* See struct rte_flow_action_rss.
*/
RTE_FLOW_ACTION_TYPE_RSS,
/**
* Directs matching traffic to the physical function (PF) of the
* current device.
*
* No associated configuration structure.
*/
RTE_FLOW_ACTION_TYPE_PF,
/**
* Directs matching traffic to a given virtual function of the
* current device.
*
* See struct rte_flow_action_vf.
*/
RTE_FLOW_ACTION_TYPE_VF,
/**
* Directs packets to a given physical port index of the underlying
* device.
*
* See struct rte_flow_action_phy_port.
*/
RTE_FLOW_ACTION_TYPE_PHY_PORT,
/**
* Directs matching traffic to a given DPDK port ID.
*
* See struct rte_flow_action_port_id.
*/
RTE_FLOW_ACTION_TYPE_PORT_ID,
/**
* Traffic metering and policing (MTR).
*
* See struct rte_flow_action_meter.
* See file rte_mtr.h for MTR object configuration.
*/
RTE_FLOW_ACTION_TYPE_METER,
/**
* Redirects packets to security engine of current device for security
* processing as specified by security session.
*
* See struct rte_flow_action_security.
*/
RTE_FLOW_ACTION_TYPE_SECURITY,
/**
* Implements OFPAT_SET_MPLS_TTL ("MPLS TTL") as defined by the
* OpenFlow Switch Specification.
*
* See struct rte_flow_action_of_set_mpls_ttl.
*/
RTE_FLOW_ACTION_TYPE_OF_SET_MPLS_TTL,
/**
* Implements OFPAT_DEC_MPLS_TTL ("decrement MPLS TTL") as defined
* by the OpenFlow Switch Specification.
*
* No associated configuration structure.
*/
RTE_FLOW_ACTION_TYPE_OF_DEC_MPLS_TTL,
/**
* Implements OFPAT_SET_NW_TTL ("IP TTL") as defined by the OpenFlow
* Switch Specification.
*
* See struct rte_flow_action_of_set_nw_ttl.
*/
RTE_FLOW_ACTION_TYPE_OF_SET_NW_TTL,
/**
* Implements OFPAT_DEC_NW_TTL ("decrement IP TTL") as defined by
* the OpenFlow Switch Specification.
*
* No associated configuration structure.
*/
RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL,
/**
* Implements OFPAT_COPY_TTL_OUT ("copy TTL "outwards" -- from
* next-to-outermost to outermost") as defined by the OpenFlow
* Switch Specification.
*
* No associated configuration structure.
*/
RTE_FLOW_ACTION_TYPE_OF_COPY_TTL_OUT,
/**
* Implements OFPAT_COPY_TTL_IN ("copy TTL "inwards" -- from
* outermost to next-to-outermost") as defined by the OpenFlow
* Switch Specification.
*
* No associated configuration structure.
*/
RTE_FLOW_ACTION_TYPE_OF_COPY_TTL_IN,
/**
* Implements OFPAT_POP_VLAN ("pop the outer VLAN tag") as defined
* by the OpenFlow Switch Specification.
*
* No associated configuration structure.
*/
RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
/**
* Implements OFPAT_PUSH_VLAN ("push a new VLAN tag") as defined by
* the OpenFlow Switch Specification.
*
* See struct rte_flow_action_of_push_vlan.
*/
RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
/**
* Implements OFPAT_SET_VLAN_VID ("set the 802.1q VLAN id") as
* defined by the OpenFlow Switch Specification.
*
* See struct rte_flow_action_of_set_vlan_vid.
*/
RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
/**
* Implements OFPAT_SET_LAN_PCP ("set the 802.1q priority") as
* defined by the OpenFlow Switch Specification.
*
* See struct rte_flow_action_of_set_vlan_pcp.
*/
RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
/**
* Implements OFPAT_POP_MPLS ("pop the outer MPLS tag") as defined
* by the OpenFlow Switch Specification.
*
* See struct rte_flow_action_of_pop_mpls.
*/
RTE_FLOW_ACTION_TYPE_OF_POP_MPLS,
/**
* Implements OFPAT_PUSH_MPLS ("push a new MPLS tag") as defined by
* the OpenFlow Switch Specification.
*
* See struct rte_flow_action_of_push_mpls.
*/
RTE_FLOW_ACTION_TYPE_OF_PUSH_MPLS,
/**
* Encapsulate flow in VXLAN tunnel as defined in
* rte_flow_action_vxlan_encap action structure.
*
* See struct rte_flow_action_vxlan_encap.
*/
RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
/**
* Decapsulate outer most VXLAN tunnel from matched flow.
*
* If flow pattern does not define a valid VXLAN tunnel (as specified by
* RFC7348) then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION
* error.
*/
RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
/**
* Encapsulate flow in NVGRE tunnel defined in the
* rte_flow_action_nvgre_encap action structure.
*
* See struct rte_flow_action_nvgre_encap.
*/
RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP,
/**
* Decapsulate outer most NVGRE tunnel from matched flow.
*
* If flow pattern does not define a valid NVGRE tunnel (as specified by
* RFC7637) then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION
* error.
*/
RTE_FLOW_ACTION_TYPE_NVGRE_DECAP,
ethdev: add raw encapsulation action Currenlty the encap/decap actions only support encapsulation of VXLAN and NVGRE L2 packets (L2 encapsulation is where the inner packet has a valid Ethernet header, while L3 encapsulation is where the inner packet doesn't have the Ethernet header). In addtion the parameter to to the encap action is a list of rte items, this results in 2 extra translation, between the application to the actioni and from the action to the NIC. This results in negative impact on the insertion performance. Looking forward there are going to be a need to support many more tunnel encapsulations. For example MPLSoGRE, MPLSoUDP. Adding the new encapsulation will result in duplication of code. For example the code for handling NVGRE and VXLAN are exactly the same, and each new tunnel will have the same exact structure. This patch introduce a raw encapsulation that can support L2 tunnel types and L3 tunnel types. In addtion the new encapsulations commands are using raw buffer inorder to save the converstion time, both for the application and the PMD. In order to encapsulate L3 tunnel type there is a need to use both actions in the same rule: The decap to remove the L2 of the original packet, and then encap command to encapsulate the packet with the tunnel. For decap L3 there is also a need to use both commands in the same flow first the decap command to remove the outer tunnel header and then encap to add the L2 header. Signed-off-by: Ori Kam <orika@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
2018-10-22 17:38:09 +00:00
/**
* Add outer header whose template is provided in its data buffer
*
* See struct rte_flow_action_raw_encap.
*/
RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
/**
* Remove outer header whose template is provided in its data buffer.
*
* See struct rte_flow_action_raw_decap
*/
RTE_FLOW_ACTION_TYPE_RAW_DECAP,
/**
* Modify IPv4 source address in the outermost IPv4 header.
*
* If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_IPV4,
* then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error.
*
* See struct rte_flow_action_set_ipv4.
*/
RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
/**
* Modify IPv4 destination address in the outermost IPv4 header.
*
* If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_IPV4,
* then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error.
*
* See struct rte_flow_action_set_ipv4.
*/
RTE_FLOW_ACTION_TYPE_SET_IPV4_DST,
/**
* Modify IPv6 source address in the outermost IPv6 header.
*
* If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_IPV6,
* then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error.
*
* See struct rte_flow_action_set_ipv6.
*/
RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC,
/**
* Modify IPv6 destination address in the outermost IPv6 header.
*
* If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_IPV6,
* then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error.
*
* See struct rte_flow_action_set_ipv6.
*/
RTE_FLOW_ACTION_TYPE_SET_IPV6_DST,
/**
* Modify source port number in the outermost TCP/UDP header.
*
* If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_TCP
* or RTE_FLOW_ITEM_TYPE_UDP, then the PMD should return a
* RTE_FLOW_ERROR_TYPE_ACTION error.
*
* See struct rte_flow_action_set_tp.
*/
RTE_FLOW_ACTION_TYPE_SET_TP_SRC,
/**
* Modify destination port number in the outermost TCP/UDP header.
*
* If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_TCP
* or RTE_FLOW_ITEM_TYPE_UDP, then the PMD should return a
* RTE_FLOW_ERROR_TYPE_ACTION error.
*
* See struct rte_flow_action_set_tp.
*/
RTE_FLOW_ACTION_TYPE_SET_TP_DST,
/**
* Swap the source and destination MAC addresses in the outermost
* Ethernet header.
*
* If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_ETH,
* then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error.
*
* No associated configuration structure.
*/
RTE_FLOW_ACTION_TYPE_MAC_SWAP,
/**
* Decrease TTL value directly
*
* No associated configuration structure.
*/
RTE_FLOW_ACTION_TYPE_DEC_TTL,
/**
* Set TTL value
*
* See struct rte_flow_action_set_ttl
*/
RTE_FLOW_ACTION_TYPE_SET_TTL,
/**
* Set source MAC address from matched flow.
*
* If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_ETH,
* the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error.
*
* See struct rte_flow_action_set_mac.
*/
RTE_FLOW_ACTION_TYPE_SET_MAC_SRC,
/**
* Set destination MAC address from matched flow.
*
* If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_ETH,
* the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error.
*
* See struct rte_flow_action_set_mac.
*/
RTE_FLOW_ACTION_TYPE_SET_MAC_DST,
/**
* Increase sequence number in the outermost TCP header.
*
* Action configuration specifies the value to increase
* TCP sequence number as a big-endian 32 bit integer.
*
* @p conf type:
* @code rte_be32_t * @endcode
*
* Using this action on non-matching traffic will result in
* undefined behavior.
*/
RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ,
/**
* Decrease sequence number in the outermost TCP header.
*
* Action configuration specifies the value to decrease
* TCP sequence number as a big-endian 32 bit integer.
*
* @p conf type:
* @code rte_be32_t * @endcode
*
* Using this action on non-matching traffic will result in
* undefined behavior.
*/
RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ,
/**
* Increase acknowledgment number in the outermost TCP header.
*
* Action configuration specifies the value to increase
* TCP acknowledgment number as a big-endian 32 bit integer.
*
* @p conf type:
* @code rte_be32_t * @endcode
* Using this action on non-matching traffic will result in
* undefined behavior.
*/
RTE_FLOW_ACTION_TYPE_INC_TCP_ACK,
/**
* Decrease acknowledgment number in the outermost TCP header.
*
* Action configuration specifies the value to decrease
* TCP acknowledgment number as a big-endian 32 bit integer.
*
* @p conf type:
* @code rte_be32_t * @endcode
*
* Using this action on non-matching traffic will result in
* undefined behavior.
*/
RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK,
ethdev: add flow tag A tag is a transient data which can be used during flow match. This can be used to store match result from a previous table so that the same pattern need not be matched again on the next table. Even if outer header is decapsulated on the previous match, the match result can be kept. Some device expose internal registers of its flow processing pipeline and those registers are quite useful for stateful connection tracking as it keeps status of flow matching. Multiple tags are supported by specifying index. Example testpmd commands are: flow create 0 ingress pattern ... / end actions set_tag index 2 value 0xaa00bb mask 0xffff00ff / set_tag index 3 value 0x123456 mask 0xffffff / vxlan_decap / jump group 1 / end flow create 0 ingress pattern ... / end actions set_tag index 2 value 0xcc00 mask 0xff00 / set_tag index 3 value 0x123456 mask 0xffffff / vxlan_decap / jump group 1 / end flow create 0 ingress group 1 pattern tag index is 2 value spec 0xaa00bb value mask 0xffff00ff / eth ... / end actions ... jump group 2 / end flow create 0 ingress group 1 pattern tag index is 2 value spec 0xcc00 value mask 0xff00 / tag index is 3 value spec 0x123456 value mask 0xffffff / eth ... / end actions ... / end flow create 0 ingress group 2 pattern tag index is 3 value spec 0x123456 value mask 0xffffff / eth ... / end actions ... / end Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-10-27 18:42:28 +00:00
/**
* Set Tag.
*
* Tag is for internal flow usage only and
* is not delivered to the application.
*
* See struct rte_flow_action_set_tag.
*/
RTE_FLOW_ACTION_TYPE_SET_TAG,
ethdev: extend flow metadata Currently, metadata can be set on egress path via mbuf tx_metadata field with PKT_TX_METADATA flag and RTE_FLOW_ITEM_TYPE_META matches metadata. This patch extends the metadata feature usability. 1) RTE_FLOW_ACTION_TYPE_SET_META When supporting multiple tables, Tx metadata can also be set by a rule and matched by another rule. This new action allows metadata to be set as a result of flow match. 2) Metadata on ingress There's also need to support metadata on ingress. Metadata can be set by SET_META action and matched by META item like Tx. The final value set by the action will be delivered to application via metadata dynamic field of mbuf which can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior to use SET_META action. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. If application is going to engage the metadata feature it registers the metadata dynamic fields, then PMD checks the metadata field availability and handles the appropriate fields in datapath. For loopback/hairpin packet, metadata set on Rx/Tx may or may not be propagated to the other path depending on hardware capability. MARK and METADATA look similar and might operate in similar way, but not interacting. Initially, there were proposed two metadata related actions: - RTE_FLOW_ACTION_TYPE_FLAG - RTE_FLOW_ACTION_TYPE_MARK These actions set the special flag in the packet metadata, MARK action stores some specified value in the metadata storage, and, on the packet receiving PMD puts the flag and value to the mbuf and applications can see the packet was threated inside flow engine according to the appropriate RTE flow(s). MARK and FLAG are like some kind of gateway to transfer some per-packet information from the flow engine to the application via receiving datapath. Also, there is the item of type RTE_FLOW_ITEM_TYPE_MARK provided. It allows us to extend the flow match pattern with the capability to match the metadata values set by MARK/FLAG actions on other flows. From the datapath point of view, the MARK and FLAG are related to the receiving side only. It would useful to have the same gateway on the transmitting side and there was the feature of type RTE_FLOW_ITEM_TYPE_META was proposed. The application can fill the field in mbuf and this value will be transferred to some field in the packet metadata inside the flow engine. It did not matter whether these metadata fields are shared because of MARK and META items belonged to different domains (receiving and transmitting) and could be vendor-specific. So far, so good, DPDK proposes some entities to control metadata inside the flow engine and gateways to exchange these values on a per-packet basis via datapaths. As we can see, the MARK and META means are not symmetric, there is absent action which would allow us to set META value on the transmitting path. So, the action of type: - RTE_FLOW_ACTION_TYPE_SET_META was proposed. The next, applications raise the new requirements for packet metadata. The flow ngines are getting more complex, internal switches are introduced, multiple ports might be supported within the same flow engine namespace. From the DPDK points of view, it means the packets might be sent on one eth_dev port and received on the other one, and the packet path inside the flow engine entirely belongs to the same hardware device. The simplest example is SR-IOV with PF, VFs and the representors. And there is a brilliant opportunity to provide some out-of-band channel to transfer some extra data from one port to another one, besides the packet data itself. And applications would like to use this opportunity. It is supposed for application to use trials (with rte_flow_validate) to detect which metadata features (FLAG, MARK, META) actually supported by PMD and underlying hardware. It might depend on PMD configuration, system software, hardware settings, etc., and should be detected in run time. Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:30 +00:00
/**
* Set metadata on ingress or egress path.
*
* See struct rte_flow_action_set_meta.
*/
RTE_FLOW_ACTION_TYPE_SET_META,
/**
* Modify IPv4 DSCP in the outermost IP header.
*
* If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_IPV4,
* then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error.
*
* See struct rte_flow_action_set_dscp.
*/
RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP,
/**
* Modify IPv6 DSCP in the outermost IP header.
*
* If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_IPV6,
* then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error.
*
* See struct rte_flow_action_set_dscp.
*/
RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP,
/**
* Report as aged flow if timeout passed without any matching on the
* flow.
*
* See struct rte_flow_action_age.
* See function rte_flow_get_aged_flows
* see enum RTE_ETH_EVENT_FLOW_AGED
* See struct rte_flow_query_age
*/
RTE_FLOW_ACTION_TYPE_AGE,
/**
* The matching packets will be duplicated with specified ratio and
* applied with own set of actions with a fate action.
*
* See struct rte_flow_action_sample.
*/
RTE_FLOW_ACTION_TYPE_SAMPLE,
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
/**
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* @deprecated
* @see RTE_FLOW_ACTION_TYPE_INDIRECT
*
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
* Describe action shared across multiple flow rules.
*
* Allow multiple rules reference the same action by handle (see
* struct rte_flow_shared_action).
*/
RTE_FLOW_ACTION_TYPE_SHARED,
ethdev: introduce generic modify flow action Implement the generic modify flow API to allow manipulations on an arbitrary header field (as well as mark, metadata or tag) using data from another field or a user-specified value. This generic modify mechanism removes the necessity to implement a separate RTE Flow action every time we need to modify a new packet field in the future. Supported operation are: - set: copy data from source to destination. - add: integer addition, stores the result in destination. - sub: integer subtraction, stores the result in destination. The field ID is used to specify the desired source/destination packet field in order to simplify the API for various encapsulation models. Specifying the packet field ID with the needed encapsulation level is able to quickly get a packet field for any inner packet header. Alternatively, the special ID (ITEM_START) can be used to point to the very beginning of a packet. This ID in conjunction with the offset parameter provides great flexibility to copy/modify any part of a packet as needed. The number of bits to use from a source as well as the offset can be be specified to allow a partial copy or dividing a big packet field into multiple small fields (e.g. copying 128 bits of IPv6 to 4 tags). An immediate value (or a pointer to it) can be specified instead of the level and the offset for the special FIELD_VALUE ID (or FIELD_POINTER). Can be used as a source only. Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Thomas Monjalon <thomas@monjalon.net> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
2021-01-18 21:40:25 +00:00
/**
* Modify a packet header field, tag, mark or metadata.
*
* Allow the modification of an arbitrary header field via
* set, add and sub operations or copying its content into
* tag, meta or mark for future processing.
*
* See struct rte_flow_action_modify_field.
*/
RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
/**
* An action handle is referenced in a rule through an indirect action.
*
* The same action handle may be used in multiple rules for the same
* or different ethdev ports.
*/
RTE_FLOW_ACTION_TYPE_INDIRECT,
ethdev: introduce conntrack flow action and item This commit introduces the conntrack action and item. Usually the HW offloading is stateless. For some stateful offloading like a TCP connection, HW module will help provide the ability of a full offloading w/o SW participation after the connection was established. The basic usage is that in the first flow rule the application should add the conntrack action and jump to the next flow table. In the following flow rule(s) of the next table, the application should use the conntrack item to match on the result. A TCP connection has two directions traffic. To set a conntrack action context correctly, the information of packets from both directions are required. The conntrack action should be created on one ethdev port and supply the peer ethdev port as a parameter to the action. After context created, it could only be used between these two ethdev ports (dual-port mode) or a single port. The application should modify the action via the API "rte_action_handle_update" only when before using it to create a flow rule with conntrack for the opposite direction. This will help the driver to recognize the direction of the flow to be created, especially in the single-port mode, in which case the traffic from both directions will go through the same ethdev port if the application works as an "forwarding engine" but not an end point. There is no need to call the update interface if the subsequent flow rules have nothing to be changed. Query will be supported via "rte_action_handle_query" interface, about the current packets information and connection status. The fields query capabilities depends on the HW. For the packets received during the conntrack setup, it is suggested to re-inject the packets in order to make sure the conntrack module works correctly without missing any packet. Only the valid packets should pass the conntrack, packets with invalid TCP information, like out of window, or with invalid header, like malformed, should not pass. Naming and definition: https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/ netfilter/nf_conntrack_tcp.h https://elixir.bootlin.com/linux/latest/source/net/netfilter/ nf_conntrack_proto_tcp.c Other reference: https://www.usenix.org/legacy/events/sec01/invitedtalks/rooij.pdf Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 17:51:30 +00:00
/**
* [META]
*
* Enable tracking a TCP connection state.
*
* @see struct rte_flow_action_conntrack.
*/
RTE_FLOW_ACTION_TYPE_CONNTRACK,
};
/**
* RTE_FLOW_ACTION_TYPE_MARK
*
* Attaches an integer value to packets and sets PKT_RX_FDIR and
* PKT_RX_FDIR_ID mbuf flags.
*
* This value is arbitrary and application-defined. Maximum allowed value
* depends on the underlying implementation. It is returned in the
* hash.fdir.hi mbuf field.
*/
struct rte_flow_action_mark {
uint32_t id; /**< Integer value to return with packets. */
};
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ACTION_TYPE_JUMP
*
* Redirects packets to a group on the current device.
*
* In a hierarchy of groups, which can be used to represent physical or logical
* flow tables on the device, this action allows the action to be a redirect to
* a group on that device.
*/
struct rte_flow_action_jump {
uint32_t group;
};
/**
* RTE_FLOW_ACTION_TYPE_QUEUE
*
* Assign packets to a given queue index.
*/
struct rte_flow_action_queue {
uint16_t index; /**< Queue index to use. */
};
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ACTION_TYPE_AGE
*
* Report flow as aged-out if timeout passed without any matching
* on the flow. RTE_ETH_EVENT_FLOW_AGED event is triggered when a
* port detects new aged-out flows.
*
* The flow context and the flow handle will be reported by the
* rte_flow_get_aged_flows API.
*/
struct rte_flow_action_age {
uint32_t timeout:24; /**< Time in seconds. */
uint32_t reserved:8; /**< Reserved, must be zero. */
void *context;
/**< The user flow context, NULL means the rte_flow pointer. */
};
/**
* RTE_FLOW_ACTION_TYPE_AGE (query)
*
* Query structure to retrieve the aging status information of a
* shared AGE action, or a flow rule using the AGE action.
*/
struct rte_flow_query_age {
uint32_t reserved:6; /**< Reserved, must be zero. */
uint32_t aged:1; /**< 1 if aging timeout expired, 0 otherwise. */
uint32_t sec_since_last_hit_valid:1;
/**< sec_since_last_hit value is valid. */
uint32_t sec_since_last_hit:24; /**< Seconds since last traffic hit. */
};
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ACTION_TYPE_COUNT
*
* Adds a counter action to a matched flow.
*
* If more than one count action is specified in a single flow rule, then each
* action must specify a unique id.
*
* Counters can be retrieved and reset through ``rte_flow_query()``, see
* ``struct rte_flow_query_count``.
*
* @deprecated Shared attribute is deprecated, use generic
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* RTE_FLOW_ACTION_TYPE_INDIRECT action.
*
* The shared flag indicates whether the counter is unique to the flow rule the
* action is specified with, or whether it is a shared counter.
*
* For a count action with the shared flag set, then then a global device
* namespace is assumed for the counter id, so that any matched flow rules using
* a count action with the same counter id on the same port will contribute to
* that counter.
*
* For ports within the same switch domain then the counter id namespace extends
* to all ports within that switch domain.
*/
struct rte_flow_action_count {
/** @deprecated Share counter ID with other flow rules. */
uint32_t shared:1;
uint32_t reserved:31; /**< Reserved, must be zero. */
uint32_t id; /**< Counter ID. */
};
/**
* RTE_FLOW_ACTION_TYPE_COUNT (query)
*
* Query structure to retrieve and reset flow rule counters.
*/
struct rte_flow_query_count {
uint32_t reset:1; /**< Reset counters after query [in]. */
uint32_t hits_set:1; /**< hits field is set [out]. */
uint32_t bytes_set:1; /**< bytes field is set [out]. */
uint32_t reserved:29; /**< Reserved, must be zero [in, out]. */
uint64_t hits; /**< Number of hits for this rule [out]. */
uint64_t bytes; /**< Number of bytes through this rule [out]. */
};
/**
* Hash function types.
*/
enum rte_eth_hash_function {
RTE_ETH_HASH_FUNCTION_DEFAULT = 0,
RTE_ETH_HASH_FUNCTION_TOEPLITZ, /**< Toeplitz */
RTE_ETH_HASH_FUNCTION_SIMPLE_XOR, /**< Simple XOR */
/**
* Symmetric Toeplitz: src, dst will be replaced by
* xor(src, dst). For the case with src/dst only,
* src or dst address will xor with zero pair.
*/
RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ,
RTE_ETH_HASH_FUNCTION_MAX,
};
/**
* RTE_FLOW_ACTION_TYPE_RSS
*
* Similar to QUEUE, except RSS is additionally performed on packets to
* spread them among several queues according to the provided parameters.
*
* Unlike global RSS settings used by other DPDK APIs, unsetting the
* @p types field does not disable RSS in a flow rule. Doing so instead
* requests safe unspecified "best-effort" settings from the underlying PMD,
* which depending on the flow rule, may result in anything ranging from
* empty (single queue) to all-inclusive RSS.
*
* Note: RSS hash result is stored in the hash.rss mbuf field which overlaps
* hash.fdir.lo. Since the MARK action sets the hash.fdir.hi field only,
* both can be requested simultaneously.
*/
struct rte_flow_action_rss {
enum rte_eth_hash_function func; /**< RSS hash function to apply. */
/**
* Packet encapsulation level RSS hash @p types apply to.
*
* - @p 0 requests the default behavior. Depending on the packet
* type, it can mean outermost, innermost, anything in between or
* even no RSS.
*
* It basically stands for the innermost encapsulation level RSS
* can be performed on according to PMD and device capabilities.
*
* - @p 1 requests RSS to be performed on the outermost packet
* encapsulation level.
*
* - @p 2 and subsequent values request RSS to be performed on the
* specified inner packet encapsulation level, from outermost to
* innermost (lower to higher values).
*
* Values other than @p 0 are not necessarily supported.
*
* Requesting a specific RSS level on unrecognized traffic results
* in undefined behavior. For predictable results, it is recommended
* to make the flow rule pattern match packet headers up to the
* requested encapsulation level so that only matching traffic goes
* through.
*/
uint32_t level;
uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
uint32_t key_len; /**< Hash key length in bytes. */
uint32_t queue_num; /**< Number of entries in @p queue. */
const uint8_t *key; /**< Hash key. */
const uint16_t *queue; /**< Queue indices to use. */
};
/**
* RTE_FLOW_ACTION_TYPE_VF
*
* Directs matching traffic to a given virtual function of the current
* device.
*
* Packets matched by a VF pattern item can be redirected to their original
* VF ID instead of the specified one. This parameter may not be available
* and is not guaranteed to work properly if the VF part is matched by a
* prior flow rule or if packets are not addressed to a VF in the first
* place.
*/
struct rte_flow_action_vf {
uint32_t original:1; /**< Use original VF ID if possible. */
uint32_t reserved:31; /**< Reserved, must be zero. */
uint32_t id; /**< VF ID. */
};
/**
* RTE_FLOW_ACTION_TYPE_PHY_PORT
*
* Directs packets to a given physical port index of the underlying
* device.
*
* @see RTE_FLOW_ITEM_TYPE_PHY_PORT
*/
struct rte_flow_action_phy_port {
uint32_t original:1; /**< Use original port index if possible. */
uint32_t reserved:31; /**< Reserved, must be zero. */
uint32_t index; /**< Physical port index. */
};
/**
* RTE_FLOW_ACTION_TYPE_PORT_ID
*
* Directs matching traffic to a given DPDK port ID.
*
* @see RTE_FLOW_ITEM_TYPE_PORT_ID
*/
struct rte_flow_action_port_id {
uint32_t original:1; /**< Use original DPDK port ID if possible. */
uint32_t reserved:31; /**< Reserved, must be zero. */
uint32_t id; /**< DPDK port ID. */
};
/**
* RTE_FLOW_ACTION_TYPE_METER
*
* Traffic metering and policing (MTR).
*
* Packets matched by items of this type can be either dropped or passed to the
* next item with their color set by the MTR object.
*/
struct rte_flow_action_meter {
uint32_t mtr_id; /**< MTR object ID created with rte_mtr_create(). */
};
/**
* RTE_FLOW_ACTION_TYPE_SECURITY
*
* Perform the security action on flows matched by the pattern items
* according to the configuration of the security session.
*
* This action modifies the payload of matched flows. For INLINE_CRYPTO, the
* security protocol headers and IV are fully provided by the application as
* specified in the flow pattern. The payload of matching packets is
* encrypted on egress, and decrypted and authenticated on ingress.
* For INLINE_PROTOCOL, the security protocol is fully offloaded to HW,
* providing full encapsulation and decapsulation of packets in security
* protocols. The flow pattern specifies both the outer security header fields
* and the inner packet fields. The security session specified in the action
* must match the pattern parameters.
*
* The security session specified in the action must be created on the same
* port as the flow action that is being specified.
*
* The ingress/egress flow attribute should match that specified in the
* security session if the security session supports the definition of the
* direction.
*
* Multiple flows can be configured to use the same security session.
*
* The NULL value is allowed for security session. If security session is NULL,
* then SPI field in ESP flow item and IP addresses in flow items 'IPv4' and
* 'IPv6' will be allowed to be a range. The rule thus created can enable
* security processing on multiple flows.
*/
struct rte_flow_action_security {
void *security_session; /**< Pointer to security session structure. */
};
/**
* RTE_FLOW_ACTION_TYPE_OF_SET_MPLS_TTL
*
* Implements OFPAT_SET_MPLS_TTL ("MPLS TTL") as defined by the OpenFlow
* Switch Specification.
*/
struct rte_flow_action_of_set_mpls_ttl {
uint8_t mpls_ttl; /**< MPLS TTL. */
};
/**
* RTE_FLOW_ACTION_TYPE_OF_SET_NW_TTL
*
* Implements OFPAT_SET_NW_TTL ("IP TTL") as defined by the OpenFlow Switch
* Specification.
*/
struct rte_flow_action_of_set_nw_ttl {
uint8_t nw_ttl; /**< IP TTL. */
};
/**
* RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN
*
* Implements OFPAT_PUSH_VLAN ("push a new VLAN tag") as defined by the
* OpenFlow Switch Specification.
*/
struct rte_flow_action_of_push_vlan {
rte_be16_t ethertype; /**< EtherType. */
};
/**
* RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID
*
* Implements OFPAT_SET_VLAN_VID ("set the 802.1q VLAN id") as defined by
* the OpenFlow Switch Specification.
*/
struct rte_flow_action_of_set_vlan_vid {
rte_be16_t vlan_vid; /**< VLAN id. */
};
/**
* RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP
*
* Implements OFPAT_SET_LAN_PCP ("set the 802.1q priority") as defined by
* the OpenFlow Switch Specification.
*/
struct rte_flow_action_of_set_vlan_pcp {
uint8_t vlan_pcp; /**< VLAN priority. */
};
/**
* RTE_FLOW_ACTION_TYPE_OF_POP_MPLS
*
* Implements OFPAT_POP_MPLS ("pop the outer MPLS tag") as defined by the
* OpenFlow Switch Specification.
*/
struct rte_flow_action_of_pop_mpls {
rte_be16_t ethertype; /**< EtherType. */
};
/**
* RTE_FLOW_ACTION_TYPE_OF_PUSH_MPLS
*
* Implements OFPAT_PUSH_MPLS ("push a new MPLS tag") as defined by the
* OpenFlow Switch Specification.
*/
struct rte_flow_action_of_push_mpls {
rte_be16_t ethertype; /**< EtherType. */
};
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
*
* VXLAN tunnel end-point encapsulation data definition
*
* The tunnel definition is provided through the flow item pattern, the
* provided pattern must conform to RFC7348 for the tunnel specified. The flow
* definition must be provided in order from the RTE_FLOW_ITEM_TYPE_ETH
* definition up the end item which is specified by RTE_FLOW_ITEM_TYPE_END.
*
* The mask field allows user to specify which fields in the flow item
* definitions can be ignored and which have valid data and can be used
* verbatim.
*
* Note: the last field is not used in the definition of a tunnel and can be
* ignored.
*
* Valid flow definition for RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP include:
*
* - ETH / IPV4 / UDP / VXLAN / END
* - ETH / IPV6 / UDP / VXLAN / END
* - ETH / VLAN / IPV4 / UDP / VXLAN / END
*
*/
struct rte_flow_action_vxlan_encap {
/**
* Encapsulating vxlan tunnel definition
* (terminated by the END pattern item).
*/
struct rte_flow_item *definition;
};
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP
*
* NVGRE tunnel end-point encapsulation data definition
*
* The tunnel definition is provided through the flow item pattern the
* provided pattern must conform with RFC7637. The flow definition must be
* provided in order from the RTE_FLOW_ITEM_TYPE_ETH definition up the end item
* which is specified by RTE_FLOW_ITEM_TYPE_END.
*
* The mask field allows user to specify which fields in the flow item
* definitions can be ignored and which have valid data and can be used
* verbatim.
*
* Note: the last field is not used in the definition of a tunnel and can be
* ignored.
*
* Valid flow definition for RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP include:
*
* - ETH / IPV4 / NVGRE / END
* - ETH / VLAN / IPV6 / NVGRE / END
*
*/
struct rte_flow_action_nvgre_encap {
/**
* Encapsulating vxlan tunnel definition
* (terminated by the END pattern item).
*/
struct rte_flow_item *definition;
};
ethdev: add raw encapsulation action Currenlty the encap/decap actions only support encapsulation of VXLAN and NVGRE L2 packets (L2 encapsulation is where the inner packet has a valid Ethernet header, while L3 encapsulation is where the inner packet doesn't have the Ethernet header). In addtion the parameter to to the encap action is a list of rte items, this results in 2 extra translation, between the application to the actioni and from the action to the NIC. This results in negative impact on the insertion performance. Looking forward there are going to be a need to support many more tunnel encapsulations. For example MPLSoGRE, MPLSoUDP. Adding the new encapsulation will result in duplication of code. For example the code for handling NVGRE and VXLAN are exactly the same, and each new tunnel will have the same exact structure. This patch introduce a raw encapsulation that can support L2 tunnel types and L3 tunnel types. In addtion the new encapsulations commands are using raw buffer inorder to save the converstion time, both for the application and the PMD. In order to encapsulate L3 tunnel type there is a need to use both actions in the same rule: The decap to remove the L2 of the original packet, and then encap command to encapsulate the packet with the tunnel. For decap L3 there is also a need to use both commands in the same flow first the decap command to remove the outer tunnel header and then encap to add the L2 header. Signed-off-by: Ori Kam <orika@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
2018-10-22 17:38:09 +00:00
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ACTION_TYPE_RAW_ENCAP
*
* Raw tunnel end-point encapsulation data definition.
*
* The data holds the headers definitions to be applied on the packet.
* The data must start with ETH header up to the tunnel item header itself.
* When used right after RAW_DECAP (for decapsulating L3 tunnel type for
* example MPLSoGRE) the data will just hold layer 2 header.
*
* The preserve parameter holds which bits in the packet the PMD is not allowed
* to change, this parameter can also be NULL and then the PMD is allowed
* to update any field.
*
* size holds the number of bytes in @p data and @p preserve.
*/
struct rte_flow_action_raw_encap {
uint8_t *data; /**< Encapsulation data. */
uint8_t *preserve; /**< Bit-mask of @p data to preserve on output. */
size_t size; /**< Size of @p data and @p preserve. */
};
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ACTION_TYPE_RAW_DECAP
*
* Raw tunnel end-point decapsulation data definition.
*
* The data holds the headers definitions to be removed from the packet.
* The data must start with ETH header up to the tunnel item header itself.
* When used right before RAW_DECAP (for encapsulating L3 tunnel type for
* example MPLSoGRE) the data will just hold layer 2 header.
*
* size holds the number of bytes in @p data.
*/
struct rte_flow_action_raw_decap {
uint8_t *data; /**< Encapsulation data. */
size_t size; /**< Size of @p data and @p preserve. */
};
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
* RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
*
* Allows modification of IPv4 source (RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC)
* and destination address (RTE_FLOW_ACTION_TYPE_SET_IPV4_DST) in the
* specified outermost IPv4 header.
*/
struct rte_flow_action_set_ipv4 {
rte_be32_t ipv4_addr;
};
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
* RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
*
* Allows modification of IPv6 source (RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC)
* and destination address (RTE_FLOW_ACTION_TYPE_SET_IPV6_DST) in the
* specified outermost IPv6 header.
*/
struct rte_flow_action_set_ipv6 {
uint8_t ipv6_addr[16];
};
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ACTION_TYPE_SET_TP_SRC
* RTE_FLOW_ACTION_TYPE_SET_TP_DST
*
* Allows modification of source (RTE_FLOW_ACTION_TYPE_SET_TP_SRC)
* and destination (RTE_FLOW_ACTION_TYPE_SET_TP_DST) port numbers
* in the specified outermost TCP/UDP header.
*/
struct rte_flow_action_set_tp {
rte_be16_t port;
};
/**
* RTE_FLOW_ACTION_TYPE_SET_TTL
*
* Set the TTL value directly for IPv4 or IPv6
*/
struct rte_flow_action_set_ttl {
uint8_t ttl_value;
};
/**
* RTE_FLOW_ACTION_TYPE_SET_MAC
*
* Set MAC address from the matched flow
*/
struct rte_flow_action_set_mac {
net: add rte prefix to ether defines Add 'RTE_' prefix to defines: - rename ETHER_ADDR_LEN as RTE_ETHER_ADDR_LEN. - rename ETHER_TYPE_LEN as RTE_ETHER_TYPE_LEN. - rename ETHER_CRC_LEN as RTE_ETHER_CRC_LEN. - rename ETHER_HDR_LEN as RTE_ETHER_HDR_LEN. - rename ETHER_MIN_LEN as RTE_ETHER_MIN_LEN. - rename ETHER_MAX_LEN as RTE_ETHER_MAX_LEN. - rename ETHER_MTU as RTE_ETHER_MTU. - rename ETHER_MAX_VLAN_FRAME_LEN as RTE_ETHER_MAX_VLAN_FRAME_LEN. - rename ETHER_MAX_VLAN_ID as RTE_ETHER_MAX_VLAN_ID. - rename ETHER_MAX_JUMBO_FRAME_LEN as RTE_ETHER_MAX_JUMBO_FRAME_LEN. - rename ETHER_MIN_MTU as RTE_ETHER_MIN_MTU. - rename ETHER_LOCAL_ADMIN_ADDR as RTE_ETHER_LOCAL_ADMIN_ADDR. - rename ETHER_GROUP_ADDR as RTE_ETHER_GROUP_ADDR. - rename ETHER_TYPE_IPv4 as RTE_ETHER_TYPE_IPv4. - rename ETHER_TYPE_IPv6 as RTE_ETHER_TYPE_IPv6. - rename ETHER_TYPE_ARP as RTE_ETHER_TYPE_ARP. - rename ETHER_TYPE_VLAN as RTE_ETHER_TYPE_VLAN. - rename ETHER_TYPE_RARP as RTE_ETHER_TYPE_RARP. - rename ETHER_TYPE_QINQ as RTE_ETHER_TYPE_QINQ. - rename ETHER_TYPE_ETAG as RTE_ETHER_TYPE_ETAG. - rename ETHER_TYPE_1588 as RTE_ETHER_TYPE_1588. - rename ETHER_TYPE_SLOW as RTE_ETHER_TYPE_SLOW. - rename ETHER_TYPE_TEB as RTE_ETHER_TYPE_TEB. - rename ETHER_TYPE_LLDP as RTE_ETHER_TYPE_LLDP. - rename ETHER_TYPE_MPLS as RTE_ETHER_TYPE_MPLS. - rename ETHER_TYPE_MPLSM as RTE_ETHER_TYPE_MPLSM. - rename ETHER_VXLAN_HLEN as RTE_ETHER_VXLAN_HLEN. - rename ETHER_ADDR_FMT_SIZE as RTE_ETHER_ADDR_FMT_SIZE. - rename VXLAN_GPE_TYPE_IPV4 as RTE_VXLAN_GPE_TYPE_IPV4. - rename VXLAN_GPE_TYPE_IPV6 as RTE_VXLAN_GPE_TYPE_IPV6. - rename VXLAN_GPE_TYPE_ETH as RTE_VXLAN_GPE_TYPE_ETH. - rename VXLAN_GPE_TYPE_NSH as RTE_VXLAN_GPE_TYPE_NSH. - rename VXLAN_GPE_TYPE_MPLS as RTE_VXLAN_GPE_TYPE_MPLS. - rename VXLAN_GPE_TYPE_GBP as RTE_VXLAN_GPE_TYPE_GBP. - rename VXLAN_GPE_TYPE_VBNG as RTE_VXLAN_GPE_TYPE_VBNG. - rename ETHER_VXLAN_GPE_HLEN as RTE_ETHER_VXLAN_GPE_HLEN. Do not update the command line library to avoid adding a dependency to librte_net. Signed-off-by: Olivier Matz <olivier.matz@6wind.com> Reviewed-by: Stephen Hemminger <stephen@networkplumber.org> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com> Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
2019-05-21 16:13:05 +00:00
uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
};
ethdev: add flow tag A tag is a transient data which can be used during flow match. This can be used to store match result from a previous table so that the same pattern need not be matched again on the next table. Even if outer header is decapsulated on the previous match, the match result can be kept. Some device expose internal registers of its flow processing pipeline and those registers are quite useful for stateful connection tracking as it keeps status of flow matching. Multiple tags are supported by specifying index. Example testpmd commands are: flow create 0 ingress pattern ... / end actions set_tag index 2 value 0xaa00bb mask 0xffff00ff / set_tag index 3 value 0x123456 mask 0xffffff / vxlan_decap / jump group 1 / end flow create 0 ingress pattern ... / end actions set_tag index 2 value 0xcc00 mask 0xff00 / set_tag index 3 value 0x123456 mask 0xffffff / vxlan_decap / jump group 1 / end flow create 0 ingress group 1 pattern tag index is 2 value spec 0xaa00bb value mask 0xffff00ff / eth ... / end actions ... jump group 2 / end flow create 0 ingress group 1 pattern tag index is 2 value spec 0xcc00 value mask 0xff00 / tag index is 3 value spec 0x123456 value mask 0xffffff / eth ... / end actions ... / end flow create 0 ingress group 2 pattern tag index is 3 value spec 0x123456 value mask 0xffffff / eth ... / end actions ... / end Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-10-27 18:42:28 +00:00
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ACTION_TYPE_SET_TAG
*
* Set a tag which is a transient data used during flow matching. This is not
* delivered to application. Multiple tags are supported by specifying index.
*/
struct rte_flow_action_set_tag {
uint32_t data;
uint32_t mask;
uint8_t index;
};
ethdev: extend flow metadata Currently, metadata can be set on egress path via mbuf tx_metadata field with PKT_TX_METADATA flag and RTE_FLOW_ITEM_TYPE_META matches metadata. This patch extends the metadata feature usability. 1) RTE_FLOW_ACTION_TYPE_SET_META When supporting multiple tables, Tx metadata can also be set by a rule and matched by another rule. This new action allows metadata to be set as a result of flow match. 2) Metadata on ingress There's also need to support metadata on ingress. Metadata can be set by SET_META action and matched by META item like Tx. The final value set by the action will be delivered to application via metadata dynamic field of mbuf which can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior to use SET_META action. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. If application is going to engage the metadata feature it registers the metadata dynamic fields, then PMD checks the metadata field availability and handles the appropriate fields in datapath. For loopback/hairpin packet, metadata set on Rx/Tx may or may not be propagated to the other path depending on hardware capability. MARK and METADATA look similar and might operate in similar way, but not interacting. Initially, there were proposed two metadata related actions: - RTE_FLOW_ACTION_TYPE_FLAG - RTE_FLOW_ACTION_TYPE_MARK These actions set the special flag in the packet metadata, MARK action stores some specified value in the metadata storage, and, on the packet receiving PMD puts the flag and value to the mbuf and applications can see the packet was threated inside flow engine according to the appropriate RTE flow(s). MARK and FLAG are like some kind of gateway to transfer some per-packet information from the flow engine to the application via receiving datapath. Also, there is the item of type RTE_FLOW_ITEM_TYPE_MARK provided. It allows us to extend the flow match pattern with the capability to match the metadata values set by MARK/FLAG actions on other flows. From the datapath point of view, the MARK and FLAG are related to the receiving side only. It would useful to have the same gateway on the transmitting side and there was the feature of type RTE_FLOW_ITEM_TYPE_META was proposed. The application can fill the field in mbuf and this value will be transferred to some field in the packet metadata inside the flow engine. It did not matter whether these metadata fields are shared because of MARK and META items belonged to different domains (receiving and transmitting) and could be vendor-specific. So far, so good, DPDK proposes some entities to control metadata inside the flow engine and gateways to exchange these values on a per-packet basis via datapaths. As we can see, the MARK and META means are not symmetric, there is absent action which would allow us to set META value on the transmitting path. So, the action of type: - RTE_FLOW_ACTION_TYPE_SET_META was proposed. The next, applications raise the new requirements for packet metadata. The flow ngines are getting more complex, internal switches are introduced, multiple ports might be supported within the same flow engine namespace. From the DPDK points of view, it means the packets might be sent on one eth_dev port and received on the other one, and the packet path inside the flow engine entirely belongs to the same hardware device. The simplest example is SR-IOV with PF, VFs and the representors. And there is a brilliant opportunity to provide some out-of-band channel to transfer some extra data from one port to another one, besides the packet data itself. And applications would like to use this opportunity. It is supposed for application to use trials (with rte_flow_validate) to detect which metadata features (FLAG, MARK, META) actually supported by PMD and underlying hardware. It might depend on PMD configuration, system software, hardware settings, etc., and should be detected in run time. Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:30 +00:00
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ACTION_TYPE_SET_META
*
ethdev: move egress metadata to dynamic field The dynamic mbuf fields were introduced by [1]. The egress metadata is good candidate to be moved from statically allocated field tx_metadata to dynamic one. Because mbufs are used in half-duplex fashion only, it is safe to share this dynamic field with ingress metadata. The shared dynamic field contains either egress (if application going to transmit mbuf with tx_burst) or ingress (if mbuf is received with rx_burst) metadata and can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_TX_DYNF_METADATA/PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior accessing the data. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. DEV_TX_OFFLOAD_MATCH_METADATA offload and configuration flag is removed. The metadata support in PMDs is engaged on dynamic field registration. Metadata feature is getting complex. We might have some set of actions and items that might be supported by PMDs in multiple combinations, the supported values and masks are the subjects to query by perfroming trials (with rte_flow_validate). [1] http://patches.dpdk.org/patch/62040/ Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:31 +00:00
* Set metadata. Metadata set by mbuf metadata dynamic field with
* PKT_TX_DYNF_DATA flag on egress will be overridden by this action. On
ethdev: extend flow metadata Currently, metadata can be set on egress path via mbuf tx_metadata field with PKT_TX_METADATA flag and RTE_FLOW_ITEM_TYPE_META matches metadata. This patch extends the metadata feature usability. 1) RTE_FLOW_ACTION_TYPE_SET_META When supporting multiple tables, Tx metadata can also be set by a rule and matched by another rule. This new action allows metadata to be set as a result of flow match. 2) Metadata on ingress There's also need to support metadata on ingress. Metadata can be set by SET_META action and matched by META item like Tx. The final value set by the action will be delivered to application via metadata dynamic field of mbuf which can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior to use SET_META action. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. If application is going to engage the metadata feature it registers the metadata dynamic fields, then PMD checks the metadata field availability and handles the appropriate fields in datapath. For loopback/hairpin packet, metadata set on Rx/Tx may or may not be propagated to the other path depending on hardware capability. MARK and METADATA look similar and might operate in similar way, but not interacting. Initially, there were proposed two metadata related actions: - RTE_FLOW_ACTION_TYPE_FLAG - RTE_FLOW_ACTION_TYPE_MARK These actions set the special flag in the packet metadata, MARK action stores some specified value in the metadata storage, and, on the packet receiving PMD puts the flag and value to the mbuf and applications can see the packet was threated inside flow engine according to the appropriate RTE flow(s). MARK and FLAG are like some kind of gateway to transfer some per-packet information from the flow engine to the application via receiving datapath. Also, there is the item of type RTE_FLOW_ITEM_TYPE_MARK provided. It allows us to extend the flow match pattern with the capability to match the metadata values set by MARK/FLAG actions on other flows. From the datapath point of view, the MARK and FLAG are related to the receiving side only. It would useful to have the same gateway on the transmitting side and there was the feature of type RTE_FLOW_ITEM_TYPE_META was proposed. The application can fill the field in mbuf and this value will be transferred to some field in the packet metadata inside the flow engine. It did not matter whether these metadata fields are shared because of MARK and META items belonged to different domains (receiving and transmitting) and could be vendor-specific. So far, so good, DPDK proposes some entities to control metadata inside the flow engine and gateways to exchange these values on a per-packet basis via datapaths. As we can see, the MARK and META means are not symmetric, there is absent action which would allow us to set META value on the transmitting path. So, the action of type: - RTE_FLOW_ACTION_TYPE_SET_META was proposed. The next, applications raise the new requirements for packet metadata. The flow ngines are getting more complex, internal switches are introduced, multiple ports might be supported within the same flow engine namespace. From the DPDK points of view, it means the packets might be sent on one eth_dev port and received on the other one, and the packet path inside the flow engine entirely belongs to the same hardware device. The simplest example is SR-IOV with PF, VFs and the representors. And there is a brilliant opportunity to provide some out-of-band channel to transfer some extra data from one port to another one, besides the packet data itself. And applications would like to use this opportunity. It is supposed for application to use trials (with rte_flow_validate) to detect which metadata features (FLAG, MARK, META) actually supported by PMD and underlying hardware. It might depend on PMD configuration, system software, hardware settings, etc., and should be detected in run time. Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:30 +00:00
* ingress, the metadata will be carried by mbuf metadata dynamic field
* with PKT_RX_DYNF_METADATA flag if set. The dynamic mbuf field must be
* registered in advance by rte_flow_dynf_metadata_register().
*
* Altering partial bits is supported with mask. For bits which have never
* been set, unpredictable value will be seen depending on driver
* implementation. For loopback/hairpin packet, metadata set on Rx/Tx may
* or may not be propagated to the other path depending on HW capability.
*
* RTE_FLOW_ITEM_TYPE_META matches metadata.
*/
struct rte_flow_action_set_meta {
uint32_t data;
uint32_t mask;
};
/**
* RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
* RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
*
* Set the DSCP value for IPv4/IPv6 header.
* DSCP in low 6 bits, rest ignored.
*/
struct rte_flow_action_set_dscp {
uint8_t dscp;
};
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
/**
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
*
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* RTE_FLOW_ACTION_TYPE_INDIRECT
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
*
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* Opaque type returned after successfully creating an indirect action object.
* The definition of the object handle is different per driver or
* per direct action type.
*
* This handle can be used to manage and query the related direct action:
* - referenced in single flow rule or across multiple flow rules
* over multiple ports
* - update action object configuration
* - query action object data
* - destroy action object
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
*/
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
struct rte_flow_action_handle;
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
ethdev: introduce conntrack flow action and item This commit introduces the conntrack action and item. Usually the HW offloading is stateless. For some stateful offloading like a TCP connection, HW module will help provide the ability of a full offloading w/o SW participation after the connection was established. The basic usage is that in the first flow rule the application should add the conntrack action and jump to the next flow table. In the following flow rule(s) of the next table, the application should use the conntrack item to match on the result. A TCP connection has two directions traffic. To set a conntrack action context correctly, the information of packets from both directions are required. The conntrack action should be created on one ethdev port and supply the peer ethdev port as a parameter to the action. After context created, it could only be used between these two ethdev ports (dual-port mode) or a single port. The application should modify the action via the API "rte_action_handle_update" only when before using it to create a flow rule with conntrack for the opposite direction. This will help the driver to recognize the direction of the flow to be created, especially in the single-port mode, in which case the traffic from both directions will go through the same ethdev port if the application works as an "forwarding engine" but not an end point. There is no need to call the update interface if the subsequent flow rules have nothing to be changed. Query will be supported via "rte_action_handle_query" interface, about the current packets information and connection status. The fields query capabilities depends on the HW. For the packets received during the conntrack setup, it is suggested to re-inject the packets in order to make sure the conntrack module works correctly without missing any packet. Only the valid packets should pass the conntrack, packets with invalid TCP information, like out of window, or with invalid header, like malformed, should not pass. Naming and definition: https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/ netfilter/nf_conntrack_tcp.h https://elixir.bootlin.com/linux/latest/source/net/netfilter/ nf_conntrack_proto_tcp.c Other reference: https://www.usenix.org/legacy/events/sec01/invitedtalks/rooij.pdf Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 17:51:30 +00:00
/**
* The state of a TCP connection.
*/
enum rte_flow_conntrack_state {
/** SYN-ACK packet was seen. */
RTE_FLOW_CONNTRACK_STATE_SYN_RECV,
/** 3-way handshake was done. */
RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
/** First FIN packet was received to close the connection. */
RTE_FLOW_CONNTRACK_STATE_FIN_WAIT,
/** First FIN was ACKed. */
RTE_FLOW_CONNTRACK_STATE_CLOSE_WAIT,
/** Second FIN was received, waiting for the last ACK. */
RTE_FLOW_CONNTRACK_STATE_LAST_ACK,
/** Second FIN was ACKed, connection was closed. */
RTE_FLOW_CONNTRACK_STATE_TIME_WAIT,
};
/**
* The last passed TCP packet flags of a connection.
*/
enum rte_flow_conntrack_tcp_last_index {
RTE_FLOW_CONNTRACK_FLAG_NONE = 0, /**< No Flag. */
RTE_FLOW_CONNTRACK_FLAG_SYN = RTE_BIT32(0), /**< With SYN flag. */
RTE_FLOW_CONNTRACK_FLAG_SYNACK = RTE_BIT32(1), /**< With SYNACK flag. */
RTE_FLOW_CONNTRACK_FLAG_FIN = RTE_BIT32(2), /**< With FIN flag. */
RTE_FLOW_CONNTRACK_FLAG_ACK = RTE_BIT32(3), /**< With ACK flag. */
RTE_FLOW_CONNTRACK_FLAG_RST = RTE_BIT32(4), /**< With RST flag. */
};
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* Configuration parameters for each direction of a TCP connection.
* All fields should be in host byte order.
* If needed, driver should convert all fields to network byte order
* if HW needs them in that way.
*/
struct rte_flow_tcp_dir_param {
/** TCP window scaling factor, 0xF to disable. */
uint32_t scale:4;
/** The FIN was sent by this direction. */
uint32_t close_initiated:1;
/** An ACK packet has been received by this side. */
uint32_t last_ack_seen:1;
/**
* If set, it indicates that there is unacknowledged data for the
* packets sent from this direction.
*/
uint32_t data_unacked:1;
/**
* Maximal value of sequence + payload length in sent
* packets (next ACK from the opposite direction).
*/
uint32_t sent_end;
/**
* Maximal value of (ACK + window size) in received packet + length
* over sent packet (maximal sequence could be sent).
*/
uint32_t reply_end;
/** Maximal value of actual window size in sent packets. */
uint32_t max_win;
/** Maximal value of ACK in sent packets. */
uint32_t max_ack;
};
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ACTION_TYPE_CONNTRACK
*
* Configuration and initial state for the connection tracking module.
* This structure could be used for both setting and query.
* All fields should be in host byte order.
*/
struct rte_flow_action_conntrack {
/** The peer port number, can be the same port. */
uint16_t peer_port;
/**
* Direction of this connection when creating a flow rule, the
* value only affects the creation of subsequent flow rules.
*/
uint32_t is_original_dir:1;
/**
* Enable / disable the conntrack HW module. When disabled, the
* result will always be RTE_FLOW_CONNTRACK_FLAG_DISABLED.
* In this state the HW will act as passthrough.
* It only affects this conntrack object in the HW without any effect
* to the other objects.
*/
uint32_t enable:1;
/** At least one ack was seen after the connection was established. */
uint32_t live_connection:1;
/** Enable selective ACK on this connection. */
uint32_t selective_ack:1;
/** A challenge ack has passed. */
uint32_t challenge_ack_passed:1;
/**
* 1: The last packet is seen from the original direction.
* 0: The last packet is seen from the reply direction.
*/
uint32_t last_direction:1;
/** No TCP check will be done except the state change. */
uint32_t liberal_mode:1;
/**<The current state of this connection. */
enum rte_flow_conntrack_state state;
/** Scaling factor for maximal allowed ACK window. */
uint8_t max_ack_window;
/** Maximal allowed number of retransmission times. */
uint8_t retransmission_limit;
/** TCP parameters of the original direction. */
struct rte_flow_tcp_dir_param original_dir;
/** TCP parameters of the reply direction. */
struct rte_flow_tcp_dir_param reply_dir;
/** The window value of the last packet passed this conntrack. */
uint16_t last_window;
enum rte_flow_conntrack_tcp_last_index last_index;
/** The sequence of the last packet passed this conntrack. */
uint32_t last_seq;
/** The acknowledgment of the last packet passed this conntrack. */
uint32_t last_ack;
/**
* The total value ACK + payload length of the last packet
* passed this conntrack.
*/
uint32_t last_end;
};
/**
* RTE_FLOW_ACTION_TYPE_CONNTRACK
*
* Wrapper structure for the context update interface.
* Ports cannot support updating, and the only valid solution is to
* destroy the old context and create a new one instead.
*/
struct rte_flow_modify_conntrack {
/** New connection tracking parameters to be updated. */
struct rte_flow_action_conntrack new_ct;
/** The direction field will be updated. */
uint32_t direction:1;
/** All the other fields except direction will be updated. */
uint32_t state:1;
/** Reserved bits for the future usage. */
uint32_t reserved:30;
};
ethdev: introduce generic modify flow action Implement the generic modify flow API to allow manipulations on an arbitrary header field (as well as mark, metadata or tag) using data from another field or a user-specified value. This generic modify mechanism removes the necessity to implement a separate RTE Flow action every time we need to modify a new packet field in the future. Supported operation are: - set: copy data from source to destination. - add: integer addition, stores the result in destination. - sub: integer subtraction, stores the result in destination. The field ID is used to specify the desired source/destination packet field in order to simplify the API for various encapsulation models. Specifying the packet field ID with the needed encapsulation level is able to quickly get a packet field for any inner packet header. Alternatively, the special ID (ITEM_START) can be used to point to the very beginning of a packet. This ID in conjunction with the offset parameter provides great flexibility to copy/modify any part of a packet as needed. The number of bits to use from a source as well as the offset can be be specified to allow a partial copy or dividing a big packet field into multiple small fields (e.g. copying 128 bits of IPv6 to 4 tags). An immediate value (or a pointer to it) can be specified instead of the level and the offset for the special FIELD_VALUE ID (or FIELD_POINTER). Can be used as a source only. Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Thomas Monjalon <thomas@monjalon.net> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
2021-01-18 21:40:25 +00:00
/**
* Field IDs for MODIFY_FIELD action.
*/
enum rte_flow_field_id {
RTE_FLOW_FIELD_START = 0, /**< Start of a packet. */
RTE_FLOW_FIELD_MAC_DST, /**< Destination MAC Address. */
RTE_FLOW_FIELD_MAC_SRC, /**< Source MAC Address. */
RTE_FLOW_FIELD_VLAN_TYPE, /**< 802.1Q Tag Identifier. */
RTE_FLOW_FIELD_VLAN_ID, /**< 802.1Q VLAN Identifier. */
RTE_FLOW_FIELD_MAC_TYPE, /**< EtherType. */
RTE_FLOW_FIELD_IPV4_DSCP, /**< IPv4 DSCP. */
RTE_FLOW_FIELD_IPV4_TTL, /**< IPv4 Time To Live. */
RTE_FLOW_FIELD_IPV4_SRC, /**< IPv4 Source Address. */
RTE_FLOW_FIELD_IPV4_DST, /**< IPv4 Destination Address. */
RTE_FLOW_FIELD_IPV6_DSCP, /**< IPv6 DSCP. */
RTE_FLOW_FIELD_IPV6_HOPLIMIT, /**< IPv6 Hop Limit. */
RTE_FLOW_FIELD_IPV6_SRC, /**< IPv6 Source Address. */
RTE_FLOW_FIELD_IPV6_DST, /**< IPv6 Destination Address. */
RTE_FLOW_FIELD_TCP_PORT_SRC, /**< TCP Source Port Number. */
RTE_FLOW_FIELD_TCP_PORT_DST, /**< TCP Destination Port Number. */
RTE_FLOW_FIELD_TCP_SEQ_NUM, /**< TCP Sequence Number. */
RTE_FLOW_FIELD_TCP_ACK_NUM, /**< TCP Acknowledgment Number. */
RTE_FLOW_FIELD_TCP_FLAGS, /**< TCP Flags. */
RTE_FLOW_FIELD_UDP_PORT_SRC, /**< UDP Source Port Number. */
RTE_FLOW_FIELD_UDP_PORT_DST, /**< UDP Destination Port Number. */
RTE_FLOW_FIELD_VXLAN_VNI, /**< VXLAN Network Identifier. */
RTE_FLOW_FIELD_GENEVE_VNI, /**< GENEVE Network Identifier. */
RTE_FLOW_FIELD_GTP_TEID, /**< GTP Tunnel Endpoint Identifier. */
RTE_FLOW_FIELD_TAG, /**< Tag value. */
RTE_FLOW_FIELD_MARK, /**< Mark value. */
RTE_FLOW_FIELD_META, /**< Metadata value. */
RTE_FLOW_FIELD_POINTER, /**< Memory pointer. */
RTE_FLOW_FIELD_VALUE, /**< Immediate value. */
ethdev: introduce generic modify flow action Implement the generic modify flow API to allow manipulations on an arbitrary header field (as well as mark, metadata or tag) using data from another field or a user-specified value. This generic modify mechanism removes the necessity to implement a separate RTE Flow action every time we need to modify a new packet field in the future. Supported operation are: - set: copy data from source to destination. - add: integer addition, stores the result in destination. - sub: integer subtraction, stores the result in destination. The field ID is used to specify the desired source/destination packet field in order to simplify the API for various encapsulation models. Specifying the packet field ID with the needed encapsulation level is able to quickly get a packet field for any inner packet header. Alternatively, the special ID (ITEM_START) can be used to point to the very beginning of a packet. This ID in conjunction with the offset parameter provides great flexibility to copy/modify any part of a packet as needed. The number of bits to use from a source as well as the offset can be be specified to allow a partial copy or dividing a big packet field into multiple small fields (e.g. copying 128 bits of IPv6 to 4 tags). An immediate value (or a pointer to it) can be specified instead of the level and the offset for the special FIELD_VALUE ID (or FIELD_POINTER). Can be used as a source only. Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Thomas Monjalon <thomas@monjalon.net> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
2021-01-18 21:40:25 +00:00
};
/**
* Field description for MODIFY_FIELD action.
*/
struct rte_flow_action_modify_data {
enum rte_flow_field_id field; /**< Field or memory type ID. */
RTE_STD_C11
union {
struct {
/**< Encapsulation level or tag index. */
uint32_t level;
/**< Number of bits to skip from a field. */
uint32_t offset;
};
/**
* Immediate value for RTE_FLOW_FIELD_VALUE or
* memory address for RTE_FLOW_FIELD_POINTER.
*/
uint64_t value;
};
};
/**
* Operation types for MODIFY_FIELD action.
*/
enum rte_flow_modify_op {
RTE_FLOW_MODIFY_SET = 0, /**< Set a new value. */
RTE_FLOW_MODIFY_ADD, /**< Add a value to a field. */
RTE_FLOW_MODIFY_SUB, /**< Subtract a value from a field. */
};
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ACTION_TYPE_MODIFY_FIELD
*
* Modify a destination header field according to the specified
* operation. Another packet field can be used as a source as well
* as tag, mark, metadata, immediate value or a pointer to it.
*/
struct rte_flow_action_modify_field {
enum rte_flow_modify_op operation; /**< Operation to perform. */
struct rte_flow_action_modify_data dst; /**< Destination field. */
struct rte_flow_action_modify_data src; /**< Source field. */
uint32_t width; /**< Number of bits to use from a source field. */
};
ethdev: extend flow metadata Currently, metadata can be set on egress path via mbuf tx_metadata field with PKT_TX_METADATA flag and RTE_FLOW_ITEM_TYPE_META matches metadata. This patch extends the metadata feature usability. 1) RTE_FLOW_ACTION_TYPE_SET_META When supporting multiple tables, Tx metadata can also be set by a rule and matched by another rule. This new action allows metadata to be set as a result of flow match. 2) Metadata on ingress There's also need to support metadata on ingress. Metadata can be set by SET_META action and matched by META item like Tx. The final value set by the action will be delivered to application via metadata dynamic field of mbuf which can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior to use SET_META action. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. If application is going to engage the metadata feature it registers the metadata dynamic fields, then PMD checks the metadata field availability and handles the appropriate fields in datapath. For loopback/hairpin packet, metadata set on Rx/Tx may or may not be propagated to the other path depending on hardware capability. MARK and METADATA look similar and might operate in similar way, but not interacting. Initially, there were proposed two metadata related actions: - RTE_FLOW_ACTION_TYPE_FLAG - RTE_FLOW_ACTION_TYPE_MARK These actions set the special flag in the packet metadata, MARK action stores some specified value in the metadata storage, and, on the packet receiving PMD puts the flag and value to the mbuf and applications can see the packet was threated inside flow engine according to the appropriate RTE flow(s). MARK and FLAG are like some kind of gateway to transfer some per-packet information from the flow engine to the application via receiving datapath. Also, there is the item of type RTE_FLOW_ITEM_TYPE_MARK provided. It allows us to extend the flow match pattern with the capability to match the metadata values set by MARK/FLAG actions on other flows. From the datapath point of view, the MARK and FLAG are related to the receiving side only. It would useful to have the same gateway on the transmitting side and there was the feature of type RTE_FLOW_ITEM_TYPE_META was proposed. The application can fill the field in mbuf and this value will be transferred to some field in the packet metadata inside the flow engine. It did not matter whether these metadata fields are shared because of MARK and META items belonged to different domains (receiving and transmitting) and could be vendor-specific. So far, so good, DPDK proposes some entities to control metadata inside the flow engine and gateways to exchange these values on a per-packet basis via datapaths. As we can see, the MARK and META means are not symmetric, there is absent action which would allow us to set META value on the transmitting path. So, the action of type: - RTE_FLOW_ACTION_TYPE_SET_META was proposed. The next, applications raise the new requirements for packet metadata. The flow ngines are getting more complex, internal switches are introduced, multiple ports might be supported within the same flow engine namespace. From the DPDK points of view, it means the packets might be sent on one eth_dev port and received on the other one, and the packet path inside the flow engine entirely belongs to the same hardware device. The simplest example is SR-IOV with PF, VFs and the representors. And there is a brilliant opportunity to provide some out-of-band channel to transfer some extra data from one port to another one, besides the packet data itself. And applications would like to use this opportunity. It is supposed for application to use trials (with rte_flow_validate) to detect which metadata features (FLAG, MARK, META) actually supported by PMD and underlying hardware. It might depend on PMD configuration, system software, hardware settings, etc., and should be detected in run time. Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:30 +00:00
/* Mbuf dynamic field offset for metadata. */
extern int32_t rte_flow_dynf_metadata_offs;
ethdev: extend flow metadata Currently, metadata can be set on egress path via mbuf tx_metadata field with PKT_TX_METADATA flag and RTE_FLOW_ITEM_TYPE_META matches metadata. This patch extends the metadata feature usability. 1) RTE_FLOW_ACTION_TYPE_SET_META When supporting multiple tables, Tx metadata can also be set by a rule and matched by another rule. This new action allows metadata to be set as a result of flow match. 2) Metadata on ingress There's also need to support metadata on ingress. Metadata can be set by SET_META action and matched by META item like Tx. The final value set by the action will be delivered to application via metadata dynamic field of mbuf which can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior to use SET_META action. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. If application is going to engage the metadata feature it registers the metadata dynamic fields, then PMD checks the metadata field availability and handles the appropriate fields in datapath. For loopback/hairpin packet, metadata set on Rx/Tx may or may not be propagated to the other path depending on hardware capability. MARK and METADATA look similar and might operate in similar way, but not interacting. Initially, there were proposed two metadata related actions: - RTE_FLOW_ACTION_TYPE_FLAG - RTE_FLOW_ACTION_TYPE_MARK These actions set the special flag in the packet metadata, MARK action stores some specified value in the metadata storage, and, on the packet receiving PMD puts the flag and value to the mbuf and applications can see the packet was threated inside flow engine according to the appropriate RTE flow(s). MARK and FLAG are like some kind of gateway to transfer some per-packet information from the flow engine to the application via receiving datapath. Also, there is the item of type RTE_FLOW_ITEM_TYPE_MARK provided. It allows us to extend the flow match pattern with the capability to match the metadata values set by MARK/FLAG actions on other flows. From the datapath point of view, the MARK and FLAG are related to the receiving side only. It would useful to have the same gateway on the transmitting side and there was the feature of type RTE_FLOW_ITEM_TYPE_META was proposed. The application can fill the field in mbuf and this value will be transferred to some field in the packet metadata inside the flow engine. It did not matter whether these metadata fields are shared because of MARK and META items belonged to different domains (receiving and transmitting) and could be vendor-specific. So far, so good, DPDK proposes some entities to control metadata inside the flow engine and gateways to exchange these values on a per-packet basis via datapaths. As we can see, the MARK and META means are not symmetric, there is absent action which would allow us to set META value on the transmitting path. So, the action of type: - RTE_FLOW_ACTION_TYPE_SET_META was proposed. The next, applications raise the new requirements for packet metadata. The flow ngines are getting more complex, internal switches are introduced, multiple ports might be supported within the same flow engine namespace. From the DPDK points of view, it means the packets might be sent on one eth_dev port and received on the other one, and the packet path inside the flow engine entirely belongs to the same hardware device. The simplest example is SR-IOV with PF, VFs and the representors. And there is a brilliant opportunity to provide some out-of-band channel to transfer some extra data from one port to another one, besides the packet data itself. And applications would like to use this opportunity. It is supposed for application to use trials (with rte_flow_validate) to detect which metadata features (FLAG, MARK, META) actually supported by PMD and underlying hardware. It might depend on PMD configuration, system software, hardware settings, etc., and should be detected in run time. Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:30 +00:00
/* Mbuf dynamic field flag mask for metadata. */
extern uint64_t rte_flow_dynf_metadata_mask;
/* Mbuf dynamic field pointer for metadata. */
#define RTE_FLOW_DYNF_METADATA(m) \
RTE_MBUF_DYNFIELD((m), rte_flow_dynf_metadata_offs, uint32_t *)
ethdev: move egress metadata to dynamic field The dynamic mbuf fields were introduced by [1]. The egress metadata is good candidate to be moved from statically allocated field tx_metadata to dynamic one. Because mbufs are used in half-duplex fashion only, it is safe to share this dynamic field with ingress metadata. The shared dynamic field contains either egress (if application going to transmit mbuf with tx_burst) or ingress (if mbuf is received with rx_burst) metadata and can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_TX_DYNF_METADATA/PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior accessing the data. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. DEV_TX_OFFLOAD_MATCH_METADATA offload and configuration flag is removed. The metadata support in PMDs is engaged on dynamic field registration. Metadata feature is getting complex. We might have some set of actions and items that might be supported by PMDs in multiple combinations, the supported values and masks are the subjects to query by perfroming trials (with rte_flow_validate). [1] http://patches.dpdk.org/patch/62040/ Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:31 +00:00
/* Mbuf dynamic flags for metadata. */
ethdev: extend flow metadata Currently, metadata can be set on egress path via mbuf tx_metadata field with PKT_TX_METADATA flag and RTE_FLOW_ITEM_TYPE_META matches metadata. This patch extends the metadata feature usability. 1) RTE_FLOW_ACTION_TYPE_SET_META When supporting multiple tables, Tx metadata can also be set by a rule and matched by another rule. This new action allows metadata to be set as a result of flow match. 2) Metadata on ingress There's also need to support metadata on ingress. Metadata can be set by SET_META action and matched by META item like Tx. The final value set by the action will be delivered to application via metadata dynamic field of mbuf which can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior to use SET_META action. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. If application is going to engage the metadata feature it registers the metadata dynamic fields, then PMD checks the metadata field availability and handles the appropriate fields in datapath. For loopback/hairpin packet, metadata set on Rx/Tx may or may not be propagated to the other path depending on hardware capability. MARK and METADATA look similar and might operate in similar way, but not interacting. Initially, there were proposed two metadata related actions: - RTE_FLOW_ACTION_TYPE_FLAG - RTE_FLOW_ACTION_TYPE_MARK These actions set the special flag in the packet metadata, MARK action stores some specified value in the metadata storage, and, on the packet receiving PMD puts the flag and value to the mbuf and applications can see the packet was threated inside flow engine according to the appropriate RTE flow(s). MARK and FLAG are like some kind of gateway to transfer some per-packet information from the flow engine to the application via receiving datapath. Also, there is the item of type RTE_FLOW_ITEM_TYPE_MARK provided. It allows us to extend the flow match pattern with the capability to match the metadata values set by MARK/FLAG actions on other flows. From the datapath point of view, the MARK and FLAG are related to the receiving side only. It would useful to have the same gateway on the transmitting side and there was the feature of type RTE_FLOW_ITEM_TYPE_META was proposed. The application can fill the field in mbuf and this value will be transferred to some field in the packet metadata inside the flow engine. It did not matter whether these metadata fields are shared because of MARK and META items belonged to different domains (receiving and transmitting) and could be vendor-specific. So far, so good, DPDK proposes some entities to control metadata inside the flow engine and gateways to exchange these values on a per-packet basis via datapaths. As we can see, the MARK and META means are not symmetric, there is absent action which would allow us to set META value on the transmitting path. So, the action of type: - RTE_FLOW_ACTION_TYPE_SET_META was proposed. The next, applications raise the new requirements for packet metadata. The flow ngines are getting more complex, internal switches are introduced, multiple ports might be supported within the same flow engine namespace. From the DPDK points of view, it means the packets might be sent on one eth_dev port and received on the other one, and the packet path inside the flow engine entirely belongs to the same hardware device. The simplest example is SR-IOV with PF, VFs and the representors. And there is a brilliant opportunity to provide some out-of-band channel to transfer some extra data from one port to another one, besides the packet data itself. And applications would like to use this opportunity. It is supposed for application to use trials (with rte_flow_validate) to detect which metadata features (FLAG, MARK, META) actually supported by PMD and underlying hardware. It might depend on PMD configuration, system software, hardware settings, etc., and should be detected in run time. Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:30 +00:00
#define PKT_RX_DYNF_METADATA (rte_flow_dynf_metadata_mask)
ethdev: move egress metadata to dynamic field The dynamic mbuf fields were introduced by [1]. The egress metadata is good candidate to be moved from statically allocated field tx_metadata to dynamic one. Because mbufs are used in half-duplex fashion only, it is safe to share this dynamic field with ingress metadata. The shared dynamic field contains either egress (if application going to transmit mbuf with tx_burst) or ingress (if mbuf is received with rx_burst) metadata and can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_TX_DYNF_METADATA/PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior accessing the data. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. DEV_TX_OFFLOAD_MATCH_METADATA offload and configuration flag is removed. The metadata support in PMDs is engaged on dynamic field registration. Metadata feature is getting complex. We might have some set of actions and items that might be supported by PMDs in multiple combinations, the supported values and masks are the subjects to query by perfroming trials (with rte_flow_validate). [1] http://patches.dpdk.org/patch/62040/ Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:31 +00:00
#define PKT_TX_DYNF_METADATA (rte_flow_dynf_metadata_mask)
ethdev: extend flow metadata Currently, metadata can be set on egress path via mbuf tx_metadata field with PKT_TX_METADATA flag and RTE_FLOW_ITEM_TYPE_META matches metadata. This patch extends the metadata feature usability. 1) RTE_FLOW_ACTION_TYPE_SET_META When supporting multiple tables, Tx metadata can also be set by a rule and matched by another rule. This new action allows metadata to be set as a result of flow match. 2) Metadata on ingress There's also need to support metadata on ingress. Metadata can be set by SET_META action and matched by META item like Tx. The final value set by the action will be delivered to application via metadata dynamic field of mbuf which can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior to use SET_META action. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. If application is going to engage the metadata feature it registers the metadata dynamic fields, then PMD checks the metadata field availability and handles the appropriate fields in datapath. For loopback/hairpin packet, metadata set on Rx/Tx may or may not be propagated to the other path depending on hardware capability. MARK and METADATA look similar and might operate in similar way, but not interacting. Initially, there were proposed two metadata related actions: - RTE_FLOW_ACTION_TYPE_FLAG - RTE_FLOW_ACTION_TYPE_MARK These actions set the special flag in the packet metadata, MARK action stores some specified value in the metadata storage, and, on the packet receiving PMD puts the flag and value to the mbuf and applications can see the packet was threated inside flow engine according to the appropriate RTE flow(s). MARK and FLAG are like some kind of gateway to transfer some per-packet information from the flow engine to the application via receiving datapath. Also, there is the item of type RTE_FLOW_ITEM_TYPE_MARK provided. It allows us to extend the flow match pattern with the capability to match the metadata values set by MARK/FLAG actions on other flows. From the datapath point of view, the MARK and FLAG are related to the receiving side only. It would useful to have the same gateway on the transmitting side and there was the feature of type RTE_FLOW_ITEM_TYPE_META was proposed. The application can fill the field in mbuf and this value will be transferred to some field in the packet metadata inside the flow engine. It did not matter whether these metadata fields are shared because of MARK and META items belonged to different domains (receiving and transmitting) and could be vendor-specific. So far, so good, DPDK proposes some entities to control metadata inside the flow engine and gateways to exchange these values on a per-packet basis via datapaths. As we can see, the MARK and META means are not symmetric, there is absent action which would allow us to set META value on the transmitting path. So, the action of type: - RTE_FLOW_ACTION_TYPE_SET_META was proposed. The next, applications raise the new requirements for packet metadata. The flow ngines are getting more complex, internal switches are introduced, multiple ports might be supported within the same flow engine namespace. From the DPDK points of view, it means the packets might be sent on one eth_dev port and received on the other one, and the packet path inside the flow engine entirely belongs to the same hardware device. The simplest example is SR-IOV with PF, VFs and the representors. And there is a brilliant opportunity to provide some out-of-band channel to transfer some extra data from one port to another one, besides the packet data itself. And applications would like to use this opportunity. It is supposed for application to use trials (with rte_flow_validate) to detect which metadata features (FLAG, MARK, META) actually supported by PMD and underlying hardware. It might depend on PMD configuration, system software, hardware settings, etc., and should be detected in run time. Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:30 +00:00
__rte_experimental
static inline uint32_t
rte_flow_dynf_metadata_get(struct rte_mbuf *m)
{
return *RTE_FLOW_DYNF_METADATA(m);
}
__rte_experimental
static inline void
rte_flow_dynf_metadata_set(struct rte_mbuf *m, uint32_t v)
{
*RTE_FLOW_DYNF_METADATA(m) = v;
}
/*
* Definition of a single action.
*
* A list of actions is terminated by a END action.
*
* For simple actions without a configuration object, conf remains NULL.
*/
struct rte_flow_action {
enum rte_flow_action_type type; /**< Action type. */
const void *conf; /**< Pointer to action configuration object. */
};
/**
* Opaque type returned after successfully creating a flow.
*
* This handle can be used to manage and query the related flow (e.g. to
* destroy it or retrieve counters).
*/
struct rte_flow;
/**
* @warning
* @b EXPERIMENTAL: this structure may change without prior notice
*
* RTE_FLOW_ACTION_TYPE_SAMPLE
*
* Adds a sample action to a matched flow.
*
* The matching packets will be duplicated with specified ratio and applied
* with own set of actions with a fate action, the sampled packet could be
* redirected to queue or port. All the packets continue processing on the
* default flow path.
*
* When the sample ratio is set to 1 then the packets will be 100% mirrored.
* Additional action list be supported to add for sampled or mirrored packets.
*/
struct rte_flow_action_sample {
uint32_t ratio; /**< packets sampled equals to '1/ratio'. */
const struct rte_flow_action *actions;
/**< sub-action list specific for the sampling hit cases. */
};
/**
* Verbose error types.
*
* Most of them provide the type of the object referenced by struct
* rte_flow_error.cause.
*/
enum rte_flow_error_type {
RTE_FLOW_ERROR_TYPE_NONE, /**< No error. */
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, /**< Cause unspecified. */
RTE_FLOW_ERROR_TYPE_HANDLE, /**< Flow rule (handle). */
RTE_FLOW_ERROR_TYPE_ATTR_GROUP, /**< Group field. */
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, /**< Priority field. */
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, /**< Ingress field. */
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, /**< Egress field. */
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, /**< Transfer field. */
RTE_FLOW_ERROR_TYPE_ATTR, /**< Attributes structure. */
RTE_FLOW_ERROR_TYPE_ITEM_NUM, /**< Pattern length. */
RTE_FLOW_ERROR_TYPE_ITEM_SPEC, /**< Item specification. */
RTE_FLOW_ERROR_TYPE_ITEM_LAST, /**< Item specification range. */
RTE_FLOW_ERROR_TYPE_ITEM_MASK, /**< Item specification mask. */
RTE_FLOW_ERROR_TYPE_ITEM, /**< Specific pattern item. */
RTE_FLOW_ERROR_TYPE_ACTION_NUM, /**< Number of actions. */
RTE_FLOW_ERROR_TYPE_ACTION_CONF, /**< Action configuration. */
RTE_FLOW_ERROR_TYPE_ACTION, /**< Specific action. */
};
/**
* Verbose error structure definition.
*
* This object is normally allocated by applications and set by PMDs, the
* message points to a constant string which does not need to be freed by
* the application, however its pointer can be considered valid only as long
* as its associated DPDK port remains configured. Closing the underlying
* device or unloading the PMD invalidates it.
*
* Both cause and message may be NULL regardless of the error type.
*/
struct rte_flow_error {
enum rte_flow_error_type type; /**< Cause field and error types. */
const void *cause; /**< Object responsible for the error. */
const char *message; /**< Human-readable error message. */
};
/**
* Complete flow rule description.
*
* This object type is used when converting a flow rule description.
*
* @see RTE_FLOW_CONV_OP_RULE
* @see rte_flow_conv()
*/
RTE_STD_C11
struct rte_flow_conv_rule {
union {
const struct rte_flow_attr *attr_ro; /**< RO attributes. */
struct rte_flow_attr *attr; /**< Attributes. */
};
union {
const struct rte_flow_item *pattern_ro; /**< RO pattern. */
struct rte_flow_item *pattern; /**< Pattern items. */
};
union {
const struct rte_flow_action *actions_ro; /**< RO actions. */
struct rte_flow_action *actions; /**< List of actions. */
};
};
/**
* Conversion operations for flow API objects.
*
* @see rte_flow_conv()
*/
enum rte_flow_conv_op {
/**
* No operation to perform.
*
* rte_flow_conv() simply returns 0.
*/
RTE_FLOW_CONV_OP_NONE,
/**
* Convert attributes structure.
*
* This is a basic copy of an attributes structure.
*
* - @p src type:
* @code const struct rte_flow_attr * @endcode
* - @p dst type:
* @code struct rte_flow_attr * @endcode
*/
RTE_FLOW_CONV_OP_ATTR,
/**
* Convert a single item.
*
* Duplicates @p spec, @p last and @p mask but not outside objects.
*
* - @p src type:
* @code const struct rte_flow_item * @endcode
* - @p dst type:
* @code struct rte_flow_item * @endcode
*/
RTE_FLOW_CONV_OP_ITEM,
/**
* Convert a single action.
*
* Duplicates @p conf but not outside objects.
*
* - @p src type:
* @code const struct rte_flow_action * @endcode
* - @p dst type:
* @code struct rte_flow_action * @endcode
*/
RTE_FLOW_CONV_OP_ACTION,
/**
* Convert an entire pattern.
*
* Duplicates all pattern items at once with the same constraints as
* RTE_FLOW_CONV_OP_ITEM.
*
* - @p src type:
* @code const struct rte_flow_item * @endcode
* - @p dst type:
* @code struct rte_flow_item * @endcode
*/
RTE_FLOW_CONV_OP_PATTERN,
/**
* Convert a list of actions.
*
* Duplicates the entire list of actions at once with the same
* constraints as RTE_FLOW_CONV_OP_ACTION.
*
* - @p src type:
* @code const struct rte_flow_action * @endcode
* - @p dst type:
* @code struct rte_flow_action * @endcode
*/
RTE_FLOW_CONV_OP_ACTIONS,
/**
* Convert a complete flow rule description.
*
* Comprises attributes, pattern and actions together at once with
* the usual constraints.
*
* - @p src type:
* @code const struct rte_flow_conv_rule * @endcode
* - @p dst type:
* @code struct rte_flow_conv_rule * @endcode
*/
RTE_FLOW_CONV_OP_RULE,
/**
* Convert item type to its name string.
*
* Writes a NUL-terminated string to @p dst. Like snprintf(), the
* returned value excludes the terminator which is always written
* nonetheless.
*
* - @p src type:
* @code (const void *)enum rte_flow_item_type @endcode
* - @p dst type:
* @code char * @endcode
**/
RTE_FLOW_CONV_OP_ITEM_NAME,
/**
* Convert action type to its name string.
*
* Writes a NUL-terminated string to @p dst. Like snprintf(), the
* returned value excludes the terminator which is always written
* nonetheless.
*
* - @p src type:
* @code (const void *)enum rte_flow_action_type @endcode
* - @p dst type:
* @code char * @endcode
**/
RTE_FLOW_CONV_OP_ACTION_NAME,
/**
* Convert item type to pointer to item name.
*
* Retrieves item name pointer from its type. The string itself is
* not copied; instead, a unique pointer to an internal static
* constant storage is written to @p dst.
*
* - @p src type:
* @code (const void *)enum rte_flow_item_type @endcode
* - @p dst type:
* @code const char ** @endcode
*/
RTE_FLOW_CONV_OP_ITEM_NAME_PTR,
/**
* Convert action type to pointer to action name.
*
* Retrieves action name pointer from its type. The string itself is
* not copied; instead, a unique pointer to an internal static
* constant storage is written to @p dst.
*
* - @p src type:
* @code (const void *)enum rte_flow_action_type @endcode
* - @p dst type:
* @code const char ** @endcode
*/
RTE_FLOW_CONV_OP_ACTION_NAME_PTR,
};
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
*
* Dump hardware internal representation information of
* rte flow to file.
*
* @param[in] port_id
* The port identifier of the Ethernet device.
* @param[in] flow
* The pointer of flow rule to dump. Dump all rules if NULL.
* @param[in] file
* A pointer to a file for output.
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
* @return
* 0 on success, a nagative value otherwise.
*/
__rte_experimental
int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
FILE *file, struct rte_flow_error *error);
ethdev: extend flow metadata Currently, metadata can be set on egress path via mbuf tx_metadata field with PKT_TX_METADATA flag and RTE_FLOW_ITEM_TYPE_META matches metadata. This patch extends the metadata feature usability. 1) RTE_FLOW_ACTION_TYPE_SET_META When supporting multiple tables, Tx metadata can also be set by a rule and matched by another rule. This new action allows metadata to be set as a result of flow match. 2) Metadata on ingress There's also need to support metadata on ingress. Metadata can be set by SET_META action and matched by META item like Tx. The final value set by the action will be delivered to application via metadata dynamic field of mbuf which can be accessed by RTE_FLOW_DYNF_METADATA() macro or with rte_flow_dynf_metadata_set() and rte_flow_dynf_metadata_get() helper routines. PKT_RX_DYNF_METADATA flag will be set along with the data. The mbuf dynamic field must be registered by calling rte_flow_dynf_metadata_register() prior to use SET_META action. The availability of dynamic mbuf metadata field can be checked with rte_flow_dynf_metadata_avail() routine. If application is going to engage the metadata feature it registers the metadata dynamic fields, then PMD checks the metadata field availability and handles the appropriate fields in datapath. For loopback/hairpin packet, metadata set on Rx/Tx may or may not be propagated to the other path depending on hardware capability. MARK and METADATA look similar and might operate in similar way, but not interacting. Initially, there were proposed two metadata related actions: - RTE_FLOW_ACTION_TYPE_FLAG - RTE_FLOW_ACTION_TYPE_MARK These actions set the special flag in the packet metadata, MARK action stores some specified value in the metadata storage, and, on the packet receiving PMD puts the flag and value to the mbuf and applications can see the packet was threated inside flow engine according to the appropriate RTE flow(s). MARK and FLAG are like some kind of gateway to transfer some per-packet information from the flow engine to the application via receiving datapath. Also, there is the item of type RTE_FLOW_ITEM_TYPE_MARK provided. It allows us to extend the flow match pattern with the capability to match the metadata values set by MARK/FLAG actions on other flows. From the datapath point of view, the MARK and FLAG are related to the receiving side only. It would useful to have the same gateway on the transmitting side and there was the feature of type RTE_FLOW_ITEM_TYPE_META was proposed. The application can fill the field in mbuf and this value will be transferred to some field in the packet metadata inside the flow engine. It did not matter whether these metadata fields are shared because of MARK and META items belonged to different domains (receiving and transmitting) and could be vendor-specific. So far, so good, DPDK proposes some entities to control metadata inside the flow engine and gateways to exchange these values on a per-packet basis via datapaths. As we can see, the MARK and META means are not symmetric, there is absent action which would allow us to set META value on the transmitting path. So, the action of type: - RTE_FLOW_ACTION_TYPE_SET_META was proposed. The next, applications raise the new requirements for packet metadata. The flow ngines are getting more complex, internal switches are introduced, multiple ports might be supported within the same flow engine namespace. From the DPDK points of view, it means the packets might be sent on one eth_dev port and received on the other one, and the packet path inside the flow engine entirely belongs to the same hardware device. The simplest example is SR-IOV with PF, VFs and the representors. And there is a brilliant opportunity to provide some out-of-band channel to transfer some extra data from one port to another one, besides the packet data itself. And applications would like to use this opportunity. It is supposed for application to use trials (with rte_flow_validate) to detect which metadata features (FLAG, MARK, META) actually supported by PMD and underlying hardware. It might depend on PMD configuration, system software, hardware settings, etc., and should be detected in run time. Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Andrew Rybchenko <arybchenko@solarflare.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Ori Kam <orika@mellanox.com>
2019-11-05 14:19:30 +00:00
/**
* Check if mbuf dynamic field for metadata is registered.
*
* @return
* True if registered, false otherwise.
*/
__rte_experimental
static inline int
rte_flow_dynf_metadata_avail(void)
{
return !!rte_flow_dynf_metadata_mask;
}
/**
* Register mbuf dynamic field and flag for metadata.
*
* This function must be called prior to use SET_META action in order to
* register the dynamic mbuf field. Otherwise, the data cannot be delivered to
* application.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
__rte_experimental
int
rte_flow_dynf_metadata_register(void);
/**
* Check whether a flow rule can be created on a given port.
*
* The flow rule is validated for correctness and whether it could be accepted
* by the device given sufficient resources. The rule is checked against the
* current device mode and queue configuration. The flow rule may also
* optionally be validated against existing flow rules and device resources.
* This function has no effect on the target device.
*
* The returned value is guaranteed to remain valid only as long as no
* successful calls to rte_flow_create() or rte_flow_destroy() are made in
* the meantime and no device parameter affecting flow rules in any way are
* modified, due to possible collisions or resource limitations (although in
* such cases EINVAL should not be returned).
*
* @param port_id
* Port identifier of Ethernet device.
* @param[in] attr
* Flow rule attributes.
* @param[in] pattern
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
*
* @return
* 0 if flow rule is valid and can be created. A negative errno value
* otherwise (rte_errno is also set), the following errors are defined:
*
* -ENOSYS: underlying device does not support this functionality.
*
* -EIO: underlying device is removed.
*
* -EINVAL: unknown or invalid rule specification.
*
* -ENOTSUP: valid but unsupported rule specification (e.g. partial
* bit-masks are unsupported).
*
* -EEXIST: collision with an existing rule. Only returned if device
* supports flow rule collision checking and there was a flow rule
* collision. Not receiving this return code is no guarantee that creating
* the rule will not fail due to a collision.
*
* -ENOMEM: not enough memory to execute the function, or if the device
* supports resource validation, resource limitation on the device.
*
* -EBUSY: action cannot be performed due to busy device resources, may
* succeed if the affected queues or even the entire port are in a stopped
* state (see rte_eth_dev_rx_queue_stop() and rte_eth_dev_stop()).
*/
int
rte_flow_validate(uint16_t port_id,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
/**
* Create a flow rule on a given port.
*
* @param port_id
* Port identifier of Ethernet device.
* @param[in] attr
* Flow rule attributes.
* @param[in] pattern
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
*
* @return
* A valid handle in case of success, NULL otherwise and rte_errno is set
* to the positive version of one of the error codes defined for
* rte_flow_validate().
*/
struct rte_flow *
rte_flow_create(uint16_t port_id,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
/**
* Destroy a flow rule on a given port.
*
* Failure to destroy a flow rule handle may occur when other flow rules
* depend on it, and destroying it would result in an inconsistent state.
*
* This function is only guaranteed to succeed if handles are destroyed in
* reverse order of their creation.
*
* @param port_id
* Port identifier of Ethernet device.
* @param flow
* Flow rule handle to destroy.
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
rte_flow_destroy(uint16_t port_id,
struct rte_flow *flow,
struct rte_flow_error *error);
/**
* Destroy all flow rules associated with a port.
*
* In the unlikely event of failure, handles are still considered destroyed
* and no longer valid but the port must be assumed to be in an inconsistent
* state.
*
* @param port_id
* Port identifier of Ethernet device.
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
rte_flow_flush(uint16_t port_id,
struct rte_flow_error *error);
/**
* Query an existing flow rule.
*
* This function allows retrieving flow-specific data such as counters.
* Data is gathered by special actions which must be present in the flow
* rule definition.
*
* \see RTE_FLOW_ACTION_TYPE_COUNT
*
* @param port_id
* Port identifier of Ethernet device.
* @param flow
* Flow rule handle to query.
* @param action
* Action definition as defined in original flow rule.
* @param[in, out] data
* Pointer to storage for the associated query data type.
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
rte_flow_query(uint16_t port_id,
struct rte_flow *flow,
const struct rte_flow_action *action,
void *data,
struct rte_flow_error *error);
/**
* Restrict ingress traffic to the defined flow rules.
*
* Isolated mode guarantees that all ingress traffic comes from defined flow
* rules only (current and future).
*
* Besides making ingress more deterministic, it allows PMDs to safely reuse
* resources otherwise assigned to handle the remaining traffic, such as
* global RSS configuration settings, VLAN filters, MAC address entries,
* legacy filter API rules and so on in order to expand the set of possible
* flow rule types.
*
* Calling this function as soon as possible after device initialization,
* ideally before the first call to rte_eth_dev_configure(), is recommended
* to avoid possible failures due to conflicting settings.
*
* Once effective, leaving isolated mode may not be possible depending on
* PMD implementation.
*
* Additionally, the following functionality has no effect on the underlying
* port and may return errors such as ENOTSUP ("not supported"):
*
* - Toggling promiscuous mode.
* - Toggling allmulticast mode.
* - Configuring MAC addresses.
* - Configuring multicast addresses.
* - Configuring VLAN filters.
* - Configuring Rx filters through the legacy API (e.g. FDIR).
* - Configuring global RSS settings.
*
* @param port_id
* Port identifier of Ethernet device.
* @param set
* Nonzero to enter isolated mode, attempt to leave it otherwise.
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
rte_flow_isolate(uint16_t port_id, int set, struct rte_flow_error *error);
/**
* Initialize flow error structure.
*
* @param[out] error
* Pointer to flow error structure (may be NULL).
* @param code
* Related error code (rte_errno).
* @param type
* Cause field and error types.
* @param cause
* Object responsible for the error.
* @param message
* Human-readable error message.
*
* @return
* Negative error code (errno value) and rte_errno is set.
*/
int
rte_flow_error_set(struct rte_flow_error *error,
int code,
enum rte_flow_error_type type,
const void *cause,
const char *message);
/**
* @deprecated
* @see rte_flow_copy()
*/
struct rte_flow_desc {
size_t size; /**< Allocated space including data[]. */
struct rte_flow_attr attr; /**< Attributes. */
struct rte_flow_item *items; /**< Items. */
struct rte_flow_action *actions; /**< Actions. */
uint8_t data[]; /**< Storage for items/actions. */
};
/**
* @deprecated
* Copy an rte_flow rule description.
*
* This interface is kept for compatibility with older applications but is
* implemented as a wrapper to rte_flow_conv(). It is deprecated due to its
* lack of flexibility and reliance on a type unusable with C++ programs
* (struct rte_flow_desc).
*
* @param[in] fd
* Flow rule description.
* @param[in] len
* Total size of allocated data for the flow description.
* @param[in] attr
* Flow rule attributes.
* @param[in] items
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
*
* @return
* If len is greater or equal to the size of the flow, the total size of the
* flow description and its data.
* If len is lower than the size of the flow, the number of bytes that would
* have been written to desc had it been sufficient. Nothing is written.
*/
__rte_deprecated
size_t
rte_flow_copy(struct rte_flow_desc *fd, size_t len,
const struct rte_flow_attr *attr,
const struct rte_flow_item *items,
const struct rte_flow_action *actions);
/**
* Flow object conversion helper.
*
* This function performs conversion of various flow API objects to a
* pre-allocated destination buffer. See enum rte_flow_conv_op for possible
* operations and details about each of them.
*
* Since destination buffer must be large enough, it works in a manner
* reminiscent of snprintf():
*
* - If @p size is 0, @p dst may be a NULL pointer, otherwise @p dst must be
* non-NULL.
* - If positive, the returned value represents the number of bytes needed
* to store the conversion of @p src to @p dst according to @p op
* regardless of the @p size parameter.
* - Since no more than @p size bytes can be written to @p dst, output is
* truncated and may be inconsistent when the returned value is larger
* than that.
* - In case of conversion error, a negative error code is returned and
* @p dst contents are unspecified.
*
* @param op
* Operation to perform, related to the object type of @p dst.
* @param[out] dst
* Destination buffer address. Must be suitably aligned by the caller.
* @param size
* Destination buffer size in bytes.
* @param[in] src
* Source object to copy. Depending on @p op, its type may differ from
* that of @p dst.
* @param[out] error
* Perform verbose error reporting if not NULL. Initialized in case of
* error only.
*
* @return
* The number of bytes required to convert @p src to @p dst on success, a
* negative errno value otherwise and rte_errno is set.
*
* @see rte_flow_conv_op
*/
__rte_experimental
int
rte_flow_conv(enum rte_flow_conv_op op,
void *dst,
size_t size,
const void *src,
struct rte_flow_error *error);
/**
* Get aged-out flows of a given port.
*
* RTE_ETH_EVENT_FLOW_AGED event will be triggered when at least one new aged
* out flow was detected after the last call to rte_flow_get_aged_flows.
* This function can be called to get the aged flows usynchronously from the
* event callback or synchronously regardless the event.
* This is not safe to call rte_flow_get_aged_flows function with other flow
* functions from multiple threads simultaneously.
*
* @param port_id
* Port identifier of Ethernet device.
* @param[in, out] contexts
* The address of an array of pointers to the aged-out flows contexts.
* @param[in] nb_contexts
* The length of context array pointers.
* @param[out] error
* Perform verbose error reporting if not NULL. Initialized in case of
* error only.
*
* @return
* if nb_contexts is 0, return the amount of all aged contexts.
* if nb_contexts is not 0 , return the amount of aged flows reported
* in the context array, otherwise negative errno value.
*
* @see rte_flow_action_age
* @see RTE_ETH_EVENT_FLOW_AGED
*/
__rte_experimental
int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
uint32_t nb_contexts, struct rte_flow_error *error);
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
/**
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* Specify indirect action object configuration
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
*/
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
struct rte_flow_indir_action_conf {
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
/**
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* Flow direction for the indirect action configuration.
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
*
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* Action should be valid at least for one flow direction,
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
* otherwise it is invalid for both ingress and egress rules.
*/
uint32_t ingress:1;
/**< Action valid for rules applied to ingress traffic. */
uint32_t egress:1;
/**< Action valid for rules applied to egress traffic. */
/**
* When set to 1, indicates that the action is valid for
* transfer traffic; otherwise, for non-transfer traffic.
*/
uint32_t transfer:1;
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
};
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
*
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* Create an indirect action object that can be used in flow rules
* via its handle.
* The created object handle has single state and configuration
* across all the flow rules using it.
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
*
* @param[in] port_id
* The port identifier of the Ethernet device.
* @param[in] conf
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* Action configuration for the indirect action object creation.
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
* @param[in] action
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* Specific configuration of the indirect action object.
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
* @return
* A valid handle in case of success, NULL otherwise and rte_errno is set
* to one of the error codes defined:
* - (ENODEV) if *port_id* invalid.
* - (ENOSYS) if underlying device does not support this functionality.
* - (EIO) if underlying device is removed.
* - (EINVAL) if *action* invalid.
* - (ENOTSUP) if *action* valid but unsupported.
*/
__rte_experimental
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
const struct rte_flow_indir_action_conf *conf,
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
const struct rte_flow_action *action,
struct rte_flow_error *error);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
*
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* Destroy indirect action by handle.
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
*
* @param[in] port_id
* The port identifier of the Ethernet device.
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* @param[in] handle
* Handle for the indirect action object to be destroyed.
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
* @return
* - (0) if success.
* - (-ENODEV) if *port_id* invalid.
* - (-ENOSYS) if underlying device does not support this functionality.
* - (-EIO) if underlying device is removed.
* - (-ENOENT) if action pointed by *action* handle was not found.
* - (-EBUSY) if action pointed by *action* handle still used by some rules
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
* rte_errno is also set.
*/
__rte_experimental
int
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
rte_flow_action_handle_destroy(uint16_t port_id,
struct rte_flow_action_handle *handle,
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
struct rte_flow_error *error);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
*
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* Update in-place the action configuration and / or state pointed
* by action *handle* with the configuration provided as *update* argument.
* The update of the action configuration effects all flow rules reusing
* the action via *handle*.
* The update general pointer provides the ability of partial updating.
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
*
* @param[in] port_id
* The port identifier of the Ethernet device.
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* @param[in] handle
* Handle for the indirect action object to be updated.
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
* @param[in] update
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* Update profile specification used to modify the action pointed by handle.
* *update* could be with the same type of the immediate action corresponding
* to the *handle* argument when creating, or a wrapper structure includes
* action configuration to be updated and bit fields to indicate the member
* of fields inside the action to update.
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
* @return
* - (0) if success.
* - (-ENODEV) if *port_id* invalid.
* - (-ENOSYS) if underlying device does not support this functionality.
* - (-EIO) if underlying device is removed.
* - (-EINVAL) if *update* invalid.
* - (-ENOTSUP) if *update* valid but unsupported.
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* - (-ENOENT) if indirect action object pointed by *handle* was not found.
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
* rte_errno is also set.
*/
__rte_experimental
int
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
rte_flow_action_handle_update(uint16_t port_id,
struct rte_flow_action_handle *handle,
const void *update,
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
struct rte_flow_error *error);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
*
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* Query the direct action by corresponding indirect action object handle.
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
*
* Retrieve action-specific data such as counters.
* Data is gathered by special action which may be present/referenced in
* more than one flow rule definition.
*
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* @see RTE_FLOW_ACTION_TYPE_COUNT
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
*
* @param port_id
* Port identifier of Ethernet device.
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
* @param[in] handle
* Handle for the action object to query.
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
* @param[in, out] data
* Pointer to storage for the associated query data type.
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
__rte_experimental
int
ethdev: introduce indirect flow action Right now, rte_flow_shared_action_* APIs are used for some shared actions, like RSS, count. The shared action should be created before using it inside a flow. These shared actions sometimes are not really shared but just some indirect actions decoupled from a flow. The new functions rte_flow_action_handle_* are added to replace the current shared functions rte_flow_shared_action_*. There are two types of flow actions: 1. the direct (normal) actions that could be created and stored within a flow rule. Such action is tied to its flow rule and cannot be reused. 2. the indirect action, in the past, named shared_action. It is created from a direct actioni, like count or rss, and then used in the flow rules with an object handle. The PMD will take care of the retrieve from indirect action to the direct action when it is referenced. The indirect action is accessed (update / query) w/o any flow rule, just via the action object handle. For example, when querying or resetting a counter, it could be done out of any flow using this counter, but only the handle of the counter action object is required. The indirect action object could be shared by different flows or used by a single flow, depending on the direct action type and the real-life requirements. The handle of an indirect action object is opaque and defined in each driver and possibly different per direct action type. The old name "shared" is improper in a sense and should be replaced. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*", the testpmd application code and command line interfaces also need to be updated to do the adaption. The testpmd application user guide is also updated. All the "shared action" related parts are replaced with "indirect action" to have a correct explanation. The parameter of "update" interface is also changed. A general pointer will replace the rte_flow_action struct pointer due to the facts: 1. Some action may not support fields updating. In the example of a counter, the only "update" supported should be the reset. So passing a rte_flow_action struct pointer is meaningless and there is even no such corresponding action struct. What's more, if more than one operations should be supported, for some other action, such pointer parameter may not meet the need. 2. Some action may need conditional or partial update, the current parameter will not provide the ability to indicate which part(s) to update. For different types of indirect action objects, the pointer could either be the same of rte_flow_action* struct - in order not to break the current driver implementation, or some wrapper structures with bits as masks to indicate which part to be updated, depending on real needs of the corresponding direct action. For different direct actions, the structures of indirect action objects updating will be different. All the underlayer PMD callbacks will be moved to these new APIs. The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to break the ABI. All the implementations are changed by using RTE_FLOW_ACTION_TYPE_INDIRECT. Since the APIs are changed from "rte_flow_shared_action*" to the new "rte_flow_action_handle*" and the "update" interface's 3rd input parameter is changed to generic pointer, the mlx5 PMD that uses these APIs needs to do the adaption to the new APIs as well. Signed-off-by: Bing Zhao <bingz@nvidia.com> Acked-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
rte_flow_action_handle_query(uint16_t port_id,
const struct rte_flow_action_handle *handle,
void *data, struct rte_flow_error *error);
ethdev: add shared actions to flow API Introduce extension of flow action API enabling sharing of single rte_flow_action in multiple flows. The API intended for PMDs, where multiple HW offloaded flows can reuse the same HW essence/object representing flow action and modification of such an essence/object affects all the rules using it. Motivation and example === Adding or removing one or more queues to RSS used by multiple flow rules imposes per rule toll for current DPDK flow API; the scenario requires for each flow sharing cloned RSS action: - call `rte_flow_destroy()` - call `rte_flow_create()` with modified RSS action API for sharing action and its in-place update benefits: - reduce the overhead of multiple RSS flow rules reconfiguration - optimize resource utilization by sharing action across multiple flows Change description === Shared action === In order to represent flow action shared by multiple flows new action type RTE_FLOW_ACTION_TYPE_SHARED is introduced (see `enum rte_flow_action_type`). Actually the introduced API decouples action from any specific flow and enables sharing of single action by its handle across multiple flows. Shared action create/use/destroy === Shared action may be reused by some or none flow rules at any given moment, i.e. shared action resides outside of the context of any flow. Shared action represent HW resources/objects used for action offloading implementation. API for shared action create (see `rte_flow_shared_action_create()`): - should allocate HW resources and make related initializations required for shared action implementation. - make necessary preparations to maintain shared access to the action resources, configuration and state. API for shared action destroy (see `rte_flow_shared_action_destroy()`) should release HW resources and make related cleanups required for shared action implementation. In order to share some flow action reuse the handle of type `struct rte_flow_shared_action` returned by rte_flow_shared_action_create() as a `conf` field of `struct rte_flow_action` (see "example" section). If some shared action not used by any flow rule all resources allocated by the shared action can be released by rte_flow_shared_action_destroy() (see "example" section). The shared action handle passed as argument to destroy API should not be used any further i.e. result of the usage is undefined. Shared action re-configuration === Shared action behavior defined by its configuration can be updated via rte_flow_shared_action_update() (see "example" section). The shared action update operation modifies HW related resources/objects allocated on the action creation. The number of operations performed by the update operation should not depend on the number of flows sharing the related action. On return of shared action update API action behavior should be according to updated configuration for all flows sharing the action. Shared action query === Provide separate API to query shared action state (see rte_flow_shared_action_update()). Taking a counter as an example: query returns value aggregating all counter increments across all flow rules sharing the counter. This API doesn't query shared action configuration since it is controlled by rte_flow_shared_action_create() and rte_flow_shared_action_update() APIs and no supposed to change by other means. example === struct rte_flow_action actions[2]; struct rte_flow_shared_action_conf conf; struct rte_flow_action action; /* skipped: initialize conf and action */ struct rte_flow_shared_action *handle = rte_flow_shared_action_create(port_id, &conf, &action, &error); actions[0].type = RTE_FLOW_ACTION_TYPE_SHARED; actions[0].conf = handle; actions[1].type = RTE_FLOW_ACTION_TYPE_END; /* skipped: init attr0 & pattern0 args */ struct rte_flow *flow0 = rte_flow_create(port_id, &attr0, pattern0, actions, error); /* create more rules reusing shared action */ struct rte_flow *flow1 = rte_flow_create(port_id, &attr1, pattern1, actions, error); /* skipped: for flows 2 till N */ struct rte_flow *flowN = rte_flow_create(port_id, &attrN, patternN, actions, error); /* update shared action */ struct rte_flow_action updated_action; /* * skipped: initialize updated_action according to desired action * configuration change */ rte_flow_shared_action_update(port_id, handle, &updated_action, error); /* * from now on all flows 1 till N will act according to configuration of * updated_action */ /* skipped: destroy all flows 1 till N */ rte_flow_shared_action_destroy(port_id, handle, error); Signed-off-by: Andrey Vesnovaty <andreyv@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-14 11:40:14 +00:00
ethdev: add tunnel offload model rte_flow API provides the building blocks for vendor-agnostic flow classification offloads. The rte_flow "patterns" and "actions" primitives are fine-grained, thus enabling DPDK applications the flexibility to offload network stacks and complex pipelines. Applications wishing to offload tunneled traffic are required to use the rte_flow primitives, such as group, meta, mark, tag, and others to model their high-level objects. The hardware model design for high-level software objects is not trivial. Furthermore, an optimal design is often vendor-specific. When hardware offloads tunneled traffic in multi-group logic, partially offloaded packets may arrive to the application after they were modified in hardware. In this case, the application may need to restore the original packet headers. Consider the following sequence: The application decaps a packet in one group and jumps to a second group where it tries to match on a 5-tuple, that will miss and send the packet to the application. In this case, the application does not receive the original packet but a modified one. Also, in this case, the application cannot match on the outer header fields, such as VXLAN vni and 5-tuple. There are several possible ways to use rte_flow "patterns" and "actions" to resolve the issues above. For example: 1 Mapping headers to a hardware registers using the rte_flow_action_mark/rte_flow_action_tag/rte_flow_set_meta objects. 2 Apply the decap only at the last offload stage after all the "patterns" were matched and the packet will be fully offloaded. Every approach has its pros and cons and is highly dependent on the hardware vendor. For example, some hardware may have a limited number of registers while other hardware could not support inner actions and must decap before accessing inner headers. The tunnel offload model resolves these issues. The model goals are: 1 Provide a unified application API to offload tunneled traffic that is capable to match on outer headers after decap. 2 Allow the application to restore the outer header of partially offloaded packets. The tunnel offload model does not introduce new elements to the existing RTE flow model and is implemented as a set of helper functions. For the application to work with the tunnel offload API it has to adjust flow rules in multi-table tunnel offload in the following way: 1 Remove explicit call to decap action and replace it with PMD actions obtained from rte_flow_tunnel_decap_and_set() helper. 2 Add PMD items obtained from rte_flow_tunnel_match() helper to all other rules in the tunnel offload sequence. VXLAN Code example: Assume application needs to do inner NAT on the VXLAN packet. The first rule in group 0: flow create <port id> ingress group 0 pattern eth / ipv4 / udp dst is 4789 / vxlan / end actions {pmd actions} / jump group 3 / end The first VXLAN packet that arrives matches the rule in group 0 and jumps to group 3. In group 3 the packet will miss since there is no flow to match and will be sent to the application. Application will call rte_flow_get_restore_info() to get the packet outer header. Application will insert a new rule in group 3 to match outer and inner headers: flow create <port id> ingress group 3 pattern {pmd items} / eth / ipv4 dst is 172.10.10.1 / udp dst 4789 / vxlan vni is 10 / ipv4 dst is 184.1.2.3 / end actions set_ipv4_dst 186.1.1.1 / queue index 3 / end Resulting of the rules will be that VXLAN packet with vni=10, outer IPv4 dst=172.10.10.1 and inner IPv4 dst=184.1.2.3 will be received decapped on queue 3 with IPv4 dst=186.1.1.1 Note: The packet in group 3 is considered decapped. All actions in that group will be done on the header that was inner before decap. The application may specify an outer header to be matched on. It's PMD responsibility to translate these items to outer metadata. API usage: /** * 1. Initiate RTE flow tunnel object */ const struct rte_flow_tunnel tunnel = { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .tun_id = 10, } /** * 2. Obtain PMD tunnel actions * * pmd_actions is an intermediate variable application uses to * compile actions array */ struct rte_flow_action **pmd_actions; rte_flow_tunnel_decap_and_set(&tunnel, &pmd_actions, &num_pmd_actions, &error); /** * 3. offload the first rule * matching on VXLAN traffic and jumps to group 3 * (implicitly decaps packet) */ app_actions = jump group 3 rule_items = app_items; /** eth / ipv4 / udp / vxlan */ rule_actions = { pmd_actions, app_actions }; attr.group = 0; flow_1 = rte_flow_create(port_id, &attr, rule_items, rule_actions, &error); /** * 4. after flow creation application does not need to keep the * tunnel action resources. */ rte_flow_tunnel_action_release(port_id, pmd_actions, num_pmd_actions); /** * 5. After partially offloaded packet miss because there was no * matching rule handle miss on group 3 */ struct rte_flow_restore_info info; rte_flow_get_restore_info(port_id, mbuf, &info, &error); /** * 6. Offload NAT rule: */ app_items = { eth / ipv4 dst is 172.10.10.1 / udp dst 4789 / vxlan vni is 10 / ipv4 dst is 184.1.2.3 } app_actions = { set_ipv4_dst 186.1.1.1 / queue index 3 } rte_flow_tunnel_match(&info.tunnel, &pmd_items, &num_pmd_items, &error); rule_items = {pmd_items, app_items}; rule_actions = app_actions; attr.group = info.group_id; flow_2 = rte_flow_create(port_id, &attr, rule_items, rule_actions, &error); /** * 7. Release PMD items after rule creation */ rte_flow_tunnel_item_release(port_id, pmd_items, num_pmd_items); References 1. https://mails.dpdk.org/archives/dev/2020-June/index.html Signed-off-by: Eli Britstein <elibr@mellanox.com> Signed-off-by: Gregory Etelson <getelson@nvidia.com> Acked-by: Ori Kam <orika@nvidia.com> Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
2020-10-16 12:51:06 +00:00
/* Tunnel has a type and the key information. */
struct rte_flow_tunnel {
/**
* Tunnel type, for example RTE_FLOW_ITEM_TYPE_VXLAN,
* RTE_FLOW_ITEM_TYPE_NVGRE etc.
*/
enum rte_flow_item_type type;
uint64_t tun_id; /**< Tunnel identification. */
RTE_STD_C11
union {
struct {
rte_be32_t src_addr; /**< IPv4 source address. */
rte_be32_t dst_addr; /**< IPv4 destination address. */
} ipv4;
struct {
uint8_t src_addr[16]; /**< IPv6 source address. */
uint8_t dst_addr[16]; /**< IPv6 destination address. */
} ipv6;
};
rte_be16_t tp_src; /**< Tunnel port source. */
rte_be16_t tp_dst; /**< Tunnel port destination. */
uint16_t tun_flags; /**< Tunnel flags. */
bool is_ipv6; /**< True for valid IPv6 fields. Otherwise IPv4. */
/**
* the following members are required to restore packet
* after miss
*/
uint8_t tos; /**< TOS for IPv4, TC for IPv6. */
uint8_t ttl; /**< TTL for IPv4, HL for IPv6. */
uint32_t label; /**< Flow Label for IPv6. */
};
/**
* Indicate that the packet has a tunnel.
*/
#define RTE_FLOW_RESTORE_INFO_TUNNEL (1ULL << 0)
/**
* Indicate that the packet has a non decapsulated tunnel header.
*/
#define RTE_FLOW_RESTORE_INFO_ENCAPSULATED (1ULL << 1)
/**
* Indicate that the packet has a group_id.
*/
#define RTE_FLOW_RESTORE_INFO_GROUP_ID (1ULL << 2)
/**
* Restore information structure to communicate the current packet processing
* state when some of the processing pipeline is done in hardware and should
* continue in software.
*/
struct rte_flow_restore_info {
/**
* Bitwise flags (RTE_FLOW_RESTORE_INFO_*) to indicate validation of
* other fields in struct rte_flow_restore_info.
*/
uint64_t flags;
uint32_t group_id; /**< Group ID where packed missed */
struct rte_flow_tunnel tunnel; /**< Tunnel information. */
};
/**
* Allocate an array of actions to be used in rte_flow_create, to implement
* tunnel-decap-set for the given tunnel.
* Sample usage:
* actions vxlan_decap / tunnel-decap-set(tunnel properties) /
* jump group 0 / end
*
* @param port_id
* Port identifier of Ethernet device.
* @param[in] tunnel
* Tunnel properties.
* @param[out] actions
* Array of actions to be allocated by the PMD. This array should be
* concatenated with the actions array provided to rte_flow_create.
* @param[out] num_of_actions
* Number of actions allocated.
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
__rte_experimental
int
rte_flow_tunnel_decap_set(uint16_t port_id,
struct rte_flow_tunnel *tunnel,
struct rte_flow_action **actions,
uint32_t *num_of_actions,
struct rte_flow_error *error);
/**
* Allocate an array of items to be used in rte_flow_create, to implement
* tunnel-match for the given tunnel.
* Sample usage:
* pattern tunnel-match(tunnel properties) / outer-header-matches /
* inner-header-matches / end
*
* @param port_id
* Port identifier of Ethernet device.
* @param[in] tunnel
* Tunnel properties.
* @param[out] items
* Array of items to be allocated by the PMD. This array should be
* concatenated with the items array provided to rte_flow_create.
* @param[out] num_of_items
* Number of items allocated.
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
__rte_experimental
int
rte_flow_tunnel_match(uint16_t port_id,
struct rte_flow_tunnel *tunnel,
struct rte_flow_item **items,
uint32_t *num_of_items,
struct rte_flow_error *error);
/**
* Populate the current packet processing state, if exists, for the given mbuf.
*
* @param port_id
* Port identifier of Ethernet device.
* @param[in] m
* Mbuf struct.
* @param[out] info
* Restore information. Upon success contains the HW state.
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
__rte_experimental
int
rte_flow_get_restore_info(uint16_t port_id,
struct rte_mbuf *m,
struct rte_flow_restore_info *info,
struct rte_flow_error *error);
/**
* Release the action array as allocated by rte_flow_tunnel_decap_set.
*
* @param port_id
* Port identifier of Ethernet device.
* @param[in] actions
* Array of actions to be released.
* @param[in] num_of_actions
* Number of elements in actions array.
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
__rte_experimental
int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
struct rte_flow_action *actions,
uint32_t num_of_actions,
struct rte_flow_error *error);
/**
* Release the item array as allocated by rte_flow_tunnel_match.
*
* @param port_id
* Port identifier of Ethernet device.
* @param[in] items
* Array of items to be released.
* @param[in] num_of_items
* Number of elements in item array.
* @param[out] error
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
__rte_experimental
int
rte_flow_tunnel_item_release(uint16_t port_id,
struct rte_flow_item *items,
uint32_t num_of_items,
struct rte_flow_error *error);
#ifdef __cplusplus
}
#endif
#endif /* RTE_FLOW_H_ */