numam-dpdk/lib/ethdev/rte_flow_driver.h

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */
#ifndef RTE_FLOW_DRIVER_H_
#define RTE_FLOW_DRIVER_H_
/**
 * @file
 * RTE generic flow API (driver side)
 *
 * This file provides implementation helpers for internal use by PMDs. They
 * are not intended to be exposed to applications and are not subject to ABI
 * versioning.
 */
#include <stdint.h>
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "rte_flow.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
 * Generic flow operations structure implemented and returned by PMDs.
 *
 * Applications are not supposed to use these callbacks directly; they must
 * rely on the API defined in rte_flow.h instead.
 *
 * The public-facing wrapper functions perform a few consistency checks so
 * that unimplemented (i.e. NULL) callbacks simply return -ENOTSUP; an
 * illustrative sketch of that dispatch follows below. These callbacks
 * otherwise only differ by their first argument (with the port ID already
 * resolved to a pointer to struct rte_eth_dev).
 */
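/*
 * Illustrative sketch (not part of the original header) of how a public
 * wrapper dispatches to one of these callbacks. rte_flow_ops_get() and the
 * rte_eth_devices[] lookup mirror what lib/ethdev/rte_flow.c does around
 * this DPDK version; the exact helper names and error handling here are
 * assumptions for illustration, not guarantees.
 *
 *	int
 *	rte_flow_validate(uint16_t port_id,
 *			  const struct rte_flow_attr *attr,
 *			  const struct rte_flow_item pattern[],
 *			  const struct rte_flow_action actions[],
 *			  struct rte_flow_error *error)
 *	{
 *		const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
 *		struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 *
 *		if (ops == NULL)
 *			return -rte_errno;
 *		if (ops->validate == NULL) // unimplemented callback -> -ENOTSUP
 *			return rte_flow_error_set(error, ENOTSUP,
 *						  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 *						  NULL, rte_strerror(ENOTSUP));
 *		return ops->validate(dev, attr, pattern, actions, error);
 *	}
 */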
struct rte_flow_ops {
	/** See rte_flow_validate(). */
	int (*validate)
		(struct rte_eth_dev *,
		 const struct rte_flow_attr *,
		 const struct rte_flow_item [],
		 const struct rte_flow_action [],
		 struct rte_flow_error *);
	/** See rte_flow_create(). */
	struct rte_flow *(*create)
		(struct rte_eth_dev *,
		 const struct rte_flow_attr *,
		 const struct rte_flow_item [],
		 const struct rte_flow_action [],
		 struct rte_flow_error *);
	/** See rte_flow_destroy(). */
	int (*destroy)
		(struct rte_eth_dev *,
		 struct rte_flow *,
		 struct rte_flow_error *);
	/** See rte_flow_flush(). */
	int (*flush)
		(struct rte_eth_dev *,
		 struct rte_flow_error *);
	/** See rte_flow_query(). */
	int (*query)
		(struct rte_eth_dev *,
		 struct rte_flow *,
		 const struct rte_flow_action *,
		 void *,
		 struct rte_flow_error *);
	/** See rte_flow_isolate(). */
	int (*isolate)
		(struct rte_eth_dev *,
		 int,
		 struct rte_flow_error *);
	/** See rte_flow_dev_dump(). */
	int (*dev_dump)
		(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 FILE *file,
		 struct rte_flow_error *error);
	/** See rte_flow_get_aged_flows(). */
	int (*get_aged_flows)
		(struct rte_eth_dev *dev,
		 void **context,
		 uint32_t nb_contexts,
		 struct rte_flow_error *err);
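	/*
	 * Illustrative note (not part of the original header): the callback
	 * above backs rte_flow_get_aged_flows(). A typical application-side
	 * pattern, usually driven by the RTE_ETH_EVENT_FLOW_AGED event, is
	 * sketched here with error handling elided.
	 *
	 *	// nb_contexts == 0 only asks for the number of aged flows
	 *	int n = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
	 *	void **ctx = calloc(n, sizeof(void *));
	 *
	 *	// each returned context is the rte_flow_action_age.context
	 *	// that was attached to the aged rule
	 *	n = rte_flow_get_aged_flows(port_id, ctx, n, &error);
	 */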
	/** See rte_flow_action_handle_create(). */
	struct rte_flow_action_handle *(*action_handle_create)
		(struct rte_eth_dev *dev,
		 const struct rte_flow_indir_action_conf *conf,
		 const struct rte_flow_action *action,
		 struct rte_flow_error *error);
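	/*
	 * Illustrative sketch (not part of the original header): how an
	 * application exercises the indirect action callbacks through the
	 * public API (create above; destroy/update/query follow below).
	 * Initialization of conf/action and error handling are elided, and
	 * the exact update payload format is action-type specific.
	 *
	 *	struct rte_flow_action actions[2];
	 *	struct rte_flow_action_handle *handle;
	 *
	 *	handle = rte_flow_action_handle_create(port_id, &conf,
	 *					       &action, &error);
	 *	actions[0].type = RTE_FLOW_ACTION_TYPE_INDIRECT;
	 *	actions[0].conf = handle;
	 *	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
	 *	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
	 *
	 *	// all rules referencing the handle follow the update at once
	 *	rte_flow_action_handle_update(port_id, handle, &new_conf, &error);
	 *
	 *	rte_flow_destroy(port_id, flow, &error);
	 *	rte_flow_action_handle_destroy(port_id, handle, &error);
	 */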
	/** See rte_flow_action_handle_destroy(). */
	int (*action_handle_destroy)
		(struct rte_eth_dev *dev,
		 struct rte_flow_action_handle *handle,
		 struct rte_flow_error *error);
	/** See rte_flow_action_handle_update(). */
	int (*action_handle_update)
		(struct rte_eth_dev *dev,
		 struct rte_flow_action_handle *handle,
		 const void *update,
		 struct rte_flow_error *error);
/** See rte_flow_action_handle_query() */
int (*action_handle_query)
(struct rte_eth_dev *dev,
const struct rte_flow_action_handle *handle,
void *data,
struct rte_flow_error *error);
/** See rte_flow_tunnel_decap_set() */
int (*tunnel_decap_set)
(struct rte_eth_dev *dev,
struct rte_flow_tunnel *tunnel,
struct rte_flow_action **pmd_actions,
uint32_t *num_of_actions,
struct rte_flow_error *err);
/** See rte_flow_tunnel_match() */
int (*tunnel_match)
(struct rte_eth_dev *dev,
struct rte_flow_tunnel *tunnel,
struct rte_flow_item **pmd_items,
uint32_t *num_of_items,
struct rte_flow_error *err);
/** See rte_flow_get_restore_info() */
int (*get_restore_info)
(struct rte_eth_dev *dev,
struct rte_mbuf *m,
struct rte_flow_restore_info *info,
struct rte_flow_error *err);
/** See rte_flow_tunnel_action_decap_release() */
int (*tunnel_action_decap_release)
(struct rte_eth_dev *dev,
struct rte_flow_action *pmd_actions,
uint32_t num_of_actions,
struct rte_flow_error *err);
/** See rte_flow_tunnel_item_release() */
int (*tunnel_item_release)
(struct rte_eth_dev *dev,
struct rte_flow_item *pmd_items,
uint32_t num_of_items,
struct rte_flow_error *err);
};
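/*
 * Illustrative sketch (not part of this header's API): a PMD typically
 * implements a subset of the callbacks above, gathers them in a constant
 * rte_flow_ops table, and returns that table from its
 * dev_ops->flow_ops_get handler. All "mypmd_*" names below are
 * hypothetical, and the code is guarded by #if 0 because it is only an
 * example under these assumptions, not a definitive implementation.
 */
#if 0
static int
mypmd_flow_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	/*
	 * A real driver checks attr/pattern/actions against hardware
	 * capabilities; this sketch accepts every rule.
	 */
	(void)dev;
	(void)attr;
	(void)pattern;
	(void)actions;
	(void)error;
	return 0;
}

static const struct rte_flow_ops mypmd_flow_ops = {
	.validate = mypmd_flow_validate,
	/*
	 * Callbacks left NULL cause the public-facing wrappers to return
	 * -ENOTSUP for the corresponding rte_flow_*() call.
	 */
};

static int
mypmd_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops)
{
	(void)dev;
	*ops = &mypmd_flow_ops;
	return 0;
}
#endif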
/**
* Get generic flow operations structure from a port.
*
* @param port_id
* Port identifier to query.
* @param[out] error
* Pointer to flow error structure.
*
* @return
* The flow operations structure associated with port_id, NULL in case of
* error, in which case rte_errno is set and the error structure contains
* additional details.
*/
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error);
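
/*
 * Illustrative sketch of the dispatch pattern performed by the
 * public-facing wrappers (simplified; the real wrappers live in the
 * rte_flow implementation, and "example_flow_isolate" is a hypothetical
 * name): resolve the port to its ops table, then fail with -ENOTSUP when
 * the PMD left the callback NULL. Assumes <errno.h> and <rte_errno.h>.
 */
#if 0
static int
example_flow_isolate(uint16_t port_id, int set, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (ops == NULL)
		return -rte_errno; /* rte_flow_ops_get() filled *error. */
	if (ops->isolate == NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	return ops->isolate(dev, set, error);
}
#endif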
#ifdef __cplusplus
}
#endif
#endif /* RTE_FLOW_DRIVER_H_ */