net/mlx5: add pattern template management

A pattern template defines flows that share the same matching
fields but use different matching values.
For example, when matching on a 5-tuple TCP flow, the template is
(eth(null) + IPv4(source + dest) + TCP(s_port + d_port)), while
the values for each rule will differ.
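
As a rough illustration (a minimal sketch with the generic rte_flow
item structures and made-up masks, not code from this patch), the
template fixes only which fields are masked, while each rule later
supplies its own spec values:

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* 5-tuple TCP template layout: eth(null) + IPv4(src/dst) + TCP(ports). */
static const struct rte_flow_item_ipv4 ipv4_mask = {
        .hdr = {
                .src_addr = RTE_BE32(UINT32_MAX),
                .dst_addr = RTE_BE32(UINT32_MAX),
        },
};
static const struct rte_flow_item_tcp tcp_mask = {
        .hdr = {
                .src_port = RTE_BE16(UINT16_MAX),
                .dst_port = RTE_BE16(UINT16_MAX),
        },
};
static const struct rte_flow_item pattern_5tuple[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },                      /* eth(null) */
        { .type = RTE_FLOW_ITEM_TYPE_IPV4, .mask = &ipv4_mask }, /* source + dest */
        { .type = RTE_FLOW_ITEM_TYPE_TCP, .mask = &tcp_mask },   /* s_port + d_port */
        { .type = RTE_FLOW_ITEM_TYPE_END },
};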

Since a pattern template can be used in different domains, the
items are only cached at pattern template creation time; once the
template is bound to a dedicated table, the HW criteria are created
and saved to that table. A pattern template can be used by multiple
tables, but tables that create the same criteria do not share the
matcher with each other, in order to achieve better performance.
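
A hedged application-side sketch of the resulting flow (port_id and
the pattern_5tuple array above are placeholders, and the port is
assumed to have been switched to template mode with
rte_flow_configure() beforehand; none of this is part of the patch):

struct rte_flow_error error;
struct rte_flow_pattern_template_attr pt_attr = {
        .relaxed_matching = 0, /* unmasked protocol layers are matched too */
};
struct rte_flow_pattern_template *pt;

pt = rte_flow_pattern_template_create(port_id, &pt_attr, pattern_5tuple, &error);
if (pt == NULL)
        rte_exit(EXIT_FAILURE, "pattern template create: %s\n", error.message);
/* ... reference the template from template tables, enqueue rules ... */
if (rte_flow_pattern_template_destroy(port_id, pt, &error) != 0)
        rte_exit(EXIT_FAILURE, "pattern template destroy: %s\n", error.message);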

This commit adds pattern template management.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Author:    Suanming Mou <suanmingm@nvidia.com>
Date:      2022-02-24 15:40:42 +02:00
Committer: Raslan Darawsheh
Parent:    b401400db2
Commit:    42431df924
4 changed files with 188 additions and 0 deletions


@@ -1502,6 +1502,8 @@ struct mlx5_priv {
        /* Flex items have been created on the port. */
        uint32_t flex_item_map; /* Map of allocated flex item elements. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        /* Item template list. */
        LIST_HEAD(flow_hw_itt, rte_flow_pattern_template) flow_hw_itt;
        struct mlx5dr_context *dr_ctx; /**< HW steering DR context. */
        uint32_t nb_queue; /* HW steering queue number. */
        /* HW steering queue polling mechanism job descriptor LIFO. */


@@ -817,6 +817,17 @@ mlx5_flow_port_configure(struct rte_eth_dev *dev,
                const struct rte_flow_queue_attr *queue_attr[],
                struct rte_flow_error *err);

static struct rte_flow_pattern_template *
mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
                const struct rte_flow_pattern_template_attr *attr,
                const struct rte_flow_item items[],
                struct rte_flow_error *error);

static int
mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
                struct rte_flow_pattern_template *template,
                struct rte_flow_error *error);

static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
        .create = mlx5_flow_create,
@@ -839,6 +850,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
        .flex_item_release = mlx5_flow_flex_item_release,
        .info_get = mlx5_flow_info_get,
        .configure = mlx5_flow_port_configure,
        .pattern_template_create = mlx5_flow_pattern_template_create,
        .pattern_template_destroy = mlx5_flow_pattern_template_destroy,
};

/* Tunnel information. */
@@ -7924,6 +7937,69 @@ mlx5_flow_port_configure(struct rte_eth_dev *dev,
        return fops->configure(dev, port_attr, nb_queue, queue_attr, error);
}

/**
 * Create flow item template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the item template attributes.
 * @param[in] items
 *   The template item pattern.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Item template pointer on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow_pattern_template *
mlx5_flow_pattern_template_create(struct rte_eth_dev *dev,
                const struct rte_flow_pattern_template_attr *attr,
                const struct rte_flow_item items[],
                struct rte_flow_error *error)
{
        const struct mlx5_flow_driver_ops *fops;

        if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW) {
                rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                NULL,
                                "pattern create with incorrect steering mode");
                return NULL;
        }
        fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
        return fops->pattern_template_create(dev, attr, items, error);
}

/**
 * Destroy flow item template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] template
 *   Pointer to the item template to be destroyed.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_pattern_template_destroy(struct rte_eth_dev *dev,
                struct rte_flow_pattern_template *template,
                struct rte_flow_error *error)
{
        const struct mlx5_flow_driver_ops *fops;

        if (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                NULL,
                                "pattern destroy with incorrect steering mode");
        fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
        return fops->pattern_template_destroy(dev, template, error);
}

/**
* Allocate a new memory for the counter values wrapped by all the needed
* management.


@@ -1015,6 +1015,19 @@ struct rte_flow {
        uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. */
} __rte_packed;

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

/* Flow item template struct. */
struct rte_flow_pattern_template {
        LIST_ENTRY(rte_flow_pattern_template) next;
        /* Template attributes. */
        struct rte_flow_pattern_template_attr attr;
        struct mlx5dr_match_template *mt; /* mlx5 match template. */
        uint32_t refcnt; /* Reference counter. */
};

#endif

/*
* Define list of valid combinations of RX Hash fields
* (see enum ibv_rx_hash_fields).
@@ -1268,6 +1281,15 @@ typedef int (*mlx5_flow_port_configure_t)
                 uint16_t nb_queue,
                 const struct rte_flow_queue_attr *queue_attr[],
                 struct rte_flow_error *err);

typedef struct rte_flow_pattern_template *(*mlx5_flow_pattern_template_create_t)
                (struct rte_eth_dev *dev,
                 const struct rte_flow_pattern_template_attr *attr,
                 const struct rte_flow_item items[],
                 struct rte_flow_error *error);

typedef int (*mlx5_flow_pattern_template_destroy_t)
                (struct rte_eth_dev *dev,
                 struct rte_flow_pattern_template *template,
                 struct rte_flow_error *error);

struct mlx5_flow_driver_ops {
        mlx5_flow_validate_t validate;
@@ -1308,6 +1330,8 @@ struct mlx5_flow_driver_ops {
        mlx5_flow_item_update_t item_update;
        mlx5_flow_info_get_t info_get;
        mlx5_flow_port_configure_t configure;
        mlx5_flow_pattern_template_create_t pattern_template_create;
        mlx5_flow_pattern_template_destroy_t pattern_template_destroy;
};

/* mlx5_flow.c */


@@ -13,6 +13,85 @@
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;

/**
 * Create flow item template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the item template attributes.
 * @param[in] items
 *   The template item pattern.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Item template pointer on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow_pattern_template *
flow_hw_pattern_template_create(struct rte_eth_dev *dev,
                const struct rte_flow_pattern_template_attr *attr,
                const struct rte_flow_item items[],
                struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow_pattern_template *it;

        it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
        if (!it) {
                rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                NULL,
                                "cannot allocate item template");
                return NULL;
        }
        it->attr = *attr;
        it->mt = mlx5dr_match_template_create(items, attr->relaxed_matching);
        if (!it->mt) {
                mlx5_free(it);
                rte_flow_error_set(error, rte_errno,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                NULL,
                                "cannot create match template");
                return NULL;
        }
        __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
        LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
        return it;
}

/**
 * Destroy flow item template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] template
 *   Pointer to the item template to be destroyed.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
                struct rte_flow_pattern_template *template,
                struct rte_flow_error *error __rte_unused)
{
        if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
                DRV_LOG(WARNING, "Item template %p is still in use.",
                        (void *)template);
                return rte_flow_error_set(error, EBUSY,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                NULL,
                                "item template is in use");
        }
        LIST_REMOVE(template, next);
        claim_zero(mlx5dr_match_template_destroy(template->mt));
        mlx5_free(template);
        return 0;
}

/*
* Get information about HWS pre-configurable resources.
*
* @param[in] dev
@@ -154,9 +233,14 @@ void
flow_hw_resource_release(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow_pattern_template *it;

        if (!priv->dr_ctx)
                return;
        while (!LIST_EMPTY(&priv->flow_hw_itt)) {
                it = LIST_FIRST(&priv->flow_hw_itt);
                flow_hw_pattern_template_destroy(dev, it, NULL);
        }
        mlx5_free(priv->hw_q);
        priv->hw_q = NULL;
        claim_zero(mlx5dr_context_close(priv->dr_ctx));
@@ -167,6 +251,8 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
        .info_get = flow_hw_info_get,
        .configure = flow_hw_configure,
        .pattern_template_create = flow_hw_pattern_template_create,
        .pattern_template_destroy = flow_hw_pattern_template_destroy,
};
#endif