numam-dpdk/drivers/net/mlx5/mlx5_flow_hw.c
Suanming Mou 42431df924 net/mlx5: add pattern template management
The pattern template defines flows that have the same matching
fields but different matching values.
For example, when matching on a 5-tuple TCP flow, the template is
eth(null) + IPv4(source + dest) + TCP(s_port + d_port), while the
values for each rule are different.

Since a pattern template can be used in different domains, the items
are only cached at pattern template creation stage. Once the template
is bound to a dedicated table, the HW criteria are created and saved
to that table. A pattern template can be used by multiple tables, but
each table creates its own criteria and does not share the matcher
with other tables, in order to achieve better performance.
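
As an illustration, such a 5-tuple template could be created from the
application side roughly as follows. This is a minimal sketch against
the generic rte_flow template API; port_id and the chosen masks are
placeholders, and the per-rule values are supplied later when rules
are enqueued against a template table:

    const struct rte_flow_pattern_template_attr pt_attr = {
            .relaxed_matching = 0,
    };
    const struct rte_flow_item_ipv4 ipv4_mask = {
            .hdr = {
                    .src_addr = RTE_BE32(0xffffffff),
                    .dst_addr = RTE_BE32(0xffffffff),
            },
    };
    const struct rte_flow_item_tcp tcp_mask = {
            .hdr = {
                    .src_port = RTE_BE16(0xffff),
                    .dst_port = RTE_BE16(0xffff),
            },
    };
    const struct rte_flow_item pattern[] = {
            /* eth(null): only the presence of the header matters. */
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            /* IPv4 source/destination address, values set per rule. */
            { .type = RTE_FLOW_ITEM_TYPE_IPV4, .mask = &ipv4_mask },
            /* TCP source/destination port, values set per rule. */
            { .type = RTE_FLOW_ITEM_TYPE_TCP, .mask = &tcp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_error error;
    struct rte_flow_pattern_template *pt =
            rte_flow_pattern_template_create(port_id, &pt_attr,
                                             pattern, &error);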

This commit adds pattern template management.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
2022-02-24 22:10:18 +01:00

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include <rte_flow.h>

#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5_flow.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;

/**
 * Create flow item template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the item template attributes.
 * @param[in] items
 *   The template item pattern.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Item template pointer on success, NULL otherwise and rte_errno is set.
 */
static struct rte_flow_pattern_template *
flow_hw_pattern_template_create(struct rte_eth_dev *dev,
				const struct rte_flow_pattern_template_attr *attr,
				const struct rte_flow_item items[],
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_pattern_template *it;

	it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
	if (!it) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate item template");
		return NULL;
	}
	it->attr = *attr;
	it->mt = mlx5dr_match_template_create(items, attr->relaxed_matching);
	if (!it->mt) {
		mlx5_free(it);
		rte_flow_error_set(error, rte_errno,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create match template");
		return NULL;
	}
	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
	return it;
}

/**
 * Destroy flow item template.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] template
 *   Pointer to the item template to be destroyed.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
				 struct rte_flow_pattern_template *template,
				 struct rte_flow_error *error)
{
	if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
		DRV_LOG(WARNING, "Item template %p is still in use.",
			(void *)template);
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "item template is in use");
	}
	LIST_REMOVE(template, next);
	claim_zero(mlx5dr_match_template_destroy(template->mt));
	mlx5_free(template);
	return 0;
}

/**
 * Get information about HWS pre-configurable resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] port_info
 *   Pointer to port information.
 * @param[out] queue_info
 *   Pointer to queue information.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,
		 struct rte_flow_port_info *port_info,
		 struct rte_flow_queue_info *queue_info,
		 struct rte_flow_error *error __rte_unused)
{
	/* Nothing to be updated currently. */
	memset(port_info, 0, sizeof(*port_info));
	/* Queue size is unlimited from low-level. */
	queue_info->max_size = UINT32_MAX;
	return 0;
}

/**
 * Configure port HWS resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] port_attr
 *   Port configuration attributes.
 * @param[in] nb_queue
 *   Number of queues.
 * @param[in] queue_attr
 *   Array that holds attributes for each flow queue.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_hw_configure(struct rte_eth_dev *dev,
		  const struct rte_flow_port_attr *port_attr,
		  uint16_t nb_queue,
		  const struct rte_flow_queue_attr *queue_attr[],
		  struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5dr_context *dr_ctx = NULL;
	struct mlx5dr_context_attr dr_ctx_attr = {0};
	struct mlx5_hw_q *hw_q;
	struct mlx5_hw_q_job *job = NULL;
	uint32_t mem_size, i, j;

	if (!port_attr || !nb_queue || !queue_attr) {
		rte_errno = EINVAL;
		goto err;
	}
	/* In case of re-configuration, release the existing context first. */
	if (priv->dr_ctx) {
		/* Make sure the previously configured queues are all empty. */
		for (i = 0; i < priv->nb_queue; i++) {
			hw_q = &priv->hw_q[i];
			if (hw_q->size != hw_q->job_idx) {
				rte_errno = EBUSY;
				goto err;
			}
		}
		flow_hw_resource_release(dev);
	}
	/* Allocate the queue job descriptor LIFO. */
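	/*
	 * The descriptors are carved out of a single allocation: the
	 * mlx5_hw_q array itself, followed, for each queue, by its job
	 * pointer array (the LIFO) and then its job descriptors.
	 */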
	mem_size = sizeof(priv->hw_q[0]) * nb_queue;
	for (i = 0; i < nb_queue; i++) {
		/*
		 * All queue sizes must be identical; this is a
		 * limitation of the HWS layer.
		 */
		if (queue_attr[i]->size != queue_attr[0]->size) {
			rte_errno = EINVAL;
			goto err;
		}
		mem_size += (sizeof(struct mlx5_hw_q_job *) +
			     sizeof(struct mlx5_hw_q_job)) *
			     queue_attr[0]->size;
	}
	priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
				 64, SOCKET_ID_ANY);
	if (!priv->hw_q) {
		rte_errno = ENOMEM;
		goto err;
	}
	for (i = 0; i < nb_queue; i++) {
		priv->hw_q[i].job_idx = queue_attr[i]->size;
		priv->hw_q[i].size = queue_attr[i]->size;
		if (i == 0)
			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
					    &priv->hw_q[nb_queue];
		else
			priv->hw_q[i].job = (struct mlx5_hw_q_job **)
					    &job[queue_attr[i - 1]->size];
		job = (struct mlx5_hw_q_job *)
		      &priv->hw_q[i].job[queue_attr[i]->size];
		for (j = 0; j < queue_attr[i]->size; j++)
			priv->hw_q[i].job[j] = &job[j];
	}
	dr_ctx_attr.pd = priv->sh->cdev->pd;
	dr_ctx_attr.queues = nb_queue;
	/* All queue sizes are the same, take the first one. */
	dr_ctx_attr.queue_size = queue_attr[0]->size;
	dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
	/* rte_errno has been updated by the HWS layer. */
	if (!dr_ctx)
		goto err;
	priv->dr_ctx = dr_ctx;
	priv->nb_queue = nb_queue;
	return 0;
err:
	if (dr_ctx)
		claim_zero(mlx5dr_context_close(dr_ctx));
	mlx5_free(priv->hw_q);
	priv->hw_q = NULL;
	return rte_flow_error_set(error, rte_errno,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "failed to configure port");
}
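
/*
 * Usage sketch (application side, not part of this driver): the port must
 * be configured through the generic template API before any pattern
 * template or table can be created; such a call ends up in
 * flow_hw_info_get() and flow_hw_configure() above. The values below are
 * illustrative only; note that all queue sizes must be equal.
 *
 *	struct rte_flow_port_info port_info;
 *	struct rte_flow_queue_info queue_info;
 *	struct rte_flow_error error;
 *	const struct rte_flow_port_attr port_attr = {0};
 *	const struct rte_flow_queue_attr q_attr = { .size = 1024 };
 *	const struct rte_flow_queue_attr *q_attrs[] = { &q_attr, &q_attr };
 *
 *	rte_flow_info_get(port_id, &port_info, &queue_info, &error);
 *	rte_flow_configure(port_id, &port_attr, RTE_DIM(q_attrs), q_attrs,
 *			   &error);
 */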

/**
 * Release HWS resources.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
void
flow_hw_resource_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_pattern_template *it;

	if (!priv->dr_ctx)
		return;
	while (!LIST_EMPTY(&priv->flow_hw_itt)) {
		it = LIST_FIRST(&priv->flow_hw_itt);
		flow_hw_pattern_template_destroy(dev, it, NULL);
	}
	mlx5_free(priv->hw_q);
	priv->hw_q = NULL;
	claim_zero(mlx5dr_context_close(priv->dr_ctx));
	priv->dr_ctx = NULL;
	priv->nb_queue = 0;
}

const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
	.info_get = flow_hw_info_get,
	.configure = flow_hw_configure,
	.pattern_template_create = flow_hw_pattern_template_create,
	.pattern_template_destroy = flow_hw_pattern_template_destroy,
};

#endif