Raja Zidane 2700326085 regex/mlx5: refactor HW queue objects
The mlx5 PMD for regex class uses an MMO WQE operated by the GGA engine
in BF devices.
Currently, all the MMO WQEs are managed by the SQ object.
Starting from BF3, the queue of MMO WQEs must be connected to the
GGA engine using a new configuration, MMO, which is supported only
in the QP object.
The FW exposes new capabilities that define whether the MMO
configuration must be set for the GGA queue.
Replace all the GGA queue objects with QP objects and set the MMO
configuration according to the new FW capabilities.

Signed-off-by: Raja Zidane <rzidane@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
2021-10-05 18:15:40 +02:00
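
For illustration only, a minimal sketch of the new queue-creation flow, assuming the common DevX helper mlx5_devx_qp_create() and an `mmo` bit in struct mlx5_devx_qp_attr (these names and the helper signature are assumptions, not taken from this commit): the GGA queue is created as a QP, and the MMO configuration is enabled only when the FW capability reports it.

/* Illustrative sketch, not the exact PMD code: helper and attribute names
 * (mlx5_devx_qp_create(), mlx5_devx_qp_attr.mmo) are assumptions.
 */
static int
regex_ctrl_create_hw_qp(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
			struct mlx5_regex_hw_qp *hw_qp, uint16_t log_nb_desc)
{
	struct mlx5_devx_qp_attr attr = {
		.cqn = qp->cq.cq_obj.cq->id,     /* All HW QPs share one CQ. */
		.uar_index = priv->uar->page_id, /* Doorbell UAR. */
	};
	int ret;

	/* BF3 FW connects the GGA engine through the QP MMO configuration;
	 * enable it only when the capability is reported.
	 */
	if (priv->mmo_regex_qp_cap)
		attr.mmo = 1;
	ret = regex_get_pdn(priv->pd, &attr.pd);
	if (ret)
		return ret;
	hw_qp->log_nb_desc = log_nb_desc;
	hw_qp->pi = 0;
	hw_qp->ci = 0;
	/* Assumed helper; the queue-size argument may be a log or a count. */
	ret = mlx5_devx_qp_create(priv->ctx, &hw_qp->qp_obj, log_nb_desc,
				  &attr, SOCKET_ID_ANY);
	if (ret)
		return ret;
	hw_qp->qpn = hw_qp->qp_obj.qp->id;
	return 0;
}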


/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#ifndef MLX5_REGEX_H
#define MLX5_REGEX_H

#include <rte_regexdev.h>
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_common_devx.h>

#include "mlx5_rxp.h"
#include "mlx5_regex_utils.h"

struct mlx5_regex_hw_qp {
	uint16_t log_nb_desc; /* Log 2 number of desc for this object. */
	struct mlx5_devx_qp qp_obj; /* The QP DevX object. */
	size_t pi, db_pi; /* Producer index and last index pushed to doorbell. */
	size_t ci; /* Consumer (completion) index. */
	uint32_t qpn; /* The QP number. */
};

struct mlx5_regex_cq {
	uint32_t log_nb_desc; /* Log 2 number of desc for this object. */
	struct mlx5_devx_cq cq_obj; /* The CQ DevX object. */
	size_t ci; /* Consumer index. */
};

struct mlx5_regex_qp {
	uint32_t flags; /* QP user flags. */
	uint32_t nb_desc; /* Total number of desc for this qp. */
	struct mlx5_regex_hw_qp *qps; /* Pointer to qp array. */
	uint16_t nb_obj; /* Number of qp objects. */
	struct mlx5_regex_cq cq; /* CQ struct. */
	uint32_t free_qps; /* Bitmap of HW QPs with free descriptors. */
	struct mlx5_regex_job *jobs; /* Per-descriptor job array. */
	struct ibv_mr *metadata; /* Metadata memory region. */
	struct ibv_mr *outputs; /* Outputs memory region. */
	struct ibv_mr *imkey_addr; /* Indirect mkey array region. */
	size_t ci, pi; /* Consumer and producer indexes. */
	struct mlx5_mr_ctrl mr_ctrl; /* Per-queue MR control. */
};
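
/* Illustrative helper only (not part of the PMD): the generic queue pair is
 * split into nb_obj hardware QPs that share a single CQ, and free_qps is
 * used as a bitmap of HW QPs that currently have room for more WQEs.
 */
static inline struct mlx5_regex_hw_qp *
mlx5_regex_free_hw_qp_get(struct mlx5_regex_qp *qp)
{
	if (qp->free_qps == 0)
		return NULL; /* Every HW QP is currently full. */
	/* Lowest set bit selects the first HW QP with free descriptors. */
	return &qp->qps[__builtin_ctz(qp->free_qps)];
}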

struct mlx5_regex_db {
	void *ptr; /* Pointer to the db memory. */
	uint32_t len; /* The memory len. */
	bool active; /* Active flag. */
	uint8_t db_assigned_to_eng_num;
	/**< To which engine the db is connected. */
	struct mlx5_regex_umem umem;
	/**< The umem struct. */
};

struct mlx5_regex_priv {
	TAILQ_ENTRY(mlx5_regex_priv) next;
	struct ibv_context *ctx; /* Device context. */
	struct rte_regexdev *regexdev; /* Pointer to the RegEx dev. */
	uint16_t nb_queues; /* Number of queues. */
	struct mlx5_regex_qp *qps; /* Pointer to the qp array. */
	uint16_t nb_max_matches; /* Max number of matches. */
	enum mlx5_rxp_program_mode prog_mode; /* RXP programming mode. */
	struct mlx5_regex_db db[MLX5_RXP_MAX_ENGINES +
				MLX5_RXP_EM_COUNT];
	uint32_t nb_engines; /* Number of RegEx engines. */
	struct mlx5dv_devx_uar *uar; /* UAR object. */
	struct ibv_pd *pd; /* Protection domain. */
	TAILQ_ENTRY(mlx5_regex_priv) mem_event_cb;
	/**< Called by memory event callback. */
	struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
	uint8_t is_bf2; /* The device is BF2 device. */
	uint8_t qp_ts_format; /* Whether QP supports timestamp formats. */
	uint8_t has_umr; /* The device supports UMR. */
	uint32_t mmo_regex_qp_cap:1; /* The QP supports MMO configuration. */
	uint32_t mmo_regex_sq_cap:1; /* The SQ supports MMO configuration. */
};

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
static inline int
regex_get_pdn(void *pd, uint32_t *pdn)
{
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret = 0;

	obj.pd.in = pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret) {
		DRV_LOG(DEBUG, "Failed to get PD object info.");
		return ret;
	}
	*pdn = pd_info.pdn;
	return 0;
}
#endif

/* mlx5_regex.c */
int mlx5_regex_start(struct rte_regexdev *dev);
int mlx5_regex_stop(struct rte_regexdev *dev);
int mlx5_regex_close(struct rte_regexdev *dev);

/* mlx5_rxp.c */
int mlx5_regex_info_get(struct rte_regexdev *dev,
			struct rte_regexdev_info *info);
int mlx5_regex_configure(struct rte_regexdev *dev,
			 const struct rte_regexdev_config *cfg);
int mlx5_regex_rules_db_import(struct rte_regexdev *dev,
			       const char *rule_db, uint32_t rule_db_len);

/* mlx5_regex_devx.c */
int mlx5_devx_regex_register_write(struct ibv_context *ctx, int engine_id,
				   uint32_t addr, uint32_t data);
int mlx5_devx_regex_register_read(struct ibv_context *ctx, int engine_id,
				  uint32_t addr, uint32_t *data);
int mlx5_devx_regex_database_stop(void *ctx, uint8_t engine);
int mlx5_devx_regex_database_resume(void *ctx, uint8_t engine);
int mlx5_devx_regex_database_program(void *ctx, uint8_t engine,
				     uint32_t umem_id, uint64_t umem_offset);

/* mlx5_regex_control.c */
int mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
			const struct rte_regexdev_qp_conf *cfg);

/* mlx5_regex_fastpath.c */
int mlx5_regexdev_setup_fastpath(struct mlx5_regex_priv *priv, uint32_t qp_id);
void mlx5_regexdev_teardown_fastpath(struct mlx5_regex_priv *priv,
				     uint32_t qp_id);
uint16_t mlx5_regexdev_enqueue(struct rte_regexdev *dev, uint16_t qp_id,
			       struct rte_regex_ops **ops, uint16_t nb_ops);
uint16_t mlx5_regexdev_dequeue(struct rte_regexdev *dev, uint16_t qp_id,
			       struct rte_regex_ops **ops, uint16_t nb_ops);
uint16_t mlx5_regexdev_enqueue_gga(struct rte_regexdev *dev, uint16_t qp_id,
				   struct rte_regex_ops **ops, uint16_t nb_ops);

#endif /* MLX5_REGEX_H */
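
For reference, a minimal sketch (hypothetical helpers, not part of the header) of how the pi, ci and db_pi indexes declared in struct mlx5_regex_hw_qp are typically used, assuming the header above is included:

/* The WQE ring holds 1 << log_nb_desc descriptors: pi advances when a WQE is
 * posted, ci when its completion is consumed, and db_pi records the producer
 * index last written to the doorbell.
 */
static inline uint32_t
regex_hw_qp_free_desc(const struct mlx5_regex_hw_qp *hw_qp)
{
	/* free = ring size - (posted - completed) */
	return (1u << hw_qp->log_nb_desc) - (uint32_t)(hw_qp->pi - hw_qp->ci);
}

static inline bool
regex_hw_qp_need_doorbell(const struct mlx5_regex_hw_qp *hw_qp)
{
	/* Ring the doorbell only if WQEs were posted since the last write. */
	return hw_qp->pi != hw_qp->db_pi;
}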