regex/cn9k: use cnxk infrastructure

update driver to use the REE cnxk code
replace octeontx2/otx2 with cn9k

Signed-off-by: Liron Himi <lironh@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
This commit is contained in:
Liron Himi 2021-12-11 14:34:34 +05:30 committed by Thomas Monjalon
parent c88d3638c7
commit 72c00ae9db
21 changed files with 273 additions and 1205 deletions

View File

@ -1238,11 +1238,11 @@ F: doc/guides/dmadevs/dpaa.rst
RegEx Drivers
-------------
Marvell OCTEON TX2 regex
Marvell OCTEON CN9K regex
M: Liron Himi <lironh@marvell.com>
F: drivers/regex/octeontx2/
F: doc/guides/regexdevs/octeontx2.rst
F: doc/guides/regexdevs/features/octeontx2.ini
F: drivers/regex/cn9k/
F: doc/guides/regexdevs/cn9k.rst
F: doc/guides/regexdevs/features/cn9k.ini
Mellanox mlx5
M: Ori Kam <orika@nvidia.com>

View File

@ -48,6 +48,10 @@ for dump in $(find $refdir -name "*.dump"); do
echo "Skipped removed driver $name."
continue
fi
if grep -qE "\<librte_regex_octeontx2" $dump; then
echo "Skipped removed driver $name."
continue
fi
dump2=$(find $newdir -name $name)
if [ -z "$dump2" ] || [ ! -e "$dump2" ]; then
echo "Error: cannot find $name in $newdir" >&2

View File

@ -156,6 +156,9 @@ This section lists dataplane H/W block(s) available in cnxk SoC.
#. **Dmadev Driver**
See :doc:`../dmadevs/cnxk` for DPI Dmadev driver information.
#. **Regex Device Driver**
See :doc:`../regexdevs/cn9k` for REE Regex device driver information.
Procedure to Setup Platform
---------------------------

View File

@ -155,9 +155,6 @@ This section lists dataplane H/W block(s) available in OCTEON TX2 SoC.
#. **Crypto Device Driver**
See :doc:`../cryptodevs/octeontx2` for CPT crypto device driver information.
#. **Regex Device Driver**
See :doc:`../regexdevs/octeontx2` for REE regex device driver information.
Procedure to Setup Platform
---------------------------

View File

@ -1,20 +1,20 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2020 Marvell International Ltd.
OCTEON TX2 REE Regexdev Driver
CN9K REE Regexdev Driver
==============================
The OCTEON TX2 REE PMD (**librte_regex_octeontx2**) provides poll mode
regexdev driver support for the inbuilt regex device found in the **Marvell OCTEON TX2**
The CN9K REE PMD (**librte_regex_cn9k**) provides poll mode
regexdev driver support for the inbuilt regex device found in the **Marvell CN9K**
SoC family.
More information about OCTEON TX2 SoC can be found at `Marvell Official Website
More information about CN9K SoC can be found at `Marvell Official Website
<https://www.marvell.com/embedded-processors/infrastructure-processors/>`_.
Features
--------
Features of the OCTEON TX2 REE PMD are:
Features of the CN9K REE PMD are:
- 36 queues
- Up to 254 matches for each regex operation
@ -22,12 +22,12 @@ Features of the OCTEON TX2 REE PMD are:
Prerequisites and Compilation procedure
---------------------------------------
See :doc:`../platform/octeontx2` for setup information.
See :doc:`../platform/cnxk` for setup information.
Device Setup
------------
The OCTEON TX2 REE devices will need to be bound to a user-space IO driver
The CN9K REE devices will need to be bound to a user-space IO driver
for use. The script ``dpdk-devbind.py`` script included with DPDK can be
used to view the state of the devices and to bind them to a suitable
DPDK-supported kernel driver. When querying the status of the devices,
@ -38,12 +38,12 @@ those devices alone.
Debugging Options
-----------------
.. _table_octeontx2_regex_debug_options:
.. _table_cn9k_regex_debug_options:
.. table:: OCTEON TX2 regex device debug options
.. table:: CN9K regex device debug options
+---+------------+-------------------------------------------------------+
| # | Component | EAL log command |
+===+============+=======================================================+
| 1 | REE | --log-level='pmd\.regex\.octeontx2,8' |
| 1 | REE | --log-level='pmd\.regex\.cn9k,8' |
+---+------------+-------------------------------------------------------+

View File

@ -1,5 +1,5 @@
;
; Supported features of the 'octeontx2' regex driver.
; Supported features of the 'cn9k' regex driver.
;
; Refer to default.ini for the full list of available driver features.
;

View File

@ -12,5 +12,5 @@ which can be used from an application through RegEx API.
:numbered:
features_overview
cn9k
mlx5
octeontx2

View File

@ -290,7 +290,7 @@ New Features
Added a new PMD for the hardware regex offload block for OCTEON TX2 SoC.
See the :doc:`../regexdevs/octeontx2` for more details.
See ``regexdevs/octeontx2`` for more details.
* **Updated Software Eventdev driver.**

View File

@ -13,12 +13,8 @@
/* REE common headers */
#include "otx2_common.h"
#include "otx2_dev.h"
#include "otx2_regexdev.h"
#include "otx2_regexdev_compiler.h"
#include "otx2_regexdev_hw_access.h"
#include "otx2_regexdev_mbox.h"
#include "cn9k_regexdev.h"
#include "cn9k_regexdev_compiler.h"
/* HW matches are at offset 0x80 from RES_PTR_ADDR
@ -35,9 +31,6 @@
#define REE_MAX_RULES_PER_GROUP 0xFFFF
#define REE_MAX_GROUPS 0xFFFF
/* This is temporarily here */
#define REE0_PF 19
#define REE1_PF 20
#define REE_RULE_DB_VERSION 2
#define REE_RULE_DB_REVISION 0
@ -58,32 +51,32 @@ struct ree_rule_db {
static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
snprintf(name, size, "otx2_ree_lf_mem_%u:%u", dev_id, qp_id);
snprintf(name, size, "cn9k_ree_lf_mem_%u:%u", dev_id, qp_id);
}
static struct otx2_ree_qp *
static struct roc_ree_qp *
ree_qp_create(const struct rte_regexdev *dev, uint16_t qp_id)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct cn9k_ree_data *data = dev->data->dev_private;
uint64_t pg_sz = sysconf(_SC_PAGESIZE);
struct otx2_ree_vf *vf = &data->vf;
struct roc_ree_vf *vf = &data->vf;
const struct rte_memzone *lf_mem;
uint32_t len, iq_len, size_div2;
char name[RTE_MEMZONE_NAMESIZE];
uint64_t used_len, iova;
struct otx2_ree_qp *qp;
struct roc_ree_qp *qp;
uint8_t *va;
int ret;
/* Allocate queue pair */
qp = rte_zmalloc("OCTEON TX2 Regex PMD Queue Pair", sizeof(*qp),
OTX2_ALIGN);
qp = rte_zmalloc("CN9K Regex PMD Queue Pair", sizeof(*qp),
ROC_ALIGN);
if (qp == NULL) {
otx2_err("Could not allocate queue pair");
cn9k_err("Could not allocate queue pair");
return NULL;
}
iq_len = OTX2_REE_IQ_LEN;
iq_len = REE_IQ_LEN;
/*
* Queue size must be in units of 128B 2 * REE_INST_S (which is 64B),
@ -93,13 +86,13 @@ ree_qp_create(const struct rte_regexdev *dev, uint16_t qp_id)
size_div2 = iq_len >> 1;
/* For pending queue */
len = iq_len * RTE_ALIGN(sizeof(struct otx2_ree_rid), 8);
len = iq_len * RTE_ALIGN(sizeof(struct roc_ree_rid), 8);
/* So that instruction queues start as pg size aligned */
len = RTE_ALIGN(len, pg_sz);
/* For instruction queues */
len += OTX2_REE_IQ_LEN * sizeof(union otx2_ree_inst);
len += REE_IQ_LEN * sizeof(union roc_ree_inst);
/* Waste after instruction queues */
len = RTE_ALIGN(len, pg_sz);
@ -107,11 +100,11 @@ ree_qp_create(const struct rte_regexdev *dev, uint16_t qp_id)
qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
qp_id);
lf_mem = rte_memzone_reserve_aligned(name, len, vf->otx2_dev.node,
lf_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
RTE_CACHE_LINE_SIZE);
if (lf_mem == NULL) {
otx2_err("Could not allocate reserved memzone");
cn9k_err("Could not allocate reserved memzone");
goto qp_free;
}
@ -121,24 +114,24 @@ ree_qp_create(const struct rte_regexdev *dev, uint16_t qp_id)
memset(va, 0, len);
/* Initialize pending queue */
qp->pend_q.rid_queue = (struct otx2_ree_rid *)va;
qp->pend_q.rid_queue = (struct roc_ree_rid *)va;
qp->pend_q.enq_tail = 0;
qp->pend_q.deq_head = 0;
qp->pend_q.pending_count = 0;
used_len = iq_len * RTE_ALIGN(sizeof(struct otx2_ree_rid), 8);
used_len = iq_len * RTE_ALIGN(sizeof(struct roc_ree_rid), 8);
used_len = RTE_ALIGN(used_len, pg_sz);
iova += used_len;
qp->iq_dma_addr = iova;
qp->id = qp_id;
qp->base = OTX2_REE_LF_BAR2(vf, qp_id);
qp->otx2_regexdev_jobid = 0;
qp->base = roc_ree_qp_get_base(vf, qp_id);
qp->roc_regexdev_jobid = 0;
qp->write_offset = 0;
ret = otx2_ree_iq_enable(dev, qp, OTX2_REE_QUEUE_HI_PRIO, size_div2);
ret = roc_ree_iq_enable(vf, qp, REE_QUEUE_HI_PRIO, size_div2);
if (ret) {
otx2_err("Could not enable instruction queue");
cn9k_err("Could not enable instruction queue");
goto qp_free;
}
@ -150,13 +143,13 @@ ree_qp_create(const struct rte_regexdev *dev, uint16_t qp_id)
}
static int
ree_qp_destroy(const struct rte_regexdev *dev, struct otx2_ree_qp *qp)
ree_qp_destroy(const struct rte_regexdev *dev, struct roc_ree_qp *qp)
{
const struct rte_memzone *lf_mem;
char name[RTE_MEMZONE_NAMESIZE];
int ret;
otx2_ree_iq_disable(qp);
roc_ree_iq_disable(qp);
qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
qp->id);
@ -175,8 +168,8 @@ ree_qp_destroy(const struct rte_regexdev *dev, struct otx2_ree_qp *qp)
static int
ree_queue_pair_release(struct rte_regexdev *dev, uint16_t qp_id)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_qp *qp = data->queue_pairs[qp_id];
struct cn9k_ree_data *data = dev->data->dev_private;
struct roc_ree_qp *qp = data->queue_pairs[qp_id];
int ret;
ree_func_trace("Queue=%d", qp_id);
@ -186,7 +179,7 @@ ree_queue_pair_release(struct rte_regexdev *dev, uint16_t qp_id)
ret = ree_qp_destroy(dev, qp);
if (ret) {
otx2_err("Could not destroy queue pair %d", qp_id);
cn9k_err("Could not destroy queue pair %d", qp_id);
return ret;
}
@ -200,12 +193,12 @@ ree_dev_register(const char *name)
{
struct rte_regexdev *dev;
otx2_ree_dbg("Creating regexdev %s\n", name);
cn9k_ree_dbg("Creating regexdev %s\n", name);
/* allocate device structure */
dev = rte_regexdev_register(name);
if (dev == NULL) {
otx2_err("Failed to allocate regex device for %s", name);
cn9k_err("Failed to allocate regex device for %s", name);
return NULL;
}
@ -213,12 +206,12 @@ ree_dev_register(const char *name)
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
dev->data->dev_private =
rte_zmalloc_socket("regexdev device private",
sizeof(struct otx2_ree_data),
sizeof(struct cn9k_ree_data),
RTE_CACHE_LINE_SIZE,
rte_socket_id());
if (dev->data->dev_private == NULL) {
otx2_err("Cannot allocate memory for dev %s private data",
cn9k_err("Cannot allocate memory for dev %s private data",
name);
rte_regexdev_unregister(dev);
@ -232,7 +225,7 @@ ree_dev_register(const char *name)
static int
ree_dev_unregister(struct rte_regexdev *dev)
{
otx2_ree_dbg("Closing regex device %s", dev->device->name);
cn9k_ree_dbg("Closing regex device %s", dev->device->name);
/* free regex device */
rte_regexdev_unregister(dev);
@ -246,8 +239,8 @@ ree_dev_unregister(struct rte_regexdev *dev)
static int
ree_dev_fini(struct rte_regexdev *dev)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct rte_pci_device *pci_dev;
struct cn9k_ree_data *data = dev->data->dev_private;
struct roc_ree_vf *vf = &data->vf;
int i, ret;
ree_func_trace();
@ -258,9 +251,9 @@ ree_dev_fini(struct rte_regexdev *dev)
return ret;
}
ret = otx2_ree_queues_detach(dev);
ret = roc_ree_queues_detach(vf);
if (ret)
otx2_err("Could not detach queues");
cn9k_err("Could not detach queues");
/* TEMP : should be in lib */
if (data->queue_pairs)
@ -268,33 +261,32 @@ ree_dev_fini(struct rte_regexdev *dev)
if (data->rules)
rte_free(data->rules);
pci_dev = container_of(dev->device, struct rte_pci_device, device);
otx2_dev_fini(pci_dev, &(data->vf.otx2_dev));
roc_ree_dev_fini(vf);
ret = ree_dev_unregister(dev);
if (ret)
otx2_err("Could not destroy PMD");
cn9k_err("Could not destroy PMD");
return ret;
}
static inline int
ree_enqueue(struct otx2_ree_qp *qp, struct rte_regex_ops *op,
struct otx2_ree_pending_queue *pend_q)
ree_enqueue(struct roc_ree_qp *qp, struct rte_regex_ops *op,
struct roc_ree_pending_queue *pend_q)
{
union otx2_ree_inst inst;
union otx2_ree_res *res;
union roc_ree_inst inst;
union ree_res *res;
uint32_t offset;
if (unlikely(pend_q->pending_count >= OTX2_REE_DEFAULT_CMD_QLEN)) {
otx2_err("Pending count %" PRIu64 " is greater than Q size %d",
pend_q->pending_count, OTX2_REE_DEFAULT_CMD_QLEN);
if (unlikely(pend_q->pending_count >= REE_DEFAULT_CMD_QLEN)) {
cn9k_err("Pending count %" PRIu64 " is greater than Q size %d",
pend_q->pending_count, REE_DEFAULT_CMD_QLEN);
return -EAGAIN;
}
if (unlikely(op->mbuf->data_len > OTX2_REE_MAX_PAYLOAD_SIZE ||
if (unlikely(op->mbuf->data_len > REE_MAX_PAYLOAD_SIZE ||
op->mbuf->data_len == 0)) {
otx2_err("Packet length %d is greater than MAX payload %d",
op->mbuf->data_len, OTX2_REE_MAX_PAYLOAD_SIZE);
cn9k_err("Packet length %d is greater than MAX payload %d",
op->mbuf->data_len, REE_MAX_PAYLOAD_SIZE);
return -EAGAIN;
}
@ -324,7 +316,7 @@ ree_enqueue(struct otx2_ree_qp *qp, struct rte_regex_ops *op,
inst.cn98xx.ree_job_ctrl = (0x1 << 8);
else
inst.cn98xx.ree_job_ctrl = 0;
inst.cn98xx.ree_job_id = qp->otx2_regexdev_jobid;
inst.cn98xx.ree_job_id = qp->roc_regexdev_jobid;
/* W 7 */
inst.cn98xx.ree_job_subset_id_0 = op->group_id0;
if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID1_VALID_F)
@ -348,33 +340,33 @@ ree_enqueue(struct otx2_ree_qp *qp, struct rte_regex_ops *op,
pend_q->rid_queue[pend_q->enq_tail].user_id = op->user_id;
/* Mark result as not done */
res = (union otx2_ree_res *)(op);
res = (union ree_res *)(op);
res->s.done = 0;
res->s.ree_err = 0;
/* We will use soft queue length here to limit requests */
REE_MOD_INC(pend_q->enq_tail, OTX2_REE_DEFAULT_CMD_QLEN);
REE_MOD_INC(pend_q->enq_tail, REE_DEFAULT_CMD_QLEN);
pend_q->pending_count += 1;
REE_MOD_INC(qp->otx2_regexdev_jobid, 0xFFFFFF);
REE_MOD_INC(qp->write_offset, OTX2_REE_IQ_LEN);
REE_MOD_INC(qp->roc_regexdev_jobid, 0xFFFFFF);
REE_MOD_INC(qp->write_offset, REE_IQ_LEN);
return 0;
}
static uint16_t
otx2_ree_enqueue_burst(struct rte_regexdev *dev, uint16_t qp_id,
cn9k_ree_enqueue_burst(struct rte_regexdev *dev, uint16_t qp_id,
struct rte_regex_ops **ops, uint16_t nb_ops)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_qp *qp = data->queue_pairs[qp_id];
struct otx2_ree_pending_queue *pend_q;
struct cn9k_ree_data *data = dev->data->dev_private;
struct roc_ree_qp *qp = data->queue_pairs[qp_id];
struct roc_ree_pending_queue *pend_q;
uint16_t nb_allowed, count = 0;
struct rte_regex_ops *op;
int ret;
pend_q = &qp->pend_q;
nb_allowed = OTX2_REE_DEFAULT_CMD_QLEN - pend_q->pending_count;
nb_allowed = REE_DEFAULT_CMD_QLEN - pend_q->pending_count;
if (nb_ops > nb_allowed)
nb_ops = nb_allowed;
@ -392,7 +384,7 @@ otx2_ree_enqueue_burst(struct rte_regexdev *dev, uint16_t qp_id,
rte_io_wmb();
/* Update Doorbell */
otx2_write64(count, qp->base + OTX2_REE_LF_DOORBELL);
plt_write64(count, qp->base + REE_LF_DOORBELL);
return count;
}
@ -422,15 +414,15 @@ ree_dequeue_post_process(struct rte_regex_ops *ops)
}
if (unlikely(ree_res_status != REE_TYPE_RESULT_DESC)) {
if (ree_res_status & OTX2_REE_STATUS_PMI_SOJ_BIT)
if (ree_res_status & REE_STATUS_PMI_SOJ_BIT)
ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_SOJ_F;
if (ree_res_status & OTX2_REE_STATUS_PMI_EOJ_BIT)
if (ree_res_status & REE_STATUS_PMI_EOJ_BIT)
ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_EOJ_F;
if (ree_res_status & OTX2_REE_STATUS_ML_CNT_DET_BIT)
if (ree_res_status & REE_STATUS_ML_CNT_DET_BIT)
ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_SCAN_TIMEOUT_F;
if (ree_res_status & OTX2_REE_STATUS_MM_CNT_DET_BIT)
if (ree_res_status & REE_STATUS_MM_CNT_DET_BIT)
ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_MATCH_F;
if (ree_res_status & OTX2_REE_STATUS_MP_CNT_DET_BIT)
if (ree_res_status & REE_STATUS_MP_CNT_DET_BIT)
ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_PREFIX_F;
}
if (ops->nb_matches > 0) {
@ -439,22 +431,22 @@ ree_dequeue_post_process(struct rte_regex_ops *ops)
ops->nb_matches : REE_NUM_MATCHES_ALIGN);
match = (uint64_t)ops + REE_MATCH_OFFSET;
match += (ops->nb_matches - off) *
sizeof(union otx2_ree_match);
sizeof(union ree_match);
memcpy((void *)ops->matches, (void *)match,
off * sizeof(union otx2_ree_match));
off * sizeof(union ree_match));
}
}
static uint16_t
otx2_ree_dequeue_burst(struct rte_regexdev *dev, uint16_t qp_id,
cn9k_ree_dequeue_burst(struct rte_regexdev *dev, uint16_t qp_id,
struct rte_regex_ops **ops, uint16_t nb_ops)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_qp *qp = data->queue_pairs[qp_id];
struct otx2_ree_pending_queue *pend_q;
struct cn9k_ree_data *data = dev->data->dev_private;
struct roc_ree_qp *qp = data->queue_pairs[qp_id];
struct roc_ree_pending_queue *pend_q;
int i, nb_pending, nb_completed = 0;
volatile struct ree_res_s_98 *res;
struct otx2_ree_rid *rid;
struct roc_ree_rid *rid;
pend_q = &qp->pend_q;
@ -474,7 +466,7 @@ otx2_ree_dequeue_burst(struct rte_regexdev *dev, uint16_t qp_id,
ops[i] = (struct rte_regex_ops *)(rid->rid);
ops[i]->user_id = rid->user_id;
REE_MOD_INC(pend_q->deq_head, OTX2_REE_DEFAULT_CMD_QLEN);
REE_MOD_INC(pend_q->deq_head, REE_DEFAULT_CMD_QLEN);
pend_q->pending_count -= 1;
}
@ -487,10 +479,10 @@ otx2_ree_dequeue_burst(struct rte_regexdev *dev, uint16_t qp_id,
}
static int
otx2_ree_dev_info_get(struct rte_regexdev *dev, struct rte_regexdev_info *info)
cn9k_ree_dev_info_get(struct rte_regexdev *dev, struct rte_regexdev_info *info)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_vf *vf = &data->vf;
struct cn9k_ree_data *data = dev->data->dev_private;
struct roc_ree_vf *vf = &data->vf;
ree_func_trace();
@ -502,7 +494,7 @@ otx2_ree_dev_info_get(struct rte_regexdev *dev, struct rte_regexdev_info *info)
info->max_queue_pairs = vf->max_queues;
info->max_matches = vf->max_matches;
info->max_payload_size = OTX2_REE_MAX_PAYLOAD_SIZE;
info->max_payload_size = REE_MAX_PAYLOAD_SIZE;
info->max_rules_per_group = data->max_rules_per_group;
info->max_groups = data->max_groups;
info->regexdev_capa = data->regexdev_capa;
@ -512,11 +504,11 @@ otx2_ree_dev_info_get(struct rte_regexdev *dev, struct rte_regexdev_info *info)
}
static int
otx2_ree_dev_config(struct rte_regexdev *dev,
cn9k_ree_dev_config(struct rte_regexdev *dev,
const struct rte_regexdev_config *cfg)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_vf *vf = &data->vf;
struct cn9k_ree_data *data = dev->data->dev_private;
struct roc_ree_vf *vf = &data->vf;
const struct ree_rule_db *rule_db;
uint32_t rule_db_len;
int ret;
@ -524,29 +516,29 @@ otx2_ree_dev_config(struct rte_regexdev *dev,
ree_func_trace();
if (cfg->nb_queue_pairs > vf->max_queues) {
otx2_err("Invalid number of queue pairs requested");
cn9k_err("Invalid number of queue pairs requested");
return -EINVAL;
}
if (cfg->nb_max_matches != vf->max_matches) {
otx2_err("Invalid number of max matches requested");
cn9k_err("Invalid number of max matches requested");
return -EINVAL;
}
if (cfg->dev_cfg_flags != 0) {
otx2_err("Invalid device configuration flags requested");
cn9k_err("Invalid device configuration flags requested");
return -EINVAL;
}
/* Unregister error interrupts */
if (vf->err_intr_registered)
otx2_ree_err_intr_unregister(dev);
roc_ree_err_intr_unregister(vf);
/* Detach queues */
if (vf->nb_queues) {
ret = otx2_ree_queues_detach(dev);
ret = roc_ree_queues_detach(vf);
if (ret) {
otx2_err("Could not detach REE queues");
cn9k_err("Could not detach REE queues");
return ret;
}
}
@ -559,7 +551,7 @@ otx2_ree_dev_config(struct rte_regexdev *dev,
if (data->queue_pairs == NULL) {
data->nb_queue_pairs = 0;
otx2_err("Failed to get memory for qp meta data, nb_queues %u",
cn9k_err("Failed to get memory for qp meta data, nb_queues %u",
cfg->nb_queue_pairs);
return -ENOMEM;
}
@ -579,7 +571,7 @@ otx2_ree_dev_config(struct rte_regexdev *dev,
qp = rte_realloc(qp, sizeof(qp[0]) * cfg->nb_queue_pairs,
RTE_CACHE_LINE_SIZE);
if (qp == NULL) {
otx2_err("Failed to realloc qp meta data, nb_queues %u",
cn9k_err("Failed to realloc qp meta data, nb_queues %u",
cfg->nb_queue_pairs);
return -ENOMEM;
}
@ -594,52 +586,52 @@ otx2_ree_dev_config(struct rte_regexdev *dev,
data->nb_queue_pairs = cfg->nb_queue_pairs;
/* Attach queues */
otx2_ree_dbg("Attach %d queues", cfg->nb_queue_pairs);
ret = otx2_ree_queues_attach(dev, cfg->nb_queue_pairs);
cn9k_ree_dbg("Attach %d queues", cfg->nb_queue_pairs);
ret = roc_ree_queues_attach(vf, cfg->nb_queue_pairs);
if (ret) {
otx2_err("Could not attach queues");
cn9k_err("Could not attach queues");
return -ENODEV;
}
ret = otx2_ree_msix_offsets_get(dev);
ret = roc_ree_msix_offsets_get(vf);
if (ret) {
otx2_err("Could not get MSI-X offsets");
cn9k_err("Could not get MSI-X offsets");
goto queues_detach;
}
if (cfg->rule_db && cfg->rule_db_len) {
otx2_ree_dbg("rule_db length %d", cfg->rule_db_len);
cn9k_ree_dbg("rule_db length %d", cfg->rule_db_len);
rule_db = (const struct ree_rule_db *)cfg->rule_db;
rule_db_len = rule_db->number_of_entries *
sizeof(struct ree_rule_db_entry);
otx2_ree_dbg("rule_db number of entries %d",
cn9k_ree_dbg("rule_db number of entries %d",
rule_db->number_of_entries);
if (rule_db_len > cfg->rule_db_len) {
otx2_err("Could not program rule db");
cn9k_err("Could not program rule db");
ret = -EINVAL;
goto queues_detach;
}
ret = otx2_ree_rule_db_prog(dev, (const char *)rule_db->entries,
rule_db_len, NULL, OTX2_REE_NON_INC_PROG);
ret = roc_ree_rule_db_prog(vf, (const char *)rule_db->entries,
rule_db_len, NULL, REE_NON_INC_PROG);
if (ret) {
otx2_err("Could not program rule db");
cn9k_err("Could not program rule db");
goto queues_detach;
}
}
dev->enqueue = otx2_ree_enqueue_burst;
dev->dequeue = otx2_ree_dequeue_burst;
dev->enqueue = cn9k_ree_enqueue_burst;
dev->dequeue = cn9k_ree_dequeue_burst;
rte_mb();
return 0;
queues_detach:
otx2_ree_queues_detach(dev);
roc_ree_queues_detach(vf);
return ret;
}
static int
otx2_ree_stop(struct rte_regexdev *dev)
cn9k_ree_stop(struct rte_regexdev *dev)
{
RTE_SET_USED(dev);
@ -648,18 +640,20 @@ otx2_ree_stop(struct rte_regexdev *dev)
}
static int
otx2_ree_start(struct rte_regexdev *dev)
cn9k_ree_start(struct rte_regexdev *dev)
{
struct cn9k_ree_data *data = dev->data->dev_private;
struct roc_ree_vf *vf = &data->vf;
uint32_t rule_db_len = 0;
int ret;
ree_func_trace();
ret = otx2_ree_rule_db_len_get(dev, &rule_db_len, NULL);
ret = roc_ree_rule_db_len_get(vf, &rule_db_len, NULL);
if (ret)
return ret;
if (rule_db_len == 0) {
otx2_err("Rule db not programmed");
cn9k_err("Rule db not programmed");
return -EFAULT;
}
@ -667,56 +661,55 @@ otx2_ree_start(struct rte_regexdev *dev)
}
static int
otx2_ree_close(struct rte_regexdev *dev)
cn9k_ree_close(struct rte_regexdev *dev)
{
return ree_dev_fini(dev);
}
static int
otx2_ree_queue_pair_setup(struct rte_regexdev *dev, uint16_t qp_id,
cn9k_ree_queue_pair_setup(struct rte_regexdev *dev, uint16_t qp_id,
const struct rte_regexdev_qp_conf *qp_conf)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_qp *qp;
struct cn9k_ree_data *data = dev->data->dev_private;
struct roc_ree_qp *qp;
ree_func_trace("Queue=%d", qp_id);
if (data->queue_pairs[qp_id] != NULL)
ree_queue_pair_release(dev, qp_id);
if (qp_conf->nb_desc > OTX2_REE_DEFAULT_CMD_QLEN) {
otx2_err("Could not setup queue pair for %u descriptors",
if (qp_conf->nb_desc > REE_DEFAULT_CMD_QLEN) {
cn9k_err("Could not setup queue pair for %u descriptors",
qp_conf->nb_desc);
return -EINVAL;
}
if (qp_conf->qp_conf_flags != 0) {
otx2_err("Could not setup queue pair with configuration flags 0x%x",
cn9k_err("Could not setup queue pair with configuration flags 0x%x",
qp_conf->qp_conf_flags);
return -EINVAL;
}
qp = ree_qp_create(dev, qp_id);
if (qp == NULL) {
otx2_err("Could not create queue pair %d", qp_id);
cn9k_err("Could not create queue pair %d", qp_id);
return -ENOMEM;
}
qp->cb = qp_conf->cb;
data->queue_pairs[qp_id] = qp;
return 0;
}
static int
otx2_ree_rule_db_compile_activate(struct rte_regexdev *dev)
cn9k_ree_rule_db_compile_activate(struct rte_regexdev *dev)
{
return otx2_ree_rule_db_compile_prog(dev);
return cn9k_ree_rule_db_compile_prog(dev);
}
static int
otx2_ree_rule_db_update(struct rte_regexdev *dev,
cn9k_ree_rule_db_update(struct rte_regexdev *dev,
const struct rte_regexdev_rule *rules, uint16_t nb_rules)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct cn9k_ree_data *data = dev->data->dev_private;
struct rte_regexdev_rule *old_ptr;
uint32_t i, sum_nb_rules;
@ -770,10 +763,11 @@ otx2_ree_rule_db_update(struct rte_regexdev *dev,
}
static int
otx2_ree_rule_db_import(struct rte_regexdev *dev, const char *rule_db,
cn9k_ree_rule_db_import(struct rte_regexdev *dev, const char *rule_db,
uint32_t rule_db_len)
{
struct cn9k_ree_data *data = dev->data->dev_private;
struct roc_ree_vf *vf = &data->vf;
const struct ree_rule_db *ree_rule_db;
uint32_t ree_rule_db_len;
int ret;
@ -784,21 +778,23 @@ otx2_ree_rule_db_import(struct rte_regexdev *dev, const char *rule_db,
ree_rule_db_len = ree_rule_db->number_of_entries *
sizeof(struct ree_rule_db_entry);
if (ree_rule_db_len > rule_db_len) {
otx2_err("Could not program rule db");
cn9k_err("Could not program rule db");
return -EINVAL;
}
ret = otx2_ree_rule_db_prog(dev, (const char *)ree_rule_db->entries,
ree_rule_db_len, NULL, OTX2_REE_NON_INC_PROG);
ret = roc_ree_rule_db_prog(vf, (const char *)ree_rule_db->entries,
ree_rule_db_len, NULL, REE_NON_INC_PROG);
if (ret) {
otx2_err("Could not program rule db");
cn9k_err("Could not program rule db");
return -ENOSPC;
}
return 0;
}
static int
otx2_ree_rule_db_export(struct rte_regexdev *dev, char *rule_db)
cn9k_ree_rule_db_export(struct rte_regexdev *dev, char *rule_db)
{
struct cn9k_ree_data *data = dev->data->dev_private;
struct roc_ree_vf *vf = &data->vf;
struct ree_rule_db *ree_rule_db;
uint32_t rule_dbi_len;
uint32_t rule_db_len;
@ -806,7 +802,7 @@ otx2_ree_rule_db_export(struct rte_regexdev *dev, char *rule_db)
ree_func_trace();
ret = otx2_ree_rule_db_len_get(dev, &rule_db_len, &rule_dbi_len);
ret = roc_ree_rule_db_len_get(vf, &rule_db_len, &rule_dbi_len);
if (ret)
return ret;
@ -816,10 +812,10 @@ otx2_ree_rule_db_export(struct rte_regexdev *dev, char *rule_db)
}
ree_rule_db = (struct ree_rule_db *)rule_db;
ret = otx2_ree_rule_db_get(dev, (char *)ree_rule_db->entries,
ret = roc_ree_rule_db_get(vf, (char *)ree_rule_db->entries,
rule_db_len, NULL, 0);
if (ret) {
otx2_err("Could not export rule db");
cn9k_err("Could not export rule db");
return -EFAULT;
}
ree_rule_db->number_of_entries =
@ -830,34 +826,20 @@ otx2_ree_rule_db_export(struct rte_regexdev *dev, char *rule_db)
return 0;
}
static int
ree_get_blkaddr(struct otx2_dev *dev)
{
int pf;
pf = otx2_get_pf(dev->pf_func);
if (pf == REE0_PF)
return RVU_BLOCK_ADDR_REE0;
else if (pf == REE1_PF)
return RVU_BLOCK_ADDR_REE1;
else
return 0;
}
static struct rte_regexdev_ops otx2_ree_ops = {
.dev_info_get = otx2_ree_dev_info_get,
.dev_configure = otx2_ree_dev_config,
.dev_qp_setup = otx2_ree_queue_pair_setup,
.dev_start = otx2_ree_start,
.dev_stop = otx2_ree_stop,
.dev_close = otx2_ree_close,
static struct rte_regexdev_ops cn9k_ree_ops = {
.dev_info_get = cn9k_ree_dev_info_get,
.dev_configure = cn9k_ree_dev_config,
.dev_qp_setup = cn9k_ree_queue_pair_setup,
.dev_start = cn9k_ree_start,
.dev_stop = cn9k_ree_stop,
.dev_close = cn9k_ree_close,
.dev_attr_get = NULL,
.dev_attr_set = NULL,
.dev_rule_db_update = otx2_ree_rule_db_update,
.dev_rule_db_update = cn9k_ree_rule_db_update,
.dev_rule_db_compile_activate =
otx2_ree_rule_db_compile_activate,
.dev_db_import = otx2_ree_rule_db_import,
.dev_db_export = otx2_ree_rule_db_export,
cn9k_ree_rule_db_compile_activate,
.dev_db_import = cn9k_ree_rule_db_import,
.dev_db_export = cn9k_ree_rule_db_export,
.dev_xstats_names_get = NULL,
.dev_xstats_get = NULL,
.dev_xstats_by_name_get = NULL,
@ -867,18 +849,21 @@ static struct rte_regexdev_ops otx2_ree_ops = {
};
static int
otx2_ree_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
cn9k_ree_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
char name[RTE_REGEXDEV_NAME_MAX_LEN];
struct otx2_ree_data *data;
struct otx2_dev *otx2_dev;
struct cn9k_ree_data *data;
struct rte_regexdev *dev;
uint8_t max_matches = 0;
struct otx2_ree_vf *vf;
uint16_t nb_queues = 0;
struct roc_ree_vf *vf;
int ret;
ret = roc_plt_init();
if (ret < 0) {
plt_err("Failed to initialize platform model");
return ret;
}
rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
dev = ree_dev_register(name);
@ -887,63 +872,19 @@ otx2_ree_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
goto exit;
}
dev->dev_ops = &otx2_ree_ops;
dev->dev_ops = &cn9k_ree_ops;
dev->device = &pci_dev->device;
/* Get private data space allocated */
data = dev->data->dev_private;
vf = &data->vf;
otx2_dev = &vf->otx2_dev;
/* Initialize the base otx2_dev object */
ret = otx2_dev_init(pci_dev, otx2_dev);
vf->pci_dev = pci_dev;
ret = roc_ree_dev_init(vf);
if (ret) {
otx2_err("Could not initialize otx2_dev");
plt_err("Failed to initialize roc cpt rc=%d", ret);
goto dev_unregister;
}
/* Get REE block address */
vf->block_address = ree_get_blkaddr(otx2_dev);
if (!vf->block_address) {
otx2_err("Could not determine block PF number");
goto otx2_dev_fini;
}
/* Get number of queues available on the device */
ret = otx2_ree_available_queues_get(dev, &nb_queues);
if (ret) {
otx2_err("Could not determine the number of queues available");
goto otx2_dev_fini;
}
/* Don't exceed the limits set per VF */
nb_queues = RTE_MIN(nb_queues, OTX2_REE_MAX_QUEUES_PER_VF);
if (nb_queues == 0) {
otx2_err("No free queues available on the device");
goto otx2_dev_fini;
}
vf->max_queues = nb_queues;
otx2_ree_dbg("Max queues supported by device: %d", vf->max_queues);
/* Get number of maximum matches supported on the device */
ret = otx2_ree_max_matches_get(dev, &max_matches);
if (ret) {
otx2_err("Could not determine the maximum matches supported");
goto otx2_dev_fini;
}
/* Don't exceed the limits set per VF */
max_matches = RTE_MIN(max_matches, OTX2_REE_MAX_MATCHES_PER_VF);
if (max_matches == 0) {
otx2_err("Could not determine the maximum matches supported");
goto otx2_dev_fini;
}
vf->max_matches = max_matches;
otx2_ree_dbg("Max matches supported by device: %d", vf->max_matches);
data->rule_flags = RTE_REGEX_PCRE_RULE_ALLOW_EMPTY_F |
RTE_REGEX_PCRE_RULE_ANCHORED_F;
data->regexdev_capa = 0;
@ -954,18 +895,16 @@ otx2_ree_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
dev->state = RTE_REGEXDEV_READY;
return 0;
otx2_dev_fini:
otx2_dev_fini(pci_dev, otx2_dev);
dev_unregister:
ree_dev_unregister(dev);
exit:
otx2_err("Could not create device (vendor_id: 0x%x device_id: 0x%x)",
cn9k_err("Could not create device (vendor_id: 0x%x device_id: 0x%x)",
pci_dev->id.vendor_id, pci_dev->id.device_id);
return ret;
}
static int
otx2_ree_pci_remove(struct rte_pci_device *pci_dev)
cn9k_ree_pci_remove(struct rte_pci_device *pci_dev)
{
char name[RTE_REGEXDEV_NAME_MAX_LEN];
struct rte_regexdev *dev = NULL;
@ -986,20 +925,20 @@ otx2_ree_pci_remove(struct rte_pci_device *pci_dev)
static struct rte_pci_id pci_id_ree_table[] = {
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OCTEONTX2_RVU_REE_PF)
PCI_DEVID_CNXK_RVU_REE_PF)
},
{
.vendor_id = 0,
}
};
static struct rte_pci_driver otx2_regexdev_pmd = {
static struct rte_pci_driver cn9k_regexdev_pmd = {
.id_table = pci_id_ree_table,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
.probe = otx2_ree_pci_probe,
.remove = otx2_ree_pci_remove,
.probe = cn9k_ree_pci_probe,
.remove = cn9k_ree_pci_remove,
};
RTE_PMD_REGISTER_PCI(REGEXDEV_NAME_OCTEONTX2_PMD, otx2_regexdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(REGEXDEV_NAME_OCTEONTX2_PMD, pci_id_ree_table);
RTE_PMD_REGISTER_PCI(REGEXDEV_NAME_CN9K_PMD, cn9k_regexdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(REGEXDEV_NAME_CN9K_PMD, pci_id_ree_table);

View File

@ -0,0 +1,44 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#ifndef _CN9K_REGEXDEV_H_
#define _CN9K_REGEXDEV_H_

#include <rte_common.h>
#include <rte_regexdev.h>

#include "roc_api.h"

/* Driver log helpers are thin aliases over the common plt_* macros */
#define cn9k_ree_dbg plt_ree_dbg
#define cn9k_err plt_err

/* Trace entry into driver ops (debug level) */
#define ree_func_trace cn9k_ree_dbg

/* Marvell CN9K Regex PMD device name */
#define REGEXDEV_NAME_CN9K_PMD regex_cn9k

/**
 * Device private data
 */
struct cn9k_ree_data {
	uint32_t regexdev_capa;
	/**< Regex device capability flags */
	uint64_t rule_flags;
	/**< Feature flags exposes HW/SW features for the given device */
	uint16_t max_rules_per_group;
	/**< Maximum rules supported per subset by this device */
	uint16_t max_groups;
	/**< Maximum subset supported by this device */
	void **queue_pairs;
	/**< Array of pointers to queue pairs. */
	uint16_t nb_queue_pairs;
	/**< Number of device queue pairs. */
	struct roc_ree_vf vf;
	/**< vf data */
	struct rte_regexdev_rule *rules;
	/**< rules to be compiled */
	uint16_t nb_rules;
	/**< number of rules */
} __rte_cache_aligned;

#endif /* _CN9K_REGEXDEV_H_ */

View File

@ -5,9 +5,8 @@
#include <rte_malloc.h>
#include <rte_regexdev.h>
#include "otx2_regexdev.h"
#include "otx2_regexdev_compiler.h"
#include "otx2_regexdev_mbox.h"
#include "cn9k_regexdev.h"
#include "cn9k_regexdev_compiler.h"
#ifdef REE_COMPILER_SDK
#include <rxp-compiler.h>
@ -65,7 +64,7 @@ ree_rule_db_compile(const struct rte_regexdev_rule *rules,
nb_rules*sizeof(struct rxp_rule_entry), 0);
if (ruleset.rules == NULL) {
otx2_err("Could not allocate memory for rule compilation\n");
cn9k_err("Could not allocate memory for rule compilation\n");
return -EFAULT;
}
if (rof_for_incremental_compile)
@ -126,9 +125,10 @@ ree_rule_db_compile(const struct rte_regexdev_rule *rules,
}
int
otx2_ree_rule_db_compile_prog(struct rte_regexdev *dev)
cn9k_ree_rule_db_compile_prog(struct rte_regexdev *dev)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct cn9k_ree_data *data = dev->data->dev_private;
struct roc_ree_vf *vf = &data->vf;
char compiler_version[] = "20.5.2.eda0fa2";
char timestamp[] = "19700101_000001";
uint32_t rule_db_len, rule_dbi_len;
@ -144,25 +144,25 @@ otx2_ree_rule_db_compile_prog(struct rte_regexdev *dev)
ree_func_trace();
ret = otx2_ree_rule_db_len_get(dev, &rule_db_len, &rule_dbi_len);
ret = roc_ree_rule_db_len_get(vf, &rule_db_len, &rule_dbi_len);
if (ret != 0) {
otx2_err("Could not get rule db length");
cn9k_err("Could not get rule db length");
return ret;
}
if (rule_db_len > 0) {
otx2_ree_dbg("Incremental compile, rule db len %d rule dbi len %d",
cn9k_ree_dbg("Incremental compile, rule db len %d rule dbi len %d",
rule_db_len, rule_dbi_len);
rule_db = rte_malloc("ree_rule_db", rule_db_len, 0);
if (!rule_db) {
otx2_err("Could not allocate memory for rule db");
cn9k_err("Could not allocate memory for rule db");
return -EFAULT;
}
ret = otx2_ree_rule_db_get(dev, rule_db, rule_db_len,
ret = roc_ree_rule_db_get(vf, rule_db, rule_db_len,
(char *)rule_dbi, rule_dbi_len);
if (ret) {
otx2_err("Could not read rule db");
cn9k_err("Could not read rule db");
rte_free(rule_db);
return -EFAULT;
}
@ -188,7 +188,7 @@ otx2_ree_rule_db_compile_prog(struct rte_regexdev *dev)
ret = ree_rule_db_compile(data->rules, data->nb_rules, &rof,
&rofi, &rof_inc, rofi_inc_p);
if (rofi->number_of_entries == 0) {
otx2_ree_dbg("No change to rule db");
cn9k_ree_dbg("No change to rule db");
ret = 0;
goto free_structs;
}
@ -201,14 +201,14 @@ otx2_ree_rule_db_compile_prog(struct rte_regexdev *dev)
&rofi, NULL, NULL);
}
if (ret != 0) {
otx2_err("Could not compile rule db");
cn9k_err("Could not compile rule db");
goto free_structs;
}
rule_db_len = rof->number_of_entries * sizeof(struct rxp_rof_entry);
ret = otx2_ree_rule_db_prog(dev, (char *)rof->rof_entries, rule_db_len,
ret = roc_ree_rule_db_prog(vf, (char *)rof->rof_entries, rule_db_len,
rofi_rof_entries, rule_dbi_len);
if (ret)
otx2_err("Could not program rule db");
cn9k_err("Could not program rule db");
free_structs:
rxp_free_structs(NULL, NULL, NULL, NULL, NULL, &rof, NULL, &rofi, NULL,
@ -221,7 +221,7 @@ otx2_ree_rule_db_compile_prog(struct rte_regexdev *dev)
}
#else
int
otx2_ree_rule_db_compile_prog(struct rte_regexdev *dev)
cn9k_ree_rule_db_compile_prog(struct rte_regexdev *dev)
{
RTE_SET_USED(dev);
return -ENOTSUP;

View File

@ -0,0 +1,11 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#ifndef _CN9K_REGEXDEV_COMPILER_H_
#define _CN9K_REGEXDEV_COMPILER_H_

/*
 * Compile the rule list stored in the device private data and program
 * the resulting rule database to the REE hardware.
 * Returns 0 on success, a negative errno on failure; -ENOTSUP when the
 * driver is built without the REE compiler SDK.
 */
int
cn9k_ree_rule_db_compile_prog(struct rte_regexdev *dev);

#endif /* _CN9K_REGEXDEV_COMPILER_H_ */

View File

@ -16,12 +16,10 @@ if lib.found()
endif
sources = files(
'otx2_regexdev.c',
'otx2_regexdev_compiler.c',
'otx2_regexdev_hw_access.c',
'otx2_regexdev_mbox.c',
'cn9k_regexdev.c',
'cn9k_regexdev_compiler.c',
)
deps += ['bus_pci', 'common_octeontx2', 'regexdev']
deps += ['bus_pci', 'regexdev']
deps += ['common_cnxk', 'mempool_cnxk']
includes += include_directories('../../common/octeontx2')

View File

@ -3,6 +3,6 @@
drivers = [
'mlx5',
'octeontx2',
'cn9k',
]
std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc

View File

@ -1,109 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#ifndef _OTX2_REGEXDEV_H_
#define _OTX2_REGEXDEV_H_

#include <rte_common.h>
#include <rte_regexdev.h>

#include "otx2_dev.h"

/* Trace entry into driver ops (debug level) */
#define ree_func_trace otx2_ree_dbg

/* Marvell OCTEON TX2 Regex PMD device name */
#define REGEXDEV_NAME_OCTEONTX2_PMD regex_octeontx2

#define OTX2_REE_MAX_LFS 36
#define OTX2_REE_MAX_QUEUES_PER_VF 36
#define OTX2_REE_MAX_MATCHES_PER_VF 254

#define OTX2_REE_MAX_PAYLOAD_SIZE (1 << 14)

/* Selectors for full vs incremental rule db programming */
#define OTX2_REE_NON_INC_PROG 0
#define OTX2_REE_INC_PROG 1

/* Circular increment of index i modulo l.
 * NOTE: evaluates i more than once - do not pass expressions with
 * side effects.
 */
#define REE_MOD_INC(i, l) ((i) == (l - 1) ? (i) = 0 : (i)++)

/**
 * Device vf data
 */
struct otx2_ree_vf {
	struct otx2_dev otx2_dev;
	/**< Base class */
	uint16_t max_queues;
	/**< Max queues supported */
	uint8_t nb_queues;
	/**< Number of regex queues attached */
	uint16_t max_matches;
	/**< Max matches supported*/
	uint16_t lf_msixoff[OTX2_REE_MAX_LFS];
	/**< MSI-X offsets */
	uint8_t block_address;
	/**< REE Block Address */
	uint8_t err_intr_registered:1;
	/**< Are error interrupts registered? */
};

/**
 * Device private data
 */
struct otx2_ree_data {
	uint32_t regexdev_capa;
	/**< Regex device capability flags */
	uint64_t rule_flags;
	/**< Feature flags exposes HW/SW features for the given device */
	uint16_t max_rules_per_group;
	/**< Maximum rules supported per subset by this device */
	uint16_t max_groups;
	/**< Maximum subset supported by this device */
	void **queue_pairs;
	/**< Array of pointers to queue pairs. */
	uint16_t nb_queue_pairs;
	/**< Number of device queue pairs. */
	struct otx2_ree_vf vf;
	/**< vf data */
	struct rte_regexdev_rule *rules;
	/**< rules to be compiled */
	uint16_t nb_rules;
	/**< number of rules */
} __rte_cache_aligned;

/* One in-flight request tracked by a pending queue */
struct otx2_ree_rid {
	uintptr_t rid;
	/** Request id of a ree operation */
	uint64_t user_id;
	/* Client data: opaque id supplied with the enqueued operation */
};

/* Ring of in-flight requests for one queue pair */
struct otx2_ree_pending_queue {
	uint64_t pending_count;
	/** Pending requests count */
	struct otx2_ree_rid *rid_queue;
	/** Array of pending requests */
	uint16_t enq_tail;
	/** Tail of queue to be used for enqueue */
	uint16_t deq_head;
	/** Head of queue to be used for dequeue */
};

/* Per queue-pair state */
struct otx2_ree_qp {
	uint32_t id;
	/**< Queue pair id */
	uintptr_t base;
	/**< Base address where BAR is mapped */
	struct otx2_ree_pending_queue pend_q;
	/**< Pending queue */
	rte_iova_t iq_dma_addr;
	/**< Instruction queue address */
	uint32_t otx2_regexdev_jobid;
	/**< Job ID */
	uint32_t write_offset;
	/**< write offset */
	regexdev_stop_flush_t cb;
	/**< Callback function called during rte_regex_dev_stop()*/
};

#endif /* _OTX2_REGEXDEV_H_ */

View File

@ -1,11 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef _OTX2_REGEXDEV_COMPILER_H_
#define _OTX2_REGEXDEV_COMPILER_H_
int
otx2_ree_rule_db_compile_prog(struct rte_regexdev *dev);
#endif /* _OTX2_REGEXDEV_COMPILER_H_ */

View File

@ -1,167 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#include "otx2_common.h"
#include "otx2_dev.h"
#include "otx2_regexdev_hw_access.h"
#include "otx2_regexdev_mbox.h"
/*
 * IRQ handler for a REE LF's MISC (error) interrupt.
 * param is the LF's BAR2 base address; the LF id sits in the queue-slot
 * bits of that address (see OTX2_REE_LF_BAR2()).
 */
static void
ree_lf_err_intr_handler(void *param)
{
	uintptr_t base = (uintptr_t)param;
	uint8_t lf_id;
	uint64_t intr;

	/* Recover the LF id from the queue-slot bits of the BAR2 address */
	lf_id = (base >> 12) & 0xFF;

	intr = otx2_read64(base + OTX2_REE_LF_MISC_INT);
	if (intr == 0)
		return;

	otx2_ree_dbg("LF %d MISC_INT: 0x%" PRIx64 "", lf_id, intr);

	/* Clear interrupt */
	otx2_write64(intr, base + OTX2_REE_LF_MISC_INT);
}
/*
 * Mask the MISC interrupt of one REE LF and unregister its handler.
 * msix_off: MSI-X vector of the LF; base: LF BAR2 base address (also
 * the opaque argument the handler was registered with).
 */
static void
ree_lf_err_intr_unregister(const struct rte_regexdev *dev, uint16_t msix_off,
		uintptr_t base)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
	struct rte_intr_handle *handle = pci_dev->intr_handle;

	/* Disable error interrupts */
	otx2_write64(~0ull, base + OTX2_REE_LF_MISC_INT_ENA_W1C);

	otx2_unregister_irq(handle, ree_lf_err_intr_handler, (void *)base,
		msix_off);
}
void
otx2_ree_err_intr_unregister(const struct rte_regexdev *dev)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_vf *vf = &data->vf;
uintptr_t base;
uint32_t i;
for (i = 0; i < vf->nb_queues; i++) {
base = OTX2_REE_LF_BAR2(vf, i);
ree_lf_err_intr_unregister(dev, vf->lf_msixoff[i], base);
}
vf->err_intr_registered = 0;
}
/*
 * Register the MISC interrupt handler for one REE LF.
 * Interrupts are masked first so none can fire while the handler is
 * being installed, then enabled once registration succeeded.
 * Returns 0 on success or the negative error from IRQ registration.
 */
static int
ree_lf_err_intr_register(const struct rte_regexdev *dev, uint16_t msix_off,
		uintptr_t base)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
	struct rte_intr_handle *handle = pci_dev->intr_handle;
	int ret;

	/* Disable error interrupts */
	otx2_write64(~0ull, base + OTX2_REE_LF_MISC_INT_ENA_W1C);

	/* Register error interrupt handler */
	ret = otx2_register_irq(handle, ree_lf_err_intr_handler, (void *)base,
		msix_off);
	if (ret)
		return ret;

	/* Enable error interrupts */
	otx2_write64(~0ull, base + OTX2_REE_LF_MISC_INT_ENA_W1S);

	return 0;
}
/*
 * Register error (MISC) interrupt handlers for every attached REE LF.
 *
 * All MSI-X offsets are validated before any hardware is touched; on a
 * partial registration failure the already-registered handlers are
 * rolled back.
 *
 * Returns 0 on success, -EINVAL on an invalid MSI-X offset, or the
 * negative error propagated from per-LF registration.
 *
 * Fix: ret was declared uint32_t, but it stores (and the function
 * returns) negative error codes from ree_lf_err_intr_register(); it is
 * now a plain int.
 */
int
otx2_ree_err_intr_register(const struct rte_regexdev *dev)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct otx2_ree_vf *vf = &data->vf;
	uint32_t i, j;
	uintptr_t base;
	int ret;

	/* Fail fast if any LF lacks a valid MSI-X vector */
	for (i = 0; i < vf->nb_queues; i++) {
		if (vf->lf_msixoff[i] == MSIX_VECTOR_INVALID) {
			otx2_err("Invalid REE LF MSI-X offset: 0x%x",
				vf->lf_msixoff[i]);
			return -EINVAL;
		}
	}

	for (i = 0; i < vf->nb_queues; i++) {
		base = OTX2_REE_LF_BAR2(vf, i);
		ret = ree_lf_err_intr_register(dev, vf->lf_msixoff[i], base);
		if (ret)
			goto intr_unregister;
	}

	vf->err_intr_registered = 1;
	return 0;

intr_unregister:
	/* Unregister the ones already registered */
	for (j = 0; j < i; j++) {
		base = OTX2_REE_LF_BAR2(vf, j);
		ree_lf_err_intr_unregister(dev, vf->lf_msixoff[j], base);
	}
	return ret;
}
/*
 * Configure and enable the instruction queue of one queue pair.
 * pri: queue priority; size_div2: IQ size value programmed via the AF
 * mailbox (SBUF_CTL). Always returns 0.
 */
int
otx2_ree_iq_enable(const struct rte_regexdev *dev, const struct otx2_ree_qp *qp,
		uint8_t pri, uint32_t size_div2)
{
	union otx2_ree_lf_sbuf_addr base;
	union otx2_ree_lf_ena lf_ena;

	/* Set instruction queue size and priority */
	otx2_ree_config_lf(dev, qp->id, pri, size_div2);

	/* Set instruction queue base address */
	/* Should be written after SBUF_CTL and before LF_ENA */
	base.u = otx2_read64(qp->base + OTX2_REE_LF_SBUF_ADDR);
	/* ptr holds the IOVA >> 7: the IQ base is 128-byte aligned */
	base.s.ptr = qp->iq_dma_addr >> 7;
	otx2_write64(base.u, qp->base + OTX2_REE_LF_SBUF_ADDR);

	/* Enable instruction queue */
	lf_ena.u = otx2_read64(qp->base + OTX2_REE_LF_ENA);
	lf_ena.s.ena = 1;
	otx2_write64(lf_ena.u, qp->base + OTX2_REE_LF_ENA);

	return 0;
}
void
otx2_ree_iq_disable(struct otx2_ree_qp *qp)
{
union otx2_ree_lf_ena lf_ena;
/* Stop instruction execution */
lf_ena.u = otx2_read64(qp->base + OTX2_REE_LF_ENA);
lf_ena.s.ena = 0x0;
otx2_write64(lf_ena.u, qp->base + OTX2_REE_LF_ENA);
}
/*
 * Read the maximum match count from the AF register REEXM_MAX_MATCH
 * (via the mailbox register-read path) into *max_matches.
 * Returns 0 on success or the negative error from the register read.
 */
int
otx2_ree_max_matches_get(const struct rte_regexdev *dev, uint8_t *max_matches)
{
	union otx2_ree_af_reexm_max_match reexm_max_match;
	int ret;

	ret = otx2_ree_af_reg_read(dev, REE_AF_REEXM_MAX_MATCH,
		&reexm_max_match.u);
	if (ret)
		return ret;

	*max_matches = reexm_max_match.s.max;
	return 0;
}

View File

@ -1,202 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#ifndef _OTX2_REGEXDEV_HW_ACCESS_H_
#define _OTX2_REGEXDEV_HW_ACCESS_H_

#include <stdint.h>

#include "otx2_regexdev.h"

/* REE instruction queue length */
#define OTX2_REE_IQ_LEN (1 << 13)

#define OTX2_REE_DEFAULT_CMD_QLEN OTX2_REE_IQ_LEN

/* Status register bits */
#define OTX2_REE_STATUS_PMI_EOJ_BIT (1 << 14)
#define OTX2_REE_STATUS_PMI_SOJ_BIT (1 << 13)
#define OTX2_REE_STATUS_MP_CNT_DET_BIT (1 << 7)
#define OTX2_REE_STATUS_MM_CNT_DET_BIT (1 << 6)
#define OTX2_REE_STATUS_ML_CNT_DET_BIT (1 << 5)
#define OTX2_REE_STATUS_MST_CNT_DET_BIT (1 << 4)
#define OTX2_REE_STATUS_MPT_CNT_DET_BIT (1 << 3)

/* Register offsets */
/* REE LF registers (BAR2, per-LF window) */
#define OTX2_REE_LF_DONE_INT 0x120ull
#define OTX2_REE_LF_DONE_INT_W1S 0x130ull
#define OTX2_REE_LF_DONE_INT_ENA_W1S 0x138ull
#define OTX2_REE_LF_DONE_INT_ENA_W1C 0x140ull
#define OTX2_REE_LF_MISC_INT 0x300ull
#define OTX2_REE_LF_MISC_INT_W1S 0x310ull
#define OTX2_REE_LF_MISC_INT_ENA_W1S 0x320ull
#define OTX2_REE_LF_MISC_INT_ENA_W1C 0x330ull
#define OTX2_REE_LF_ENA 0x10ull
#define OTX2_REE_LF_SBUF_ADDR 0x20ull
#define OTX2_REE_LF_DONE 0x100ull
#define OTX2_REE_LF_DONE_ACK 0x110ull
#define OTX2_REE_LF_DONE_WAIT 0x148ull
#define OTX2_REE_LF_DOORBELL 0x400ull
#define OTX2_REE_LF_OUTSTAND_JOB 0x410ull

/* BAR 0 (AF registers) */
#define OTX2_REE_AF_QUE_SBUF_CTL(a) (0x1200ull | (uint64_t)(a) << 3)
#define OTX2_REE_PRIV_LF_CFG(a) (0x41000ull | (uint64_t)(a) << 3)

/* BAR2 base address of one LF's register window: block address in
 * bits [20+], queue slot in bits [12..19].
 */
#define OTX2_REE_LF_BAR2(vf, q_id) \
	((vf)->otx2_dev.bar2 + \
	(((vf)->block_address << 20) | ((q_id) << 12)))

#define OTX2_REE_QUEUE_HI_PRIO 0x1

/* Descriptor type carried in the result status */
enum ree_desc_type_e {
	REE_TYPE_JOB_DESC = 0x0,
	REE_TYPE_RESULT_DESC = 0x1,
	REE_TYPE_ENUM_LAST = 0x2
};

/* Layout of OTX2_REE_PRIV_LF_CFG */
union otx2_ree_priv_lf_cfg {
	uint64_t u;
	struct {
		uint64_t slot : 8;
		uint64_t pf_func : 16;
		uint64_t reserved_24_62 : 39;
		uint64_t ena : 1;
	} s;
};

/* Layout of OTX2_REE_LF_SBUF_ADDR: ptr holds IOVA bits above the
 * 128-byte-aligned offset field.
 */
union otx2_ree_lf_sbuf_addr {
	uint64_t u;
	struct {
		uint64_t off : 7;
		uint64_t ptr : 46;
		uint64_t reserved_53_63 : 11;
	} s;
};

/* Layout of OTX2_REE_LF_ENA */
union otx2_ree_lf_ena {
	uint64_t u;
	struct {
		uint64_t ena : 1;
		uint64_t reserved_1_63 : 63;
	} s;
};

/* Layout of the AF REEXM_MAX_MATCH register */
union otx2_ree_af_reexm_max_match {
	uint64_t u;
	struct {
		uint64_t max : 8;
		uint64_t reserved_8_63 : 56;
	} s;
};

/* Layout of OTX2_REE_LF_DONE */
union otx2_ree_lf_done {
	uint64_t u;
	struct {
		uint64_t done : 20;
		uint64_t reserved_20_63 : 44;
	} s;
};

/* REE instruction (8x64-bit words), CN98xx layout */
union otx2_ree_inst {
	uint64_t u[8];
	struct {
		uint64_t doneint : 1;
		uint64_t reserved_1_3 : 3;
		uint64_t dg : 1;
		uint64_t reserved_5_7 : 3;
		uint64_t ooj : 1;
		uint64_t reserved_9_15 : 7;
		uint64_t reserved_16_63 : 48;
		uint64_t inp_ptr_addr : 64;
		uint64_t inp_ptr_ctl : 64;
		uint64_t res_ptr_addr : 64;
		uint64_t wq_ptr : 64;
		uint64_t tag : 32;
		uint64_t tt : 2;
		uint64_t ggrp : 10;
		uint64_t reserved_364_383 : 20;
		uint64_t reserved_384_391 : 8;
		uint64_t ree_job_id : 24;
		uint64_t ree_job_ctrl : 16;
		uint64_t ree_job_length : 15;
		uint64_t reserved_447_447 : 1;
		uint64_t ree_job_subset_id_0 : 16;
		uint64_t ree_job_subset_id_1 : 16;
		uint64_t ree_job_subset_id_2 : 16;
		uint64_t ree_job_subset_id_3 : 16;
	} cn98xx;
};

/* Per-job result status word (see OTX2_REE_STATUS_* bits above) */
union otx2_ree_res_status {
	uint64_t u;
	struct {
		uint64_t job_type : 3;
		uint64_t mpt_cnt_det : 1;
		uint64_t mst_cnt_det : 1;
		uint64_t ml_cnt_det : 1;
		uint64_t mm_cnt_det : 1;
		uint64_t mp_cnt_det : 1;
		uint64_t mode : 2;
		uint64_t reserved_10_11 : 2;
		uint64_t reserved_12_12 : 1;
		uint64_t pmi_soj : 1;
		uint64_t pmi_eoj : 1;
		uint64_t reserved_15_15 : 1;
		uint64_t reserved_16_63 : 48;
	} s;
};

/* Result descriptor (8x64-bit words), CN98xx layout */
union otx2_ree_res {
	uint64_t u[8];
	struct ree_res_s_98 {
		uint64_t done : 1;
		uint64_t hwjid : 7;
		uint64_t ree_res_job_id : 24;
		uint64_t ree_res_status : 16;
		uint64_t ree_res_dmcnt : 8;
		uint64_t ree_res_mcnt : 8;
		uint64_t ree_meta_ptcnt : 16;
		uint64_t ree_meta_icnt : 16;
		uint64_t ree_meta_lcnt : 16;
		uint64_t ree_pmi_min_byte_ptr : 16;
		uint64_t ree_err : 1;
		uint64_t reserved_129_190 : 62;
		uint64_t doneint : 1;
		uint64_t reserved_192_255 : 64;
		uint64_t reserved_256_319 : 64;
		uint64_t reserved_320_383 : 64;
		uint64_t reserved_384_447 : 64;
		uint64_t reserved_448_511 : 64;
	} s;
};

/* One match entry reported by the engine */
union otx2_ree_match {
	uint64_t u;
	struct {
		uint64_t ree_rule_id : 32;
		uint64_t start_ptr : 14;
		uint64_t reserved_46_47 : 2;
		uint64_t match_length : 15;
		uint64_t reserved_63_63 : 1;
	} s;
};

/* Unregister the error interrupt handlers of all attached LFs */
void otx2_ree_err_intr_unregister(const struct rte_regexdev *dev);

/* Register error interrupt handlers for all attached LFs */
int otx2_ree_err_intr_register(const struct rte_regexdev *dev);

/* Configure and enable a queue pair's instruction queue.
 * NOTE(review): the definition names the last parameter size_div2 -
 * confirm the intended units of this size value.
 */
int otx2_ree_iq_enable(const struct rte_regexdev *dev,
		const struct otx2_ree_qp *qp,
		uint8_t pri, uint32_t size_div128);

/* Disable a queue pair's instruction queue */
void otx2_ree_iq_disable(struct otx2_ree_qp *qp);

/* Read the engine's maximum match count from the AF */
int otx2_ree_max_matches_get(const struct rte_regexdev *dev,
		uint8_t *max_matches);

#endif /* _OTX2_REGEXDEV_HW_ACCESS_H_ */

View File

@ -1,401 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#include "otx2_common.h"
#include "otx2_dev.h"
#include "otx2_regexdev_mbox.h"
#include "otx2_regexdev.h"
/*
 * Ask the AF (via mailbox) how many REE LFs are free for this VF's REE
 * block and return the count in *nb_queues.
 * Returns 0 on success, -EIO on mailbox failure.
 */
int
otx2_ree_available_queues_get(const struct rte_regexdev *dev,
		uint16_t *nb_queues)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct otx2_ree_vf *vf = &data->vf;
	struct free_rsrcs_rsp *rsp;
	struct otx2_dev *otx2_dev;
	int ret;

	otx2_dev = &vf->otx2_dev;
	otx2_mbox_alloc_msg_free_rsrc_cnt(otx2_dev->mbox);

	ret = otx2_mbox_process_msg(otx2_dev->mbox, (void *)&rsp);
	if (ret)
		return -EIO;

	/* The free LF count is reported per REE block */
	if (vf->block_address == RVU_BLOCK_ADDR_REE0)
		*nb_queues = rsp->ree0;
	else
		*nb_queues = rsp->ree1;
	return 0;
}
/*
 * Attach nb_queues REE LFs (one LF per queue) to this VF through the
 * AF mailbox and record the count in vf->nb_queues.
 * Returns 0 on success, -EIO on mailbox failure.
 */
int
otx2_ree_queues_attach(const struct rte_regexdev *dev, uint8_t nb_queues)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct otx2_ree_vf *vf = &data->vf;
	struct rsrc_attach_req *req;
	struct otx2_mbox *mbox;

	/* Ask AF to attach required LFs */
	mbox = vf->otx2_dev.mbox;
	req = otx2_mbox_alloc_msg_attach_resources(mbox);

	/* 1 LF = 1 queue */
	req->reelfs = nb_queues;
	req->ree_blkaddr = vf->block_address;

	if (otx2_mbox_process(mbox) < 0)
		return -EIO;

	/* Update number of attached queues */
	vf->nb_queues = nb_queues;

	return 0;
}
/* Detach all REE LFs of this VF through the AF mailbox. */
int
otx2_ree_queues_detach(const struct rte_regexdev *dev)
{
	struct otx2_ree_data *ree_data = dev->data->dev_private;
	struct otx2_ree_vf *ree_vf = &ree_data->vf;
	struct otx2_mbox *af_mbox = ree_vf->otx2_dev.mbox;
	struct rsrc_detach_req *detach_req;

	detach_req = otx2_mbox_alloc_msg_detach_resources(af_mbox);
	detach_req->reelfs = true;
	detach_req->partial = true;
	if (otx2_mbox_process(af_mbox) < 0)
		return -EIO;

	/* Queues have been detached */
	ree_vf->nb_queues = 0;

	return 0;
}
/*
 * Query the AF (via mailbox) for the MSI-X vector offset of each
 * attached REE LF and cache them in vf->lf_msixoff[].
 * Returns 0 on success or the (negative) mailbox error.
 *
 * Fix: ret was declared uint32_t, but otx2_mbox_process_msg() returns
 * an int that is negative on failure; it is now a plain int so the
 * error code propagates unmangled.
 */
int
otx2_ree_msix_offsets_get(const struct rte_regexdev *dev)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct otx2_ree_vf *vf = &data->vf;
	struct msix_offset_rsp *rsp;
	struct otx2_mbox *mbox;
	uint32_t i;
	int ret;

	/* Get REE MSI-X vector offsets */
	mbox = vf->otx2_dev.mbox;
	otx2_mbox_alloc_msg_msix_offset(mbox);

	ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (ret)
		return ret;

	for (i = 0; i < vf->nb_queues; i++) {
		/* Offsets are reported per REE block */
		if (vf->block_address == RVU_BLOCK_ADDR_REE0)
			vf->lf_msixoff[i] = rsp->ree0_lf_msixoff[i];
		else
			vf->lf_msixoff[i] = rsp->ree1_lf_msixoff[i];
		otx2_ree_dbg("lf_msixoff[%d] 0x%x", i, vf->lf_msixoff[i]);
	}

	return 0;
}
/*
 * Kick the mailbox and block until the AF responds.
 * Returns 0 on success or the negative wait error.
 */
static int
ree_send_mbox_msg(struct otx2_ree_vf *vf)
{
	struct otx2_mbox *af_mbox = vf->otx2_dev.mbox;
	int rc;

	otx2_mbox_msg_send(af_mbox, 0);
	rc = otx2_mbox_wait_for_rsp(af_mbox, 0);
	if (rc >= 0)
		return 0;

	otx2_err("Could not get mailbox response");
	return rc;
}
/*
 * Configure one REE LF through the AF mailbox: queue priority (pri is
 * normalized to 0/1) and instruction queue size.
 * Returns 0 on success or the negative mailbox error.
 */
int
otx2_ree_config_lf(const struct rte_regexdev *dev, uint8_t lf, uint8_t pri,
		uint32_t size)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct otx2_ree_vf *vf = &data->vf;
	struct ree_lf_req_msg *req;
	struct otx2_mbox *mbox;
	int ret;

	mbox = vf->otx2_dev.mbox;
	req = otx2_mbox_alloc_msg_ree_config_lf(mbox);

	req->lf = lf;
	req->pri = pri ? 1 : 0;
	req->size = size;
	req->blkaddr = vf->block_address;

	ret = otx2_mbox_process(mbox);
	if (ret < 0) {
		otx2_err("Could not get mailbox response");
		return ret;
	}
	return 0;
}
/*
 * Read an AF register over the mailbox (MBOX_MSG_REE_RD_WR_REGISTER
 * with is_write = 0) and store the value in *val.
 * Returns 0 on success, -EFAULT on message allocation failure, or the
 * negative mailbox error.
 */
int
otx2_ree_af_reg_read(const struct rte_regexdev *dev, uint64_t reg,
		uint64_t *val)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct otx2_ree_vf *vf = &data->vf;
	struct ree_rd_wr_reg_msg *msg;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	int ret, off;

	mbox = vf->otx2_dev.mbox;
	mdev = &mbox->dev[0];
	msg = (struct ree_rd_wr_reg_msg *)otx2_mbox_alloc_msg_rsp(mbox, 0,
		sizeof(*msg), sizeof(*msg));
	if (msg == NULL) {
		otx2_err("Could not allocate mailbox message");
		return -EFAULT;
	}

	msg->hdr.id = MBOX_MSG_REE_RD_WR_REGISTER;
	msg->hdr.sig = OTX2_MBOX_REQ_SIG;
	msg->hdr.pcifunc = vf->otx2_dev.pf_func;
	msg->is_write = 0;
	msg->reg_offset = reg;
	/* NOTE(review): the caller's pointer is stored in ret_val while
	 * the value itself is copied from the response below - confirm
	 * against the mbox message definition.
	 */
	msg->ret_val = val;
	msg->blkaddr = vf->block_address;

	ret = ree_send_mbox_msg(vf);
	if (ret < 0)
		return ret;

	/* Fetch the value from the response area of the mailbox */
	off = mbox->rx_start +
		RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	msg = (struct ree_rd_wr_reg_msg *) ((uintptr_t)mdev->mbase + off);

	*val = msg->val;

	return 0;
}
/*
 * Write val to an AF register over the mailbox
 * (MBOX_MSG_REE_RD_WR_REGISTER with is_write = 1).
 * Returns 0 on success, -EFAULT on message allocation failure, or the
 * negative mailbox error.
 */
int
otx2_ree_af_reg_write(const struct rte_regexdev *dev, uint64_t reg,
		uint64_t val)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct otx2_ree_vf *vf = &data->vf;
	struct ree_rd_wr_reg_msg *msg;
	struct otx2_mbox *mbox;

	mbox = vf->otx2_dev.mbox;
	msg = (struct ree_rd_wr_reg_msg *)otx2_mbox_alloc_msg_rsp(mbox, 0,
		sizeof(*msg), sizeof(*msg));
	if (msg == NULL) {
		otx2_err("Could not allocate mailbox message");
		return -EFAULT;
	}

	msg->hdr.id = MBOX_MSG_REE_RD_WR_REGISTER;
	msg->hdr.sig = OTX2_MBOX_REQ_SIG;
	msg->hdr.pcifunc = vf->otx2_dev.pf_func;
	msg->is_write = 1;
	msg->reg_offset = reg;
	msg->val = val;
	msg->blkaddr = vf->block_address;

	return ree_send_mbox_msg(vf);
}
/*
 * Read the programmed rule database from the AF in mailbox-sized
 * chunks into rule_db; optionally also read the incremental db into
 * rule_dbi (pass NULL to skip it). rule_db must be non-NULL and the
 * provided lengths must be large enough for the data reported by the
 * AF.
 * Returns 0 on success, -EFAULT on allocation/size errors, or the
 * negative mailbox error.
 */
int
otx2_ree_rule_db_get(const struct rte_regexdev *dev, char *rule_db,
		uint32_t rule_db_len, char *rule_dbi, uint32_t rule_dbi_len)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct ree_rule_db_get_req_msg *req;
	struct ree_rule_db_get_rsp_msg *rsp;
	char *rule_db_ptr = (char *)rule_db;
	struct otx2_ree_vf *vf = &data->vf;
	struct otx2_mbox *mbox;
	int ret, last = 0;
	uint32_t len = 0;

	mbox = vf->otx2_dev.mbox;
	if (!rule_db) {
		otx2_err("Couldn't return rule db due to NULL pointer");
		return -EFAULT;
	}

	/* The db is transferred chunk by chunk until is_last is set */
	while (!last) {
		req = (struct ree_rule_db_get_req_msg *)
			otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
				sizeof(*rsp));
		if (!req) {
			otx2_err("Could not allocate mailbox message");
			return -EFAULT;
		}

		req->hdr.id = MBOX_MSG_REE_RULE_DB_GET;
		req->hdr.sig = OTX2_MBOX_REQ_SIG;
		req->hdr.pcifunc = vf->otx2_dev.pf_func;
		req->blkaddr = vf->block_address;
		req->is_dbi = 0;
		req->offset = len;
		ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
		if (ret)
			return ret;
		if (rule_db_len < len + rsp->len) {
			otx2_err("Rule db size is too small");
			return -EFAULT;
		}
		otx2_mbox_memcpy(rule_db_ptr, rsp->rule_db, rsp->len);
		len += rsp->len;
		rule_db_ptr = rule_db_ptr + rsp->len;
		last = rsp->is_last;
	}

	if (rule_dbi) {
		/* The incremental db fits in a single message */
		req = (struct ree_rule_db_get_req_msg *)
			otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
				sizeof(*rsp));
		if (!req) {
			otx2_err("Could not allocate mailbox message");
			return -EFAULT;
		}

		req->hdr.id = MBOX_MSG_REE_RULE_DB_GET;
		req->hdr.sig = OTX2_MBOX_REQ_SIG;
		req->hdr.pcifunc = vf->otx2_dev.pf_func;
		req->blkaddr = vf->block_address;
		req->is_dbi = 1;
		req->offset = 0;

		ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
		if (ret)
			return ret;
		if (rule_dbi_len < rsp->len) {
			otx2_err("Rule dbi size is too small");
			return -EFAULT;
		}
		otx2_mbox_memcpy(rule_dbi, rsp->rule_db, rsp->len);
	}
	return 0;
}
/*
 * Query the AF for the programmed rule db length (*rule_db_len) and
 * the incremental db length (*rule_dbi_len); either output pointer may
 * be NULL when the caller does not need that length.
 * Returns 0 on success, -EFAULT on message allocation failure, or the
 * negative mailbox error.
 */
int
otx2_ree_rule_db_len_get(const struct rte_regexdev *dev,
		uint32_t *rule_db_len,
		uint32_t *rule_dbi_len)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct ree_rule_db_len_rsp_msg *rsp;
	struct otx2_ree_vf *vf = &data->vf;
	struct ree_req_msg *req;
	struct otx2_mbox *mbox;
	int ret;

	mbox = vf->otx2_dev.mbox;
	req = (struct ree_req_msg *)
		otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req), sizeof(*rsp));
	if (!req) {
		otx2_err("Could not allocate mailbox message");
		return -EFAULT;
	}

	req->hdr.id = MBOX_MSG_REE_RULE_DB_LEN_GET;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	req->hdr.pcifunc = vf->otx2_dev.pf_func;
	req->blkaddr = vf->block_address;
	ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (ret)
		return ret;
	if (rule_db_len != NULL)
		*rule_db_len = rsp->len;
	if (rule_dbi_len != NULL)
		*rule_dbi_len = rsp->inc_len;

	return 0;
}
/*
 * Stream a rule db buffer to the AF in REE_RULE_DB_REQ_BLOCK_SIZE
 * chunks over the mailbox. inc marks incremental programming; dbi
 * selects the incremental-db section instead of the main db.
 * Returns 0 on success, -EFAULT on message allocation failure, or the
 * negative mailbox error.
 */
static int
ree_db_msg(const struct rte_regexdev *dev, const char *db, uint32_t db_len,
		int inc, int dbi)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	uint32_t len_left = db_len, offset = 0;
	struct ree_rule_db_prog_req_msg *req;
	struct otx2_ree_vf *vf = &data->vf;
	const char *rule_db_ptr = db;
	struct otx2_mbox *mbox;
	struct msg_rsp *rsp;
	int ret;

	mbox = vf->otx2_dev.mbox;
	while (len_left) {
		req = (struct ree_rule_db_prog_req_msg *)
			otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
				sizeof(*rsp));
		if (!req) {
			otx2_err("Could not allocate mailbox message");
			return -EFAULT;
		}
		req->hdr.id = MBOX_MSG_REE_RULE_DB_PROG;
		req->hdr.sig = OTX2_MBOX_REQ_SIG;
		req->hdr.pcifunc = vf->otx2_dev.pf_func;
		req->offset = offset;
		req->total_len = db_len;
		req->len = REE_RULE_DB_REQ_BLOCK_SIZE;
		req->is_incremental = inc;
		req->is_dbi = dbi;
		req->blkaddr = vf->block_address;

		/* Final (possibly short) chunk */
		if (len_left < REE_RULE_DB_REQ_BLOCK_SIZE) {
			req->is_last = true;
			req->len = len_left;
		}
		otx2_mbox_memcpy(req->rule_db, rule_db_ptr, req->len);
		ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
		if (ret) {
			otx2_err("Programming mailbox processing failed");
			return ret;
		}
		len_left -= req->len;
		offset += req->len;
		rule_db_ptr = rule_db_ptr + req->len;
	}
	return 0;
}
/*
 * Program a rule database (and optional incremental db) to the AF.
 * A non-zero rule_dbi_len selects incremental programming, in which
 * case the incremental db is sent first.
 * Returns 0 on success or a negative errno.
 */
int
otx2_ree_rule_db_prog(const struct rte_regexdev *dev, const char *rule_db,
		uint32_t rule_db_len, const char *rule_dbi,
		uint32_t rule_dbi_len)
{
	int incremental, rc;

	if (rule_db_len == 0) {
		otx2_err("Couldn't program empty rule db");
		return -EFAULT;
	}

	incremental = (rule_dbi_len != 0);
	if (rule_db == NULL || (incremental && rule_dbi == NULL)) {
		otx2_err("Couldn't program NULL rule db");
		return -EFAULT;
	}

	if (incremental) {
		rc = ree_db_msg(dev, rule_dbi, rule_dbi_len, incremental, 1);
		if (rc)
			return rc;
	}

	return ree_db_msg(dev, rule_db, rule_db_len, incremental, 0);
}

View File

@ -1,38 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#ifndef _OTX2_REGEXDEV_MBOX_H_
#define _OTX2_REGEXDEV_MBOX_H_

#include <rte_regexdev.h>

/* LF resource management via the AF mailbox */
int otx2_ree_available_queues_get(const struct rte_regexdev *dev,
		uint16_t *nb_queues);

int otx2_ree_queues_attach(const struct rte_regexdev *dev, uint8_t nb_queues);

int otx2_ree_queues_detach(const struct rte_regexdev *dev);

int otx2_ree_msix_offsets_get(const struct rte_regexdev *dev);

int otx2_ree_config_lf(const struct rte_regexdev *dev, uint8_t lf, uint8_t pri,
		uint32_t size);

/* AF register access via the mailbox */
int otx2_ree_af_reg_read(const struct rte_regexdev *dev, uint64_t reg,
		uint64_t *val);

int otx2_ree_af_reg_write(const struct rte_regexdev *dev, uint64_t reg,
		uint64_t val);

/* Rule database transfer (read back, query lengths, program) */
int otx2_ree_rule_db_get(const struct rte_regexdev *dev, char *rule_db,
		uint32_t rule_db_len, char *rule_dbi, uint32_t rule_dbi_len);

int otx2_ree_rule_db_len_get(const struct rte_regexdev *dev,
		uint32_t *rule_db_len, uint32_t *rule_dbi_len);

int otx2_ree_rule_db_prog(const struct rte_regexdev *dev, const char *rule_db,
		uint32_t rule_db_len, const char *rule_dbi,
		uint32_t rule_dbi_len);

#endif /* _OTX2_REGEXDEV_MBOX_H_ */
#endif /* _OTX2_REGEXDEV_MBOX_H_ */