regex/octeontx2: introduce REE driver

Add meson based build infrastructure along with the
OTX2 regexdev (REE) device functions.
Add Marvell OCTEON TX2 regex guide.

Signed-off-by: Guy Kaneti <guyk@marvell.com>
Guy Kaneti 2020-10-13 13:10:10 +03:00 committed by Thomas Monjalon
parent 6695369d29
commit 4cd1c5fd9e
17 changed files with 2273 additions and 1 deletion


@@ -1112,6 +1112,12 @@ F: doc/guides/compressdevs/features/zlib.ini
RegEx Drivers
-------------
Marvell OCTEON TX2 regex
M: Guy Kaneti <guyk@marvell.com>
F: drivers/regex/octeontx2/
F: doc/guides/regexdevs/octeontx2.rst
F: doc/guides/regexdevs/features/octeontx2.ini
Mellanox mlx5
M: Ori Kam <orika@nvidia.com>
F: drivers/regex/mlx5/


@@ -67,6 +67,8 @@ DPDK subsystem.
+---+-----+--------------------------------------------------------------+
| 9 | SDP | rte_ethdev |
+---+-----+--------------------------------------------------------------+
| 10| REE | rte_regexdev |
+---+-----+--------------------------------------------------------------+
PF0 is called the administrative / admin function (AF) and has exclusive
privileges to provision RVU functional block's LFs to each of the PF/VF.
@@ -156,6 +158,9 @@ This section lists dataplane H/W block(s) available in OCTEON TX2 SoC.
#. **Crypto Device Driver**
See :doc:`../cryptodevs/octeontx2` for CPT crypto device driver information.
#. **Regex Device Driver**
See :doc:`../regexdevs/octeontx2` for REE regex device driver information.
Procedure to Setup Platform
---------------------------


@@ -0,0 +1,10 @@
;
; Supported features of the 'octeontx2' regex driver.
;
; Refer to default.ini for the full list of available driver features.
;
[Features]
PCRE back reference = Y
PCRE word boundary = Y
Run time compilation = Y
Armv8 = Y


@@ -13,3 +13,4 @@ which can be used from an application through RegEx API.
features_overview
mlx5
octeontx2


@@ -0,0 +1,38 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2020 Marvell International Ltd.
OCTEON TX2 REE Regexdev Driver
==============================
The OCTEON TX2 REE PMD (**librte_pmd_octeontx2_regex**) provides poll mode
regexdev driver support for the inbuilt regex device found in the **Marvell OCTEON TX2**
SoC family.
More information about OCTEON TX2 SoC can be found at `Marvell Official Website
<https://www.marvell.com/embedded-processors/infrastructure-processors/>`_.
Features
--------
Features of the OCTEON TX2 REE PMD are:
- 36 queues
- Up to 254 matches for each regex operation
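The limits a probed device actually advertises can be read back through the generic regexdev API. Below is a minimal, illustrative sketch (not taken from this patch); it assumes device id 0 has been probed and that the rte_regexdev_info field names match rte_regexdev.h of this release.

#include <stdio.h>
#include <rte_regexdev.h>

static void
ree_print_limits(void)
{
	struct rte_regexdev_info info;

	/* Query the limits advertised by the probed regex device. */
	if (rte_regexdev_info_get(0, &info) == 0)
		printf("queue pairs: %u, matches per scan: %u\n",
			info.max_queue_pairs, info.max_matches);
}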
Prerequisites and Compilation procedure
---------------------------------------
See :doc:`../platform/octeontx2` for setup information.
Debugging Options
-----------------
.. _table_octeontx2_regex_debug_options:
.. table:: OCTEON TX2 regex device debug options
+---+------------+-------------------------------------------------------+
| # | Component | EAL log command |
+===+============+=======================================================+
| 1 | REE | --log-level='pmd\.regex\.octeontx2,8' |
+---+------------+-------------------------------------------------------+
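The same log level can also be raised from application code after rte_eal_init(); a short sketch using the standard rte_log pattern API, with the log name taken from the table above:

#include <rte_log.h>

static void
ree_enable_debug_logs(void)
{
	/* Level 8 on the EAL command line corresponds to RTE_LOG_DEBUG. */
	rte_log_set_level_pattern("pmd.regex.octeontx2", RTE_LOG_DEBUG);
}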


@@ -146,6 +146,12 @@ New Features
``--portmask=N``
where N represents the hexadecimal bitmask of ports used.
* **Added Marvell OCTEON TX2 regex PMD.**
Added a new PMD driver for hardware regex offload block for OCTEON TX2 SoC.
See the :doc:`../regexdevs/octeontx2` for more details.
* **Updated ioat rawdev driver**
The ioat rawdev driver has been updated and enhanced. Changes include:


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020 Mellanox Technologies, Ltd
-drivers = ['mlx5']
+drivers = ['mlx5', 'octeontx2']
std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc
config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
driver_name_fmt = 'rte_pmd_@0@'


@@ -0,0 +1,44 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(C) 2020 Marvell International Ltd.
#
if not is_linux
build = false
reason = 'only supported on Linux'
endif
lib = cc.find_library('librxp_compiler', required: false)
if lib.found()
ext_deps += lib
ext_deps += cc.find_library('libstdc++', required: true)
includes += include_directories(inc_dir)
cflags += ['-DREE_COMPILER_SDK']
endif
sources = files('otx2_regexdev.c',
'otx2_regexdev_hw_access.c',
'otx2_regexdev_mbox.c',
'otx2_regexdev_compiler.c'
)
extra_flags = []
# This integrated controller runs only on an arm64 machine, remove 32-bit warnings
if not dpdk_conf.get('RTE_ARCH_64')
extra_flags += ['-Wno-int-to-pointer-cast', '-Wno-pointer-to-int-cast']
endif
# for clang 32-bit compiles we need libatomic for 64-bit atomic ops
if cc.get_id() == 'clang' and dpdk_conf.get('RTE_ARCH_64') == false
ext_deps += cc.find_library('atomic')
endif
foreach flag: extra_flags
if cc.has_argument(flag)
cflags += flag
endif
endforeach
name = 'octeontx2_regex'
deps += ['bus_pci', 'common_octeontx2', 'regexdev']
includes += include_directories('../../common/octeontx2')

File diff suppressed because it is too large


@@ -0,0 +1,109 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef _OTX2_REGEXDEV_H_
#define _OTX2_REGEXDEV_H_
#include <rte_common.h>
#include <rte_regexdev.h>
#include "otx2_dev.h"
#define ree_func_trace otx2_ree_dbg
/* Marvell OCTEON TX2 Regex PMD device name */
#define REGEXDEV_NAME_OCTEONTX2_PMD regex_octeontx2
#define OTX2_REE_MAX_LFS 36
#define OTX2_REE_MAX_QUEUES_PER_VF 36
#define OTX2_REE_MAX_MATCHES_PER_VF 254
#define OTX2_REE_MAX_PAYLOAD_SIZE (1 << 14)
#define OTX2_REE_NON_INC_PROG 0
#define OTX2_REE_INC_PROG 1
#define REE_MOD_INC(i, l) ((i) == (l - 1) ? (i) = 0 : (i)++)
/**
* Device vf data
*/
struct otx2_ree_vf {
struct otx2_dev otx2_dev;
/**< Base class */
uint16_t max_queues;
/**< Max queues supported */
uint8_t nb_queues;
/**< Number of regex queues attached */
uint16_t max_matches;
/**< Max matches supported*/
uint16_t lf_msixoff[OTX2_REE_MAX_LFS];
/**< MSI-X offsets */
uint8_t block_address;
/**< REE Block Address */
uint8_t err_intr_registered:1;
/**< Are error interrupts registered? */
};
/**
* Device private data
*/
struct otx2_ree_data {
uint32_t regexdev_capa;
uint64_t rule_flags;
/**< Feature flags exposes HW/SW features for the given device */
uint16_t max_rules_per_group;
/**< Maximum rules supported per subset by this device */
uint16_t max_groups;
/**< Maximum subset supported by this device */
void **queue_pairs;
/**< Array of pointers to queue pairs. */
uint16_t nb_queue_pairs;
/**< Number of device queue pairs. */
struct otx2_ree_vf vf;
/**< vf data */
struct rte_regexdev_rule *rules;
/**< rules to be compiled */
uint16_t nb_rules;
/**< number of rules */
} __rte_cache_aligned;
struct otx2_ree_rid {
uintptr_t rid;
/** Request id of a ree operation */
uint64_t user_id;
/* Client data */
/**< IOVA address of the pattern to be matched. */
};
struct otx2_ree_pending_queue {
uint64_t pending_count;
/** Pending requests count */
struct otx2_ree_rid *rid_queue;
/** Array of pending requests */
uint16_t enq_tail;
/** Tail of queue to be used for enqueue */
uint16_t deq_head;
/** Head of queue to be used for dequeue */
};
struct otx2_ree_qp {
uint32_t id;
/**< Queue pair id */
uintptr_t base;
/**< Base address where BAR is mapped */
struct otx2_ree_pending_queue pend_q;
/**< Pending queue */
rte_iova_t iq_dma_addr;
/**< Instruction queue address */
uint32_t otx2_regexdev_jobid;
/**< Job ID */
uint32_t write_offset;
/**< write offset */
regexdev_stop_flush_t cb;
/**< Callback function called during rte_regex_dev_stop()*/
};
#endif /* _OTX2_REGEXDEV_H_ */
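For orientation, a hedged sketch of how the pending-queue ring declared above is expected to advance on enqueue. The real fast-path code lives in otx2_regexdev.c (its diff is suppressed above), so the queue-length argument and the way the request id is produced here are assumptions.

static inline void
ree_pending_queue_push(struct otx2_ree_pending_queue *pend_q,
		       uintptr_t rid, uint64_t user_id, uint16_t qlen)
{
	/* Record the hardware request id and the caller's cookie. */
	pend_q->rid_queue[pend_q->enq_tail].rid = rid;
	pend_q->rid_queue[pend_q->enq_tail].user_id = user_id;
	/* REE_MOD_INC() wraps enq_tail back to 0 when it reaches qlen - 1. */
	REE_MOD_INC(pend_q->enq_tail, qlen);
	pend_q->pending_count++;
}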


@@ -0,0 +1,229 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#include <rte_malloc.h>
#include <rte_regexdev.h>
#include "otx2_regexdev.h"
#include "otx2_regexdev_compiler.h"
#include "otx2_regexdev_mbox.h"
#ifdef REE_COMPILER_SDK
#include <rxp-compiler.h>
static int
ree_rule_db_compile(const struct rte_regexdev_rule *rules,
uint16_t nb_rules, struct rxp_rof **rof, struct rxp_rof **rofi,
struct rxp_rof *rof_for_incremental_compile,
struct rxp_rof *rofi_for_incremental_compile)
{
/*INPUT*/
struct rxp_prefix_selection_control_list *prefix_selection_control_list
= NULL;
struct rxp_blacklist_data_sample *blacklist_sample_data = NULL;
struct rxp_rule_ids_to_remove *rule_ids_to_remove = NULL;
struct rxp_roff *roff_for_incremental_compile = NULL;
/*OPTIONS - setting default values*/
enum rxp_virtual_prefix_mode virtual_prefix_mode =
RXP_VIRTUAL_PREFIX_MODE_0;
enum rxp_prefix_capacity prefix_capacity = RXP_PREFIX_CAPACITY_32K;
/**< rxp_global_regex_options_flags*/
enum rxp_compiler_objective objective = RXP_COMPILER_OBJECTIVE_5;
enum rxp_tpe_data_width tpe_data_width = RXP_TPE_DATA_WIDTH_4;
uint32_t compiler_options = RXP_COMPILER_OPTIONS_FORCE;
/**< rxp_compiler_options_flags*/
enum rxp_verbose_level verbose = RXP_VERBOSE_LEVEL_3;
enum rxp_version set_rxp_version = RXP_VERSION_V5_8;
uint32_t compiler_output_flags = 0;
/**< rxp_compiler_output_flags*/
uint32_t global_regex_options = 0;
/**< rxp_global_regex_options_flags*/
float set_auto_blacklist = 0;
uint32_t max_rep_max = 65535;
uint32_t divide_ruleset = 1;
struct rxp_ruleset ruleset;
float ptpb_threshold = 0;
uint32_t set_max = 0;
uint32_t threads = 1;
/*OUTPUT*/
struct rxp_rule_direction_analysis *rule_direction_analysis = NULL;
struct rxp_compilation_statistics *compilation_statistics = NULL;
struct rxp_prefix_selection_control_list *generated_pscl = NULL;
struct rxp_uncompiled_rules_log *uncompiled_rules_log = NULL;
struct rxp_critical_rules_rank *critical_rules_rank = NULL;
struct rxp_compiled_rules_log *compiled_rules_log = NULL;
struct rxp_roff *roff = NULL;
uint16_t i;
int ret;
ruleset.number_of_entries = nb_rules;
ruleset.rules = rte_malloc("rxp_rule_entry",
nb_rules*sizeof(struct rxp_rule_entry), 0);
if (ruleset.rules == NULL) {
otx2_err("Could not allocate memory for rule compilation\n");
return -EFAULT;
}
if (rof_for_incremental_compile)
compiler_options |= RXP_COMPILER_OPTIONS_INCREMENTAL;
if (rofi_for_incremental_compile)
compiler_options |= RXP_COMPILER_OPTIONS_CHECKSUM;
for (i = 0; i < nb_rules; i++) {
ruleset.rules[i].number_of_prefix_entries = 0;
ruleset.rules[i].prefix = NULL;
ruleset.rules[i].rule = rules[i].pcre_rule;
ruleset.rules[i].rule_id = rules[i].rule_id;
ruleset.rules[i].subset_id = rules[i].group_id;
ruleset.rules[i].rule_direction_type =
RXP_RULE_DIRECTION_TYPE_NONE;
}
ret = rxp_compile_advanced(
/*INPUT*/
&ruleset,
prefix_selection_control_list,
rof_for_incremental_compile,
roff_for_incremental_compile,
rofi_for_incremental_compile,
rule_ids_to_remove,
blacklist_sample_data,
/*OPTIONS*/
compiler_options,
prefix_capacity,
global_regex_options,
set_auto_blacklist,
set_max,
objective,
ptpb_threshold,
max_rep_max,
threads,
set_rxp_version,
verbose,
tpe_data_width,
virtual_prefix_mode,
compiler_output_flags,
divide_ruleset,
/*OUTPUT*/
&compilation_statistics,
&compiled_rules_log,
&critical_rules_rank,
&rule_direction_analysis,
&uncompiled_rules_log,
rof,
&roff,
rofi,
&generated_pscl);
rte_free(ruleset.rules);
return ret;
}
int
otx2_ree_rule_db_compile_prog(struct rte_regexdev *dev)
{
struct otx2_ree_data *data = dev->data->dev_private;
char compiler_version[] = "20.5.2.eda0fa2";
char timestamp[] = "19700101_000001";
uint32_t rule_db_len, rule_dbi_len;
struct rxp_rof *rofi_inc_p = NULL;
struct rxp_rof_entry rule_dbi[6];
char *rofi_rof_entries = NULL;
struct rxp_rof *rofi = NULL;
struct rxp_rof *rof = NULL;
struct rxp_rof rofi_inc;
struct rxp_rof rof_inc;
char *rule_db = NULL;
int ret;
ree_func_trace();
ret = otx2_ree_rule_db_len_get(dev, &rule_db_len, &rule_dbi_len);
if (ret != 0) {
otx2_err("Could not get rule db length");
return ret;
}
if (rule_db_len > 0) {
otx2_ree_dbg("Incremental compile, rule db len %d rule dbi len %d",
rule_db_len, rule_dbi_len);
rule_db = rte_malloc("ree_rule_db", rule_db_len, 0);
if (!rule_db) {
otx2_err("Could not allocate memory for rule db");
return -EFAULT;
}
ret = otx2_ree_rule_db_get(dev, rule_db, rule_db_len,
(char *)rule_dbi, rule_dbi_len);
if (ret) {
otx2_err("Could not read rule db");
rte_free(rule_db);
return -EFAULT;
}
rof_inc.rof_revision = 0;
rof_inc.rof_version = 2;
rof_inc.rof_entries = (struct rxp_rof_entry *)rule_db;
rof_inc.rxp_compiler_version = compiler_version;
rof_inc.timestamp = timestamp;
rof_inc.number_of_entries =
(rule_db_len/sizeof(struct rxp_rof_entry));
if (rule_dbi_len > 0) {
/* incremental compilation not the first time */
rofi_inc.rof_revision = 0;
rofi_inc.rof_version = 2;
rofi_inc.rof_entries = rule_dbi;
rofi_inc.rxp_compiler_version = compiler_version;
rofi_inc.timestamp = timestamp;
rofi_inc.number_of_entries =
(rule_dbi_len/sizeof(struct rxp_rof_entry));
rofi_inc_p = &rofi_inc;
}
ret = ree_rule_db_compile(data->rules, data->nb_rules, &rof,
&rofi, &rof_inc, rofi_inc_p);
if (rofi->number_of_entries == 0) {
otx2_ree_dbg("No change to rule db");
ret = 0;
goto free_structs;
}
rule_dbi_len = rofi->number_of_entries *
sizeof(struct rxp_rof_entry);
rofi_rof_entries = (char *)rofi->rof_entries;
} else {
/* full compilation */
ret = ree_rule_db_compile(data->rules, data->nb_rules, &rof,
&rofi, NULL, NULL);
}
if (ret != 0) {
otx2_err("Could not compile rule db");
goto free_structs;
}
rule_db_len = rof->number_of_entries * sizeof(struct rxp_rof_entry);
ret = otx2_ree_rule_db_prog(dev, (char *)rof->rof_entries, rule_db_len,
rofi_rof_entries, rule_dbi_len);
if (ret)
otx2_err("Could not program rule db");
free_structs:
rxp_free_structs(NULL, NULL, NULL, NULL, NULL, &rof, NULL, &rofi, NULL,
1);
if (rule_db)
rte_free(rule_db);
return ret;
}
#else
int
otx2_ree_rule_db_compile_prog(struct rte_regexdev *dev)
{
RTE_SET_USED(dev);
return -ENOTSUP;
}
#endif
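For context, a hedged sketch of the application-side path that ends up in otx2_ree_rule_db_compile_prog(): rules are queued through the generic regexdev rule-database calls, and the compile/activate step drives the RXP compilation shown above. Device id 0 is assumed, error handling is trimmed, and the rte_regexdev_rule field and enum names are assumed to match rte_regexdev.h of this release.

#include <string.h>
#include <rte_regexdev.h>

static int
ree_load_one_rule(void)
{
	const char *pattern = "abc[0-9]+";	/* example pattern only */
	struct rte_regexdev_rule rule = {
		.op = RTE_REGEX_RULE_OP_ADD,
		.group_id = 0,
		.rule_id = 1,
		.pcre_rule = pattern,
		.pcre_rule_len = (uint16_t)strlen(pattern),
	};
	int ret;

	ret = rte_regexdev_rule_db_update(0, &rule, 1);
	if (ret < 0)
		return ret;
	/* For this PMD, activation eventually calls ree_rule_db_compile()
	 * and programs the result through the AF mailbox.
	 */
	return rte_regexdev_rule_db_compile_activate(0);
}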


@@ -0,0 +1,11 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef _OTX2_REGEXDEV_COMPILER_H_
#define _OTX2_REGEXDEV_COMPILER_H_
int
otx2_ree_rule_db_compile_prog(struct rte_regexdev *dev);
#endif /* _OTX2_REGEXDEV_COMPILER_H_ */


@@ -0,0 +1,167 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#include "otx2_common.h"
#include "otx2_dev.h"
#include "otx2_regexdev_hw_access.h"
#include "otx2_regexdev_mbox.h"
static void
ree_lf_err_intr_handler(void *param)
{
uintptr_t base = (uintptr_t)param;
uint8_t lf_id;
uint64_t intr;
lf_id = (base >> 12) & 0xFF;
intr = otx2_read64(base + OTX2_REE_LF_MISC_INT);
if (intr == 0)
return;
otx2_ree_dbg("LF %d MISC_INT: 0x%" PRIx64 "", lf_id, intr);
/* Clear interrupt */
otx2_write64(intr, base + OTX2_REE_LF_MISC_INT);
}
static void
ree_lf_err_intr_unregister(const struct rte_regexdev *dev, uint16_t msix_off,
uintptr_t base)
{
struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
struct rte_intr_handle *handle = &pci_dev->intr_handle;
/* Disable error interrupts */
otx2_write64(~0ull, base + OTX2_REE_LF_MISC_INT_ENA_W1C);
otx2_unregister_irq(handle, ree_lf_err_intr_handler, (void *)base,
msix_off);
}
void
otx2_ree_err_intr_unregister(const struct rte_regexdev *dev)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_vf *vf = &data->vf;
uintptr_t base;
uint32_t i;
for (i = 0; i < vf->nb_queues; i++) {
base = OTX2_REE_LF_BAR2(vf, i);
ree_lf_err_intr_unregister(dev, vf->lf_msixoff[i], base);
}
vf->err_intr_registered = 0;
}
static int
ree_lf_err_intr_register(const struct rte_regexdev *dev, uint16_t msix_off,
uintptr_t base)
{
struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
struct rte_intr_handle *handle = &pci_dev->intr_handle;
int ret;
/* Disable error interrupts */
otx2_write64(~0ull, base + OTX2_REE_LF_MISC_INT_ENA_W1C);
/* Register error interrupt handler */
ret = otx2_register_irq(handle, ree_lf_err_intr_handler, (void *)base,
msix_off);
if (ret)
return ret;
/* Enable error interrupts */
otx2_write64(~0ull, base + OTX2_REE_LF_MISC_INT_ENA_W1S);
return 0;
}
int
otx2_ree_err_intr_register(const struct rte_regexdev *dev)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_vf *vf = &data->vf;
uint32_t i, j, ret;
uintptr_t base;
for (i = 0; i < vf->nb_queues; i++) {
if (vf->lf_msixoff[i] == MSIX_VECTOR_INVALID) {
otx2_err("Invalid REE LF MSI-X offset: 0x%x",
vf->lf_msixoff[i]);
return -EINVAL;
}
}
for (i = 0; i < vf->nb_queues; i++) {
base = OTX2_REE_LF_BAR2(vf, i);
ret = ree_lf_err_intr_register(dev, vf->lf_msixoff[i], base);
if (ret)
goto intr_unregister;
}
vf->err_intr_registered = 1;
return 0;
intr_unregister:
/* Unregister the ones already registered */
for (j = 0; j < i; j++) {
base = OTX2_REE_LF_BAR2(vf, j);
ree_lf_err_intr_unregister(dev, vf->lf_msixoff[j], base);
}
return ret;
}
int
otx2_ree_iq_enable(const struct rte_regexdev *dev, const struct otx2_ree_qp *qp,
uint8_t pri, uint32_t size_div2)
{
union otx2_ree_lf_sbuf_addr base;
union otx2_ree_lf_ena lf_ena;
/* Set instruction queue size and priority */
otx2_ree_config_lf(dev, qp->id, pri, size_div2);
/* Set instruction queue base address */
/* Should be written after SBUF_CTL and before LF_ENA */
base.u = otx2_read64(qp->base + OTX2_REE_LF_SBUF_ADDR);
base.s.ptr = qp->iq_dma_addr >> 7;
otx2_write64(base.u, qp->base + OTX2_REE_LF_SBUF_ADDR);
/* Enable instruction queue */
lf_ena.u = otx2_read64(qp->base + OTX2_REE_LF_ENA);
lf_ena.s.ena = 1;
otx2_write64(lf_ena.u, qp->base + OTX2_REE_LF_ENA);
return 0;
}
void
otx2_ree_iq_disable(struct otx2_ree_qp *qp)
{
union otx2_ree_lf_ena lf_ena;
/* Stop instruction execution */
lf_ena.u = otx2_read64(qp->base + OTX2_REE_LF_ENA);
lf_ena.s.ena = 0x0;
otx2_write64(lf_ena.u, qp->base + OTX2_REE_LF_ENA);
}
int
otx2_ree_max_matches_get(const struct rte_regexdev *dev, uint8_t *max_matches)
{
union otx2_ree_af_reexm_max_match reexm_max_match;
int ret;
ret = otx2_ree_af_reg_read(dev, REE_AF_REEXM_MAX_MATCH,
&reexm_max_match.u);
if (ret)
return ret;
*max_matches = reexm_max_match.s.max;
return 0;
}


@@ -0,0 +1,202 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef _OTX2_REGEXDEV_HW_ACCESS_H_
#define _OTX2_REGEXDEV_HW_ACCESS_H_
#include <stdint.h>
#include "otx2_regexdev.h"
/* REE instruction queue length */
#define OTX2_REE_IQ_LEN (1 << 13)
#define OTX2_REE_DEFAULT_CMD_QLEN OTX2_REE_IQ_LEN
/* Status register bits */
#define OTX2_REE_STATUS_PMI_EOJ_BIT (1 << 14)
#define OTX2_REE_STATUS_PMI_SOJ_BIT (1 << 13)
#define OTX2_REE_STATUS_MP_CNT_DET_BIT (1 << 7)
#define OTX2_REE_STATUS_MM_CNT_DET_BIT (1 << 6)
#define OTX2_REE_STATUS_ML_CNT_DET_BIT (1 << 5)
#define OTX2_REE_STATUS_MST_CNT_DET_BIT (1 << 4)
#define OTX2_REE_STATUS_MPT_CNT_DET_BIT (1 << 3)
/* Register offsets */
/* REE LF registers */
#define OTX2_REE_LF_DONE_INT 0x120ull
#define OTX2_REE_LF_DONE_INT_W1S 0x130ull
#define OTX2_REE_LF_DONE_INT_ENA_W1S 0x138ull
#define OTX2_REE_LF_DONE_INT_ENA_W1C 0x140ull
#define OTX2_REE_LF_MISC_INT 0x300ull
#define OTX2_REE_LF_MISC_INT_W1S 0x310ull
#define OTX2_REE_LF_MISC_INT_ENA_W1S 0x320ull
#define OTX2_REE_LF_MISC_INT_ENA_W1C 0x330ull
#define OTX2_REE_LF_ENA 0x10ull
#define OTX2_REE_LF_SBUF_ADDR 0x20ull
#define OTX2_REE_LF_DONE 0x100ull
#define OTX2_REE_LF_DONE_ACK 0x110ull
#define OTX2_REE_LF_DONE_WAIT 0x148ull
#define OTX2_REE_LF_DOORBELL 0x400ull
#define OTX2_REE_LF_OUTSTAND_JOB 0x410ull
/* BAR 0 */
#define OTX2_REE_AF_QUE_SBUF_CTL(a) (0x1200ull | (uint64_t)(a) << 3)
#define OTX2_REE_PRIV_LF_CFG(a) (0x41000ull | (uint64_t)(a) << 3)
#define OTX2_REE_LF_BAR2(vf, q_id) \
((vf)->otx2_dev.bar2 + \
(((vf)->block_address << 20) | ((q_id) << 12)))
#define OTX2_REE_QUEUE_HI_PRIO 0x1
enum ree_desc_type_e {
REE_TYPE_JOB_DESC = 0x0,
REE_TYPE_RESULT_DESC = 0x1,
REE_TYPE_ENUM_LAST = 0x2
};
union otx2_ree_priv_lf_cfg {
uint64_t u;
struct {
uint64_t slot : 8;
uint64_t pf_func : 16;
uint64_t reserved_24_62 : 39;
uint64_t ena : 1;
} s;
};
union otx2_ree_lf_sbuf_addr {
uint64_t u;
struct {
uint64_t off : 7;
uint64_t ptr : 46;
uint64_t reserved_53_63 : 11;
} s;
};
union otx2_ree_lf_ena {
uint64_t u;
struct {
uint64_t ena : 1;
uint64_t reserved_1_63 : 63;
} s;
};
union otx2_ree_af_reexm_max_match {
uint64_t u;
struct {
uint64_t max : 8;
uint64_t reserved_8_63 : 56;
} s;
};
union otx2_ree_lf_done {
uint64_t u;
struct {
uint64_t done : 20;
uint64_t reserved_20_63 : 44;
} s;
};
union otx2_ree_inst {
uint64_t u[8];
struct {
uint64_t doneint : 1;
uint64_t reserved_1_3 : 3;
uint64_t dg : 1;
uint64_t reserved_5_7 : 3;
uint64_t ooj : 1;
uint64_t reserved_9_15 : 7;
uint64_t reserved_16_63 : 48;
uint64_t inp_ptr_addr : 64;
uint64_t inp_ptr_ctl : 64;
uint64_t res_ptr_addr : 64;
uint64_t wq_ptr : 64;
uint64_t tag : 32;
uint64_t tt : 2;
uint64_t ggrp : 10;
uint64_t reserved_364_383 : 20;
uint64_t reserved_384_391 : 8;
uint64_t ree_job_id : 24;
uint64_t ree_job_ctrl : 16;
uint64_t ree_job_length : 15;
uint64_t reserved_447_447 : 1;
uint64_t ree_job_subset_id_0 : 16;
uint64_t ree_job_subset_id_1 : 16;
uint64_t ree_job_subset_id_2 : 16;
uint64_t ree_job_subset_id_3 : 16;
} cn98xx;
};
union otx2_ree_res_status {
uint64_t u;
struct {
uint64_t job_type : 3;
uint64_t mpt_cnt_det : 1;
uint64_t mst_cnt_det : 1;
uint64_t ml_cnt_det : 1;
uint64_t mm_cnt_det : 1;
uint64_t mp_cnt_det : 1;
uint64_t mode : 2;
uint64_t reserved_10_11 : 2;
uint64_t reserved_12_12 : 1;
uint64_t pmi_soj : 1;
uint64_t pmi_eoj : 1;
uint64_t reserved_15_15 : 1;
uint64_t reserved_16_63 : 48;
} s;
};
union otx2_ree_res {
uint64_t u[8];
struct ree_res_s_98 {
uint64_t done : 1;
uint64_t hwjid : 7;
uint64_t ree_res_job_id : 24;
uint64_t ree_res_status : 16;
uint64_t ree_res_dmcnt : 8;
uint64_t ree_res_mcnt : 8;
uint64_t ree_meta_ptcnt : 16;
uint64_t ree_meta_icnt : 16;
uint64_t ree_meta_lcnt : 16;
uint64_t ree_pmi_min_byte_ptr : 16;
uint64_t ree_err : 1;
uint64_t reserved_129_190 : 62;
uint64_t doneint : 1;
uint64_t reserved_192_255 : 64;
uint64_t reserved_256_319 : 64;
uint64_t reserved_320_383 : 64;
uint64_t reserved_384_447 : 64;
uint64_t reserved_448_511 : 64;
} s;
};
union otx2_ree_match {
uint64_t u;
struct {
uint64_t ree_rule_id : 32;
uint64_t start_ptr : 14;
uint64_t reserved_46_47 : 2;
uint64_t match_length : 15;
uint64_t reserved_63_63 : 1;
} s;
};
void otx2_ree_err_intr_unregister(const struct rte_regexdev *dev);
int otx2_ree_err_intr_register(const struct rte_regexdev *dev);
int otx2_ree_iq_enable(const struct rte_regexdev *dev,
const struct otx2_ree_qp *qp,
uint8_t pri, uint32_t size_div128);
void otx2_ree_iq_disable(struct otx2_ree_qp *qp);
int otx2_ree_max_matches_get(const struct rte_regexdev *dev,
uint8_t *max_matches);
#endif /* _OTX2_REGEXDEV_HW_ACCESS_H_ */
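A hedged sketch of how the result layout above could be polled once hardware raises the done bit; which OTX2_REE_STATUS_* bits translate into which regexdev response flags is decided in otx2_regexdev.c (diff suppressed above) and is not asserted here.

#include <errno.h>

static inline int
ree_result_poll(const union otx2_ree_res *res, uint8_t *nb_matches,
		uint16_t *status_bits)
{
	if (!res->s.done)
		return -EBUSY;	/* job still in flight */
	/* Raw status word; test it against the OTX2_REE_STATUS_* bits. */
	*status_bits = res->s.ree_res_status;
	/* Number of match entries reported for this job. */
	*nb_matches = res->s.ree_res_mcnt;
	return 0;
}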


@@ -0,0 +1,401 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#include "otx2_common.h"
#include "otx2_dev.h"
#include "otx2_regexdev_mbox.h"
#include "otx2_regexdev.h"
int
otx2_ree_available_queues_get(const struct rte_regexdev *dev,
uint16_t *nb_queues)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_vf *vf = &data->vf;
struct free_rsrcs_rsp *rsp;
struct otx2_dev *otx2_dev;
int ret;
otx2_dev = &vf->otx2_dev;
otx2_mbox_alloc_msg_free_rsrc_cnt(otx2_dev->mbox);
ret = otx2_mbox_process_msg(otx2_dev->mbox, (void *)&rsp);
if (ret)
return -EIO;
if (vf->block_address == RVU_BLOCK_ADDR_REE0)
*nb_queues = rsp->ree0;
else
*nb_queues = rsp->ree1;
return 0;
}
int
otx2_ree_queues_attach(const struct rte_regexdev *dev, uint8_t nb_queues)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_vf *vf = &data->vf;
struct rsrc_attach_req *req;
struct otx2_mbox *mbox;
/* Ask AF to attach required LFs */
mbox = vf->otx2_dev.mbox;
req = otx2_mbox_alloc_msg_attach_resources(mbox);
/* 1 LF = 1 queue */
req->reelfs = nb_queues;
req->ree_blkaddr = vf->block_address;
if (otx2_mbox_process(mbox) < 0)
return -EIO;
/* Update number of attached queues */
vf->nb_queues = nb_queues;
return 0;
}
int
otx2_ree_queues_detach(const struct rte_regexdev *dev)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_vf *vf = &data->vf;
struct rsrc_detach_req *req;
struct otx2_mbox *mbox;
mbox = vf->otx2_dev.mbox;
req = otx2_mbox_alloc_msg_detach_resources(mbox);
req->reelfs = true;
req->partial = true;
if (otx2_mbox_process(mbox) < 0)
return -EIO;
/* Queues have been detached */
vf->nb_queues = 0;
return 0;
}
int
otx2_ree_msix_offsets_get(const struct rte_regexdev *dev)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_vf *vf = &data->vf;
struct msix_offset_rsp *rsp;
struct otx2_mbox *mbox;
uint32_t i, ret;
/* Get REE MSI-X vector offsets */
mbox = vf->otx2_dev.mbox;
otx2_mbox_alloc_msg_msix_offset(mbox);
ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
if (ret)
return ret;
for (i = 0; i < vf->nb_queues; i++) {
if (vf->block_address == RVU_BLOCK_ADDR_REE0)
vf->lf_msixoff[i] = rsp->ree0_lf_msixoff[i];
else
vf->lf_msixoff[i] = rsp->ree1_lf_msixoff[i];
otx2_ree_dbg("lf_msixoff[%d] 0x%x", i, vf->lf_msixoff[i]);
}
return 0;
}
static int
ree_send_mbox_msg(struct otx2_ree_vf *vf)
{
struct otx2_mbox *mbox = vf->otx2_dev.mbox;
int ret;
otx2_mbox_msg_send(mbox, 0);
ret = otx2_mbox_wait_for_rsp(mbox, 0);
if (ret < 0) {
otx2_err("Could not get mailbox response");
return ret;
}
return 0;
}
int
otx2_ree_config_lf(const struct rte_regexdev *dev, uint8_t lf, uint8_t pri,
uint32_t size)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_vf *vf = &data->vf;
struct ree_lf_req_msg *req;
struct otx2_mbox *mbox;
int ret;
mbox = vf->otx2_dev.mbox;
req = otx2_mbox_alloc_msg_ree_config_lf(mbox);
req->lf = lf;
req->pri = pri ? 1 : 0;
req->size = size;
req->blkaddr = vf->block_address;
ret = otx2_mbox_process(mbox);
if (ret < 0) {
otx2_err("Could not get mailbox response");
return ret;
}
return 0;
}
int
otx2_ree_af_reg_read(const struct rte_regexdev *dev, uint64_t reg,
uint64_t *val)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_vf *vf = &data->vf;
struct ree_rd_wr_reg_msg *msg;
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
int ret, off;
mbox = vf->otx2_dev.mbox;
mdev = &mbox->dev[0];
msg = (struct ree_rd_wr_reg_msg *)otx2_mbox_alloc_msg_rsp(mbox, 0,
sizeof(*msg), sizeof(*msg));
if (msg == NULL) {
otx2_err("Could not allocate mailbox message");
return -EFAULT;
}
msg->hdr.id = MBOX_MSG_REE_RD_WR_REGISTER;
msg->hdr.sig = OTX2_MBOX_REQ_SIG;
msg->hdr.pcifunc = vf->otx2_dev.pf_func;
msg->is_write = 0;
msg->reg_offset = reg;
msg->ret_val = val;
msg->blkaddr = vf->block_address;
ret = ree_send_mbox_msg(vf);
if (ret < 0)
return ret;
off = mbox->rx_start +
RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
msg = (struct ree_rd_wr_reg_msg *) ((uintptr_t)mdev->mbase + off);
*val = msg->val;
return 0;
}
int
otx2_ree_af_reg_write(const struct rte_regexdev *dev, uint64_t reg,
uint64_t val)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct otx2_ree_vf *vf = &data->vf;
struct ree_rd_wr_reg_msg *msg;
struct otx2_mbox *mbox;
mbox = vf->otx2_dev.mbox;
msg = (struct ree_rd_wr_reg_msg *)otx2_mbox_alloc_msg_rsp(mbox, 0,
sizeof(*msg), sizeof(*msg));
if (msg == NULL) {
otx2_err("Could not allocate mailbox message");
return -EFAULT;
}
msg->hdr.id = MBOX_MSG_REE_RD_WR_REGISTER;
msg->hdr.sig = OTX2_MBOX_REQ_SIG;
msg->hdr.pcifunc = vf->otx2_dev.pf_func;
msg->is_write = 1;
msg->reg_offset = reg;
msg->val = val;
msg->blkaddr = vf->block_address;
return ree_send_mbox_msg(vf);
}
int
otx2_ree_rule_db_get(const struct rte_regexdev *dev, char *rule_db,
uint32_t rule_db_len, char *rule_dbi, uint32_t rule_dbi_len)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct ree_rule_db_get_req_msg *req;
struct ree_rule_db_get_rsp_msg *rsp;
char *rule_db_ptr = (char *)rule_db;
struct otx2_ree_vf *vf = &data->vf;
struct otx2_mbox *mbox;
int ret, last = 0;
uint32_t len = 0;
mbox = vf->otx2_dev.mbox;
if (!rule_db) {
otx2_err("Couldn't return rule db due to NULL pointer");
return -EFAULT;
}
while (!last) {
req = (struct ree_rule_db_get_req_msg *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
sizeof(*rsp));
if (!req) {
otx2_err("Could not allocate mailbox message");
return -EFAULT;
}
req->hdr.id = MBOX_MSG_REE_RULE_DB_GET;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
req->hdr.pcifunc = vf->otx2_dev.pf_func;
req->blkaddr = vf->block_address;
req->is_dbi = 0;
req->offset = len;
ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
if (ret)
return ret;
if (rule_db_len < len + rsp->len) {
otx2_err("Rule db size is too small");
return -EFAULT;
}
otx2_mbox_memcpy(rule_db_ptr, rsp->rule_db, rsp->len);
len += rsp->len;
rule_db_ptr = rule_db_ptr + rsp->len;
last = rsp->is_last;
}
if (rule_dbi) {
req = (struct ree_rule_db_get_req_msg *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
sizeof(*rsp));
if (!req) {
otx2_err("Could not allocate mailbox message");
return -EFAULT;
}
req->hdr.id = MBOX_MSG_REE_RULE_DB_GET;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
req->hdr.pcifunc = vf->otx2_dev.pf_func;
req->blkaddr = vf->block_address;
req->is_dbi = 1;
req->offset = 0;
ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
if (ret)
return ret;
if (rule_dbi_len < rsp->len) {
otx2_err("Rule dbi size is too small");
return -EFAULT;
}
otx2_mbox_memcpy(rule_dbi, rsp->rule_db, rsp->len);
}
return 0;
}
int
otx2_ree_rule_db_len_get(const struct rte_regexdev *dev,
uint32_t *rule_db_len,
uint32_t *rule_dbi_len)
{
struct otx2_ree_data *data = dev->data->dev_private;
struct ree_rule_db_len_rsp_msg *rsp;
struct otx2_ree_vf *vf = &data->vf;
struct ree_req_msg *req;
struct otx2_mbox *mbox;
int ret;
mbox = vf->otx2_dev.mbox;
req = (struct ree_req_msg *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req), sizeof(*rsp));
if (!req) {
otx2_err("Could not allocate mailbox message");
return -EFAULT;
}
req->hdr.id = MBOX_MSG_REE_RULE_DB_LEN_GET;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
req->hdr.pcifunc = vf->otx2_dev.pf_func;
req->blkaddr = vf->block_address;
ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
if (ret)
return ret;
if (rule_db_len != NULL)
*rule_db_len = rsp->len;
if (rule_dbi_len != NULL)
*rule_dbi_len = rsp->inc_len;
return 0;
}
static int
ree_db_msg(const struct rte_regexdev *dev, const char *db, uint32_t db_len,
int inc, int dbi)
{
struct otx2_ree_data *data = dev->data->dev_private;
uint32_t len_left = db_len, offset = 0;
struct ree_rule_db_prog_req_msg *req;
struct otx2_ree_vf *vf = &data->vf;
const char *rule_db_ptr = db;
struct otx2_mbox *mbox;
struct msg_rsp *rsp;
int ret;
mbox = vf->otx2_dev.mbox;
while (len_left) {
req = (struct ree_rule_db_prog_req_msg *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
sizeof(*rsp));
if (!req) {
otx2_err("Could not allocate mailbox message");
return -EFAULT;
}
req->hdr.id = MBOX_MSG_REE_RULE_DB_PROG;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
req->hdr.pcifunc = vf->otx2_dev.pf_func;
req->offset = offset;
req->total_len = db_len;
req->len = REE_RULE_DB_REQ_BLOCK_SIZE;
req->is_incremental = inc;
req->is_dbi = dbi;
req->blkaddr = vf->block_address;
if (len_left < REE_RULE_DB_REQ_BLOCK_SIZE) {
req->is_last = true;
req->len = len_left;
}
otx2_mbox_memcpy(req->rule_db, rule_db_ptr, req->len);
ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
if (ret) {
otx2_err("Programming mailbox processing failed");
return ret;
}
len_left -= req->len;
offset += req->len;
rule_db_ptr = rule_db_ptr + req->len;
}
return 0;
}
int
otx2_ree_rule_db_prog(const struct rte_regexdev *dev, const char *rule_db,
uint32_t rule_db_len, const char *rule_dbi,
uint32_t rule_dbi_len)
{
int inc, ret;
if (rule_db_len == 0) {
otx2_err("Couldn't program empty rule db");
return -EFAULT;
}
inc = (rule_dbi_len != 0);
if ((rule_db == NULL) || (inc && (rule_dbi == NULL))) {
otx2_err("Couldn't program NULL rule db");
return -EFAULT;
}
if (inc) {
ret = ree_db_msg(dev, rule_dbi, rule_dbi_len, inc, 1);
if (ret)
return ret;
}
return ree_db_msg(dev, rule_db, rule_db_len, inc, 0);
}


@ -0,0 +1,38 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef _OTX2_REGEXDEV_MBOX_H_
#define _OTX2_REGEXDEV_MBOX_H_
#include <rte_regexdev.h>
int otx2_ree_available_queues_get(const struct rte_regexdev *dev,
uint16_t *nb_queues);
int otx2_ree_queues_attach(const struct rte_regexdev *dev, uint8_t nb_queues);
int otx2_ree_queues_detach(const struct rte_regexdev *dev);
int otx2_ree_msix_offsets_get(const struct rte_regexdev *dev);
int otx2_ree_config_lf(const struct rte_regexdev *dev, uint8_t lf, uint8_t pri,
uint32_t size);
int otx2_ree_af_reg_read(const struct rte_regexdev *dev, uint64_t reg,
uint64_t *val);
int otx2_ree_af_reg_write(const struct rte_regexdev *dev, uint64_t reg,
uint64_t val);
int otx2_ree_rule_db_get(const struct rte_regexdev *dev, char *rule_db,
uint32_t rule_db_len, char *rule_dbi, uint32_t rule_dbi_len);
int otx2_ree_rule_db_len_get(const struct rte_regexdev *dev,
uint32_t *rule_db_len, uint32_t *rule_dbi_len);
int otx2_ree_rule_db_prog(const struct rte_regexdev *dev, const char *rule_db,
uint32_t rule_db_len, const char *rule_dbi,
uint32_t rule_dbi_len);
#endif /* _OTX2_REGEXDEV_MBOX_H_ */


@@ -0,0 +1,3 @@
DPDK_21 {
local: *;
};