drivers: remove octeontx2 drivers

As per the deprecation notice, and in view of enabling a unified driver
for octeontx2 (cn9k) / octeontx3 (cn10k), remove the drivers/octeontx2
drivers and replace them with drivers/cnxk/, which supports both
octeontx2 (cn9k) and octeontx3 (cn10k) SoCs.

This patch does the following:

- Replace drivers/common/octeontx2/ with drivers/common/cnxk/
- Replace drivers/mempool/octeontx2/ with drivers/mempool/cnxk/
- Replace drivers/net/octeontx2/ with drivers/net/cnxk/
- Replace drivers/event/octeontx2/ with drivers/event/cnxk/
- Replace drivers/crypto/octeontx2/ with drivers/crypto/cnxk/
- Rename config/arm/arm64_octeontx2_linux_gcc to
  config/arm/arm64_cn9k_linux_gcc
- Update the documentation and MAINTAINERS to reflect these changes.
- Change references from OCTEONTX2 to OCTEON 9. Old release notes and
  the kernel-related documentation are not updated by this change.

Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
Acked-by: Ruifeng Wang <ruifeng.wang@arm.com>
Jerin Jacob 2021-12-11 14:34:35 +05:30 committed by Thomas Monjalon
parent 72c00ae9db
commit 33e71acf3d
149 changed files with 92 additions and 52124 deletions
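For reference, cross-compiling for the CN9K (former octeontx2) target after this change uses the renamed cross file; a minimal sketch, assuming an aarch64 cross toolchain is installed, taken from the updated cnxk platform guide further below:

    meson build --cross-file config/arm/arm64_cn9k_linux_gcc
    ninja -C build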


@@ -536,15 +536,6 @@ T: git://dpdk.org/next/dpdk-next-net-mrvl
 F: drivers/mempool/cnxk/
 F: doc/guides/mempool/cnxk.rst
-Marvell OCTEON TX2
-M: Jerin Jacob <jerinj@marvell.com>
-M: Nithin Dabilpuram <ndabilpuram@marvell.com>
-F: drivers/common/octeontx2/
-F: drivers/mempool/octeontx2/
-F: doc/guides/platform/img/octeontx2_*
-F: doc/guides/platform/octeontx2.rst
-F: doc/guides/mempool/octeontx2.rst
 Bus Drivers
 -----------

@@ -797,21 +788,6 @@ F: drivers/net/mvneta/
 F: doc/guides/nics/mvneta.rst
 F: doc/guides/nics/features/mvneta.ini
-Marvell OCTEON TX2
-M: Jerin Jacob <jerinj@marvell.com>
-M: Nithin Dabilpuram <ndabilpuram@marvell.com>
-M: Kiran Kumar K <kirankumark@marvell.com>
-T: git://dpdk.org/next/dpdk-next-net-mrvl
-F: drivers/net/octeontx2/
-F: doc/guides/nics/features/octeontx2*.ini
-F: doc/guides/nics/octeontx2.rst
-Marvell OCTEON TX2 - security
-M: Anoob Joseph <anoobj@marvell.com>
-T: git://dpdk.org/next/dpdk-next-crypto
-F: drivers/common/octeontx2/otx2_sec*
-F: drivers/net/octeontx2/otx2_ethdev_sec*
 Marvell OCTEON TX EP - endpoint
 M: Nalla Pradeep <pnalla@marvell.com>
 M: Radha Mohan Chintakuntla <radhac@marvell.com>

@@ -1117,13 +1093,6 @@ F: drivers/crypto/nitrox/
 F: doc/guides/cryptodevs/nitrox.rst
 F: doc/guides/cryptodevs/features/nitrox.ini
-Marvell OCTEON TX2 crypto
-M: Ankur Dwivedi <adwivedi@marvell.com>
-M: Anoob Joseph <anoobj@marvell.com>
-F: drivers/crypto/octeontx2/
-F: doc/guides/cryptodevs/octeontx2.rst
-F: doc/guides/cryptodevs/features/octeontx2.ini
 Mellanox mlx5
 M: Matan Azrad <matan@nvidia.com>
 F: drivers/crypto/mlx5/

@@ -1300,12 +1269,6 @@ M: Shijith Thotton <sthotton@marvell.com>
 F: drivers/event/cnxk/
 F: doc/guides/eventdevs/cnxk.rst
-Marvell OCTEON TX2
-M: Pavan Nikhilesh <pbhagavatula@marvell.com>
-M: Jerin Jacob <jerinj@marvell.com>
-F: drivers/event/octeontx2/
-F: doc/guides/eventdevs/octeontx2.rst
 NXP DPAA eventdev
 M: Hemant Agrawal <hemant.agrawal@nxp.com>
 M: Nipun Gupta <nipun.gupta@nxp.com>


@@ -341,7 +341,6 @@ driver_test_names = [
 'cryptodev_dpaa_sec_autotest',
 'cryptodev_dpaa2_sec_autotest',
 'cryptodev_null_autotest',
-'cryptodev_octeontx2_autotest',
 'cryptodev_openssl_autotest',
 'cryptodev_openssl_asym_autotest',
 'cryptodev_qat_autotest',


@@ -15615,12 +15615,6 @@ test_cryptodev_octeontx(void)
 return run_cryptodev_testsuite(RTE_STR(CRYPTODEV_NAME_OCTEONTX_SYM_PMD));
 }
-static int
-test_cryptodev_octeontx2(void)
-{
-return run_cryptodev_testsuite(RTE_STR(CRYPTODEV_NAME_OCTEONTX2_PMD));
-}
 static int
 test_cryptodev_caam_jr(void)
 {

@@ -15733,7 +15727,6 @@ REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_autotest, test_cryptodev_dpaa_sec);
 REGISTER_TEST_COMMAND(cryptodev_ccp_autotest, test_cryptodev_ccp);
 REGISTER_TEST_COMMAND(cryptodev_virtio_autotest, test_cryptodev_virtio);
 REGISTER_TEST_COMMAND(cryptodev_octeontx_autotest, test_cryptodev_octeontx);
-REGISTER_TEST_COMMAND(cryptodev_octeontx2_autotest, test_cryptodev_octeontx2);
 REGISTER_TEST_COMMAND(cryptodev_caam_jr_autotest, test_cryptodev_caam_jr);
 REGISTER_TEST_COMMAND(cryptodev_nitrox_autotest, test_cryptodev_nitrox);
 REGISTER_TEST_COMMAND(cryptodev_bcmfs_autotest, test_cryptodev_bcmfs);


@@ -68,7 +68,6 @@
 #define CRYPTODEV_NAME_CCP_PMD crypto_ccp
 #define CRYPTODEV_NAME_VIRTIO_PMD crypto_virtio
 #define CRYPTODEV_NAME_OCTEONTX_SYM_PMD crypto_octeontx
-#define CRYPTODEV_NAME_OCTEONTX2_PMD crypto_octeontx2
 #define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
 #define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
 #define CRYPTODEV_NAME_BCMFS_PMD crypto_bcmfs


@@ -2375,20 +2375,6 @@ test_cryptodev_octeontx_asym(void)
 return unit_test_suite_runner(&cryptodev_octeontx_asym_testsuite);
 }
-static int
-test_cryptodev_octeontx2_asym(void)
-{
-gbl_driver_id = rte_cryptodev_driver_id_get(
-RTE_STR(CRYPTODEV_NAME_OCTEONTX2_PMD));
-if (gbl_driver_id == -1) {
-RTE_LOG(ERR, USER1, "OCTEONTX2 PMD must be loaded.\n");
-return TEST_FAILED;
-}
-/* Use test suite registered for crypto_octeontx PMD */
-return unit_test_suite_runner(&cryptodev_octeontx_asym_testsuite);
-}
 static int
 test_cryptodev_cn9k_asym(void)
 {

@@ -2424,8 +2410,5 @@ REGISTER_TEST_COMMAND(cryptodev_qat_asym_autotest, test_cryptodev_qat_asym);
 REGISTER_TEST_COMMAND(cryptodev_octeontx_asym_autotest,
 test_cryptodev_octeontx_asym);
-REGISTER_TEST_COMMAND(cryptodev_octeontx2_asym_autotest,
-test_cryptodev_octeontx2_asym);
 REGISTER_TEST_COMMAND(cryptodev_cn9k_asym_autotest, test_cryptodev_cn9k_asym);
 REGISTER_TEST_COMMAND(cryptodev_cn10k_asym_autotest, test_cryptodev_cn10k_asym);


@@ -1018,12 +1018,6 @@ test_eventdev_selftest_octeontx(void)
 return test_eventdev_selftest_impl("event_octeontx", "");
 }
-static int
-test_eventdev_selftest_octeontx2(void)
-{
-return test_eventdev_selftest_impl("event_octeontx2", "");
-}
 static int
 test_eventdev_selftest_dpaa2(void)
 {

@@ -1052,8 +1046,6 @@ REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);
 REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);
 REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
 test_eventdev_selftest_octeontx);
-REGISTER_TEST_COMMAND(eventdev_selftest_octeontx2,
-test_eventdev_selftest_octeontx2);
 REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);
 REGISTER_TEST_COMMAND(eventdev_selftest_dlb2, test_eventdev_selftest_dlb2);
 REGISTER_TEST_COMMAND(eventdev_selftest_cn9k, test_eventdev_selftest_cn9k);


@@ -14,4 +14,3 @@ endian = 'little'
 [properties]
 platform = 'cn10k'
-disable_drivers = 'common/octeontx2'


@@ -13,5 +13,4 @@ cpu = 'armv8-a'
 endian = 'little'
 [properties]
-platform = 'octeontx2'
-disable_drivers = 'common/cnxk'
+platform = 'cn9k'


@@ -139,7 +139,7 @@ implementer_cavium = {
 'march_features': ['crc', 'crypto', 'lse'],
 'compiler_options': ['-mcpu=octeontx2'],
 'flags': [
-['RTE_MACHINE', '"octeontx2"'],
+['RTE_MACHINE', '"cn9k"'],
 ['RTE_ARM_FEATURE_ATOMICS', true],
 ['RTE_USE_C11_MEM_MODEL', true],
 ['RTE_MAX_LCORE', 36],

@@ -340,8 +340,8 @@ soc_n2 = {
 'numa': false
 }
-soc_octeontx2 = {
-'description': 'Marvell OCTEON TX2',
+soc_cn9k = {
+'description': 'Marvell OCTEON 9',
 'implementer': '0x43',
 'part_number': '0xb2',
 'numa': false

@@ -377,6 +377,7 @@ generic_aarch32: Generic un-optimized build for armv8 aarch32 execution mode.
 armada: Marvell ARMADA
 bluefield: NVIDIA BlueField
 centriq2400: Qualcomm Centriq 2400
+cn9k: Marvell OCTEON 9
 cn10k: Marvell OCTEON 10
 dpaa: NXP DPAA
 emag: Ampere eMAG

@@ -385,7 +386,6 @@ kunpeng920: HiSilicon Kunpeng 920
 kunpeng930: HiSilicon Kunpeng 930
 n1sdp: Arm Neoverse N1SDP
 n2: Arm Neoverse N2
-octeontx2: Marvell OCTEON TX2
 stingray: Broadcom Stingray
 thunderx2: Marvell ThunderX2 T99
 thunderxt88: Marvell ThunderX T88

@@ -399,6 +399,7 @@ socs = {
 'armada': soc_armada,
 'bluefield': soc_bluefield,
 'centriq2400': soc_centriq2400,
+'cn9k': soc_cn9k,
 'cn10k' : soc_cn10k,
 'dpaa': soc_dpaa,
 'emag': soc_emag,

@@ -407,7 +408,6 @@ socs = {
 'kunpeng930': soc_kunpeng930,
 'n1sdp': soc_n1sdp,
 'n2': soc_n2,
-'octeontx2': soc_octeontx2,
 'stingray': soc_stingray,
 'thunderx2': soc_thunderx2,
 'thunderxt88': soc_thunderxt88


@@ -48,7 +48,7 @@ for dump in $(find $refdir -name "*.dump"); do
 echo "Skipped removed driver $name."
 continue
 fi
-if grep -qE "\<librte_regex_octeontx2" $dump; then
+if grep -qE "\<librte_*.*_octeontx2" $dump; then
 echo "Skipped removed driver $name."
 continue
 fi


@@ -1,87 +0,0 @@
;
; Supported features of the 'octeontx2' crypto driver.
;
; Refer to default.ini for the full list of available PMD features.
;
[Features]
Symmetric crypto = Y
Asymmetric crypto = Y
Sym operation chaining = Y
HW Accelerated = Y
Protocol offload = Y
In Place SGL = Y
OOP SGL In LB Out = Y
OOP SGL In SGL Out = Y
OOP LB In LB Out = Y
RSA PRIV OP KEY QT = Y
Digest encrypted = Y
Symmetric sessionless = Y
;
; Supported crypto algorithms of 'octeontx2' crypto driver.
;
[Cipher]
NULL = Y
3DES CBC = Y
3DES ECB = Y
AES CBC (128) = Y
AES CBC (192) = Y
AES CBC (256) = Y
AES CTR (128) = Y
AES CTR (192) = Y
AES CTR (256) = Y
AES XTS (128) = Y
AES XTS (256) = Y
DES CBC = Y
KASUMI F8 = Y
SNOW3G UEA2 = Y
ZUC EEA3 = Y
;
; Supported authentication algorithms of 'octeontx2' crypto driver.
;
[Auth]
NULL = Y
AES GMAC = Y
KASUMI F9 = Y
MD5 = Y
MD5 HMAC = Y
SHA1 = Y
SHA1 HMAC = Y
SHA224 = Y
SHA224 HMAC = Y
SHA256 = Y
SHA256 HMAC = Y
SHA384 = Y
SHA384 HMAC = Y
SHA512 = Y
SHA512 HMAC = Y
SNOW3G UIA2 = Y
ZUC EIA3 = Y
;
; Supported AEAD algorithms of 'octeontx2' crypto driver.
;
[AEAD]
AES GCM (128) = Y
AES GCM (192) = Y
AES GCM (256) = Y
CHACHA20-POLY1305 = Y
;
; Supported Asymmetric algorithms of the 'octeontx2' crypto driver.
;
[Asymmetric]
RSA = Y
DSA =
Modular Exponentiation = Y
Modular Inversion =
Diffie-hellman =
ECDSA = Y
ECPM = Y
;
; Supported Operating systems of the 'octeontx2' crypto driver.
;
[OS]
Linux = Y


@@ -22,7 +22,6 @@ Crypto Device Drivers
 dpaa_sec
 kasumi
 octeontx
-octeontx2
 openssl
 mlx5
 mvsam


@@ -1,188 +0,0 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2019 Marvell International Ltd.
Marvell OCTEON TX2 Crypto Poll Mode Driver
==========================================
The OCTEON TX2 crypto poll mode driver provides support for offloading
cryptographic operations to cryptographic accelerator units on the
**OCTEON TX2** :sup:`®` family of processors (CN9XXX).
More information about OCTEON TX2 SoCs may be obtained from `<https://www.marvell.com>`_
Features
--------
The OCTEON TX2 crypto PMD has support for:
Symmetric Crypto Algorithms
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Cipher algorithms:
* ``RTE_CRYPTO_CIPHER_NULL``
* ``RTE_CRYPTO_CIPHER_3DES_CBC``
* ``RTE_CRYPTO_CIPHER_3DES_ECB``
* ``RTE_CRYPTO_CIPHER_AES_CBC``
* ``RTE_CRYPTO_CIPHER_AES_CTR``
* ``RTE_CRYPTO_CIPHER_AES_XTS``
* ``RTE_CRYPTO_CIPHER_DES_CBC``
* ``RTE_CRYPTO_CIPHER_KASUMI_F8``
* ``RTE_CRYPTO_CIPHER_SNOW3G_UEA2``
* ``RTE_CRYPTO_CIPHER_ZUC_EEA3``
Hash algorithms:
* ``RTE_CRYPTO_AUTH_NULL``
* ``RTE_CRYPTO_AUTH_AES_GMAC``
* ``RTE_CRYPTO_AUTH_KASUMI_F9``
* ``RTE_CRYPTO_AUTH_MD5``
* ``RTE_CRYPTO_AUTH_MD5_HMAC``
* ``RTE_CRYPTO_AUTH_SHA1``
* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
* ``RTE_CRYPTO_AUTH_SHA224``
* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
* ``RTE_CRYPTO_AUTH_SHA256``
* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
* ``RTE_CRYPTO_AUTH_SHA384``
* ``RTE_CRYPTO_AUTH_SHA384_HMAC``
* ``RTE_CRYPTO_AUTH_SHA512``
* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
* ``RTE_CRYPTO_AUTH_SNOW3G_UIA2``
* ``RTE_CRYPTO_AUTH_ZUC_EIA3``
AEAD algorithms:
* ``RTE_CRYPTO_AEAD_AES_GCM``
* ``RTE_CRYPTO_AEAD_CHACHA20_POLY1305``
Asymmetric Crypto Algorithms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* ``RTE_CRYPTO_ASYM_XFORM_RSA``
* ``RTE_CRYPTO_ASYM_XFORM_MODEX``
Installation
------------
The OCTEON TX2 crypto PMD may be compiled natively on an OCTEON TX2 platform or
cross-compiled on an x86 platform.
Refer to :doc:`../platform/octeontx2` for instructions to build your DPDK
application.
.. note::
The OCTEON TX2 crypto PMD uses services from the kernel mode OCTEON TX2
crypto PF driver in linux. This driver is included in the OCTEON TX SDK.
Initialization
--------------
List the CPT PF devices available on your OCTEON TX2 platform:
.. code-block:: console
lspci -d:a0fd
``a0fd`` is the CPT PF device id. You should see output similar to:
.. code-block:: console
0002:10:00.0 Class 1080: Device 177d:a0fd
Set ``sriov_numvfs`` on the CPT PF device, to create a VF:
.. code-block:: console
echo 1 > /sys/bus/pci/drivers/octeontx2-cpt/0002:10:00.0/sriov_numvfs
Bind the CPT VF device to the vfio_pci driver:
.. code-block:: console
echo '177d a0fe' > /sys/bus/pci/drivers/vfio-pci/new_id
echo 0002:10:00.1 > /sys/bus/pci/devices/0002:10:00.1/driver/unbind
echo 0002:10:00.1 > /sys/bus/pci/drivers/vfio-pci/bind
Another way to bind the VF would be to use the ``dpdk-devbind.py`` script:
.. code-block:: console
cd <dpdk directory>
./usertools/dpdk-devbind.py -u 0002:10:00.1
./usertools/dpdk-devbind.py -b vfio-pci 0002:10.00.1
.. note::
* For CN98xx SoC, it is recommended to use even and odd DBDF VFs to achieve
higher performance as even VF uses one crypto engine and odd one uses
another crypto engine.
* Ensure that sufficient huge pages are available for your application::
dpdk-hugepages.py --setup 4G --pagesize 512M
Refer to :ref:`linux_gsg_hugepages` for more details.
Debugging Options
-----------------
.. _table_octeontx2_crypto_debug_options:
.. table:: OCTEON TX2 crypto PMD debug options
+---+------------+-------------------------------------------------------+
| # | Component | EAL log command |
+===+============+=======================================================+
| 1 | CPT | --log-level='pmd\.crypto\.octeontx2,8' |
+---+------------+-------------------------------------------------------+
Testing
-------
The symmetric crypto operations on OCTEON TX2 crypto PMD may be verified by running the test
application:
.. code-block:: console
./dpdk-test
RTE>>cryptodev_octeontx2_autotest
The asymmetric crypto operations on OCTEON TX2 crypto PMD may be verified by running the test
application:
.. code-block:: console
./dpdk-test
RTE>>cryptodev_octeontx2_asym_autotest
Lookaside IPsec Support
-----------------------
The OCTEON TX2 SoC can accelerate IPsec traffic in lookaside protocol mode,
with its **cryptographic accelerator (CPT)**. ``OCTEON TX2 crypto PMD`` implements
this as an ``RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL`` offload.
Refer to :doc:`../prog_guide/rte_security` for more details on protocol offloads.
This feature can be tested with ipsec-secgw sample application.
Features supported
~~~~~~~~~~~~~~~~~~
* IPv4
* IPv6
* ESP
* Tunnel mode
* Transport mode(IPv4)
* ESN
* Anti-replay
* UDP Encapsulation
* AES-128/192/256-GCM
* AES-128/192/256-CBC-SHA1-HMAC
* AES-128/192/256-CBC-SHA256-128-HMAC


@@ -7,7 +7,7 @@ CNXK DMA Device Driver
 ======================
 The ``cnxk`` dmadev driver provides a poll-mode driver (PMD) for Marvell DPI DMA
-Hardware Accelerator block found in OCTEONTX2 and OCTEONTX3 family of SoCs.
+Hardware Accelerator block found in OCTEON 9 and OCTEON 10 family of SoCs.
 Each DMA queue is exposed as a VF function when SRIOV is enabled.
 The block supports following modes of DMA transfers:


@@ -1,30 +0,0 @@
;
; Supported features of the 'octeontx2' eventdev driver.
;
; Refer to default.ini for the full list of available PMD features.
;
[Scheduling Features]
queue_qos = Y
distributed_sched = Y
queue_all_types = Y
nonseq_mode = Y
runtime_port_link = Y
multiple_queue_port = Y
carry_flow_id = Y
maintenance_free = Y
[Eth Rx adapter Features]
internal_port = Y
multi_eventq = Y
[Eth Tx adapter Features]
internal_port = Y
[Crypto adapter Features]
internal_port_op_new = Y
internal_port_op_fwd = Y
internal_port_qp_ev_bind = Y
[Timer adapter Features]
internal_port = Y
periodic = Y


@@ -19,5 +19,4 @@ application through the eventdev API.
 dsw
 sw
 octeontx
-octeontx2
 opdl


@@ -1,178 +0,0 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2019 Marvell International Ltd.
OCTEON TX2 SSO Eventdev Driver
===============================
The OCTEON TX2 SSO PMD (**librte_event_octeontx2**) provides poll mode
eventdev driver support for the inbuilt event device found in the **Marvell OCTEON TX2**
SoC family.
More information about OCTEON TX2 SoC can be found at `Marvell Official Website
<https://www.marvell.com/embedded-processors/infrastructure-processors/>`_.
Features
--------
Features of the OCTEON TX2 SSO PMD are:
- 256 Event queues
- 26 (dual) and 52 (single) Event ports
- HW event scheduler
- Supports 1M flows per event queue
- Flow based event pipelining
- Flow pinning support in flow based event pipelining
- Queue based event pipelining
- Supports ATOMIC, ORDERED, PARALLEL schedule types per flow
- Event scheduling QoS based on event queue priority
- Open system with configurable amount of outstanding events limited only by
DRAM
- HW accelerated dequeue timeout support to enable power management
- HW managed event timers support through TIM, with high precision and
time granularity of 2.5us.
- Up to 256 TIM rings aka event timer adapters.
- Up to 8 rings traversed in parallel.
- HW managed packets enqueued from ethdev to eventdev exposed through event eth
RX adapter.
- N:1 ethernet device Rx queue to Event queue mapping.
- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
capability while maintaining receive packet order.
- Full Rx/Tx offload support defined through ethdev queue config.
Prerequisites and Compilation procedure
---------------------------------------
See :doc:`../platform/octeontx2` for setup information.
Runtime Config Options
----------------------
- ``Maximum number of in-flight events`` (default ``8192``)
In **Marvell OCTEON TX2** the max number of in-flight events are only limited
by DRAM size, the ``xae_cnt`` devargs parameter is introduced to provide
upper limit for in-flight events.
For example::
-a 0002:0e:00.0,xae_cnt=16384
- ``Force legacy mode``
The ``single_ws`` devargs parameter is introduced to force legacy mode i.e
single workslot mode in SSO and disable the default dual workslot mode.
For example::
-a 0002:0e:00.0,single_ws=1
- ``Event Group QoS support``
SSO GGRPs i.e. queue uses DRAM & SRAM buffers to hold in-flight
events. By default the buffers are assigned to the SSO GGRPs to
satisfy minimum HW requirements. SSO is free to assign the remaining
buffers to GGRPs based on a preconfigured threshold.
We can control the QoS of SSO GGRP by modifying the above mentioned
thresholds. GGRPs that have higher importance can be assigned higher
thresholds than the rest. The dictionary format is as follows
[Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ] expressed in percentages, 0 represents
default.
For example::
-a 0002:0e:00.0,qos=[1-50-50-50]
- ``TIM disable NPA``
By default chunks are allocated from NPA then TIM can automatically free
them when traversing the list of chunks. The ``tim_disable_npa`` devargs
parameter disables NPA and uses software mempool to manage chunks
For example::
-a 0002:0e:00.0,tim_disable_npa=1
- ``TIM modify chunk slots``
The ``tim_chnk_slots`` devargs can be used to modify number of chunk slots.
Chunks are used to store event timers, a chunk can be visualised as an array
where the last element points to the next chunk and rest of them are used to
store events. TIM traverses the list of chunks and enqueues the event timers
to SSO. The default value is 255 and the max value is 4095.
For example::
-a 0002:0e:00.0,tim_chnk_slots=1023
- ``TIM enable arm/cancel statistics``
The ``tim_stats_ena`` devargs can be used to enable arm and cancel stats of
event timer adapter.
For example::
-a 0002:0e:00.0,tim_stats_ena=1
- ``TIM limit max rings reserved``
The ``tim_rings_lmt`` devargs can be used to limit the max number of TIM
rings i.e. event timer adapter reserved on probe. Since, TIM rings are HW
resources we can avoid starving other applications by not grabbing all the
rings.
For example::
-a 0002:0e:00.0,tim_rings_lmt=5
- ``TIM ring control internal parameters``
When using multiple TIM rings the ``tim_ring_ctl`` devargs can be used to
control each TIM rings internal parameters uniquely. The following dict
format is expected [ring-chnk_slots-disable_npa-stats_ena]. 0 represents
default values.
For Example::
-a 0002:0e:00.0,tim_ring_ctl=[2-1023-1-0]
- ``Lock NPA contexts in NDC``
Lock NPA aura and pool contexts in NDC cache.
The device args take hexadecimal bitmask where each bit represent the
corresponding aura/pool id.
For example::
-a 0002:0e:00.0,npa_lock_mask=0xf
- ``Force Rx Back pressure``
Force Rx back pressure when same mempool is used across ethernet device
connected to event device.
For example::
-a 0002:0e:00.0,force_rx_bp=1
Debugging Options
-----------------
.. _table_octeontx2_event_debug_options:
.. table:: OCTEON TX2 event device debug options
+---+------------+-------------------------------------------------------+
| # | Component | EAL log command |
+===+============+=======================================================+
| 1 | SSO | --log-level='pmd\.event\.octeontx2,8' |
+---+------------+-------------------------------------------------------+
| 2 | TIM | --log-level='pmd\.event\.octeontx2\.timer,8' |
+---+------------+-------------------------------------------------------+
Limitations
-----------
Rx adapter support
~~~~~~~~~~~~~~~~~~
Using the same mempool for all the ethernet device ports connected to
event device would cause back pressure to be asserted only on the first
ethernet device.
Back pressure is automatically disabled when using same mempool for all the
ethernet devices connected to event device to override this applications can
use `force_rx_bp=1` device arguments.
Using unique mempool per each ethernet device is recommended when they are
connected to event device.


@@ -13,6 +13,5 @@ application through the mempool API.
 cnxk
 octeontx
-octeontx2
 ring
 stack


@@ -1,92 +0,0 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2019 Marvell International Ltd.
OCTEON TX2 NPA Mempool Driver
=============================
The OCTEON TX2 NPA PMD (**librte_mempool_octeontx2**) provides mempool
driver support for the integrated mempool device found in **Marvell OCTEON TX2** SoC family.
More information about OCTEON TX2 SoC can be found at `Marvell Official Website
<https://www.marvell.com/embedded-processors/infrastructure-processors/>`_.
Features
--------
OCTEON TX2 NPA PMD supports:
- Up to 128 NPA LFs
- 1M Pools per LF
- HW mempool manager
- Ethdev Rx buffer allocation in HW to save CPU cycles in the Rx path.
- Ethdev Tx buffer recycling in HW to save CPU cycles in the Tx path.
Prerequisites and Compilation procedure
---------------------------------------
See :doc:`../platform/octeontx2` for setup information.
Pre-Installation Configuration
------------------------------
Runtime Config Options
~~~~~~~~~~~~~~~~~~~~~~
- ``Maximum number of mempools per application`` (default ``128``)
The maximum number of mempools per application needs to be configured on
HW during mempool driver initialization. HW can support up to 1M mempools,
Since each mempool costs set of HW resources, the ``max_pools`` ``devargs``
parameter is being introduced to configure the number of mempools required
for the application.
For example::
-a 0002:02:00.0,max_pools=512
With the above configuration, the driver will set up only 512 mempools for
the given application to save HW resources.
.. note::
Since this configuration is per application, the end user needs to
provide ``max_pools`` parameter to the first PCIe device probed by the given
application.
- ``Lock NPA contexts in NDC``
Lock NPA aura and pool contexts in NDC cache.
The device args take hexadecimal bitmask where each bit represent the
corresponding aura/pool id.
For example::
-a 0002:02:00.0,npa_lock_mask=0xf
Debugging Options
~~~~~~~~~~~~~~~~~
.. _table_octeontx2_mempool_debug_options:
.. table:: OCTEON TX2 mempool debug options
+---+------------+-------------------------------------------------------+
| # | Component | EAL log command |
+===+============+=======================================================+
| 1 | NPA | --log-level='pmd\.mempool.octeontx2,8' |
+---+------------+-------------------------------------------------------+
Standalone mempool device
~~~~~~~~~~~~~~~~~~~~~~~~~
The ``usertools/dpdk-devbind.py`` script shall enumerate all the mempool devices
available in the system. In order to avoid, the end user to bind the mempool
device prior to use ethdev and/or eventdev device, the respective driver
configures an NPA LF and attach to the first probed ethdev or eventdev device.
In case, if end user need to run mempool as a standalone device
(without ethdev or eventdev), end user needs to bind a mempool device using
``usertools/dpdk-devbind.py``
Example command to run ``mempool_autotest`` test with standalone OCTEONTX2 NPA device::
echo "mempool_autotest" | <build_dir>/app/test/dpdk-test -c 0xf0 --mbuf-pool-ops-name="octeontx2_npa"


@@ -178,7 +178,7 @@ Runtime Config Options
 * ``rss_adder<7:0> = flow_tag<7:0>``
 Latter one aligns with standard NIC behavior vs former one is a legacy
-RSS adder scheme used in OCTEON TX2 products.
+RSS adder scheme used in OCTEON 9 products.
 By default, the driver runs in the latter mode.
 Setting this flag to 1 to select the legacy mode.

@@ -291,7 +291,7 @@ Limitations
 The OCTEON CN9K/CN10K SoC family NIC has inbuilt HW assisted external mempool manager.
 ``net_cnxk`` PMD only works with ``mempool_cnxk`` mempool handler
 as it is performance wise most effective way for packet allocation and Tx buffer
-recycling on OCTEON TX2 SoC platform.
+recycling on OCTEON 9 SoC platform.
 CRC stripping
 ~~~~~~~~~~~~~


@@ -1,97 +0,0 @@
;
; Supported features of the 'octeontx2' network poll mode driver.
;
; Refer to default.ini for the full list of available PMD features.
;
[Features]
Speed capabilities = Y
Rx interrupt = Y
Lock-free Tx queue = Y
SR-IOV = Y
Multiprocess aware = Y
Link status = Y
Link status event = Y
Runtime Rx queue setup = Y
Runtime Tx queue setup = Y
Burst mode info = Y
Fast mbuf free = Y
Free Tx mbuf on demand = Y
Queue start/stop = Y
MTU update = Y
TSO = Y
Promiscuous mode = Y
Allmulticast mode = Y
Unicast MAC filter = Y
Multicast MAC filter = Y
RSS hash = Y
RSS key update = Y
RSS reta update = Y
Inner RSS = Y
Inline protocol = Y
VLAN filter = Y
Flow control = Y
Rate limitation = Y
Scattered Rx = Y
VLAN offload = Y
QinQ offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
Inner L3 checksum = Y
Inner L4 checksum = Y
Packet type parsing = Y
Timesync = Y
Timestamp offload = Y
Rx descriptor status = Y
Tx descriptor status = Y
Basic stats = Y
Stats per queue = Y
Extended stats = Y
FW version = Y
Module EEPROM dump = Y
Registers dump = Y
Linux = Y
ARMv8 = Y
Usage doc = Y
[rte_flow items]
any = Y
arp_eth_ipv4 = Y
esp = Y
eth = Y
e_tag = Y
geneve = Y
gre = Y
gre_key = Y
gtpc = Y
gtpu = Y
higig2 = Y
icmp = Y
ipv4 = Y
ipv6 = Y
ipv6_ext = Y
mpls = Y
nvgre = Y
raw = Y
sctp = Y
tcp = Y
udp = Y
vlan = Y
vxlan = Y
vxlan_gpe = Y
[rte_flow actions]
count = Y
drop = Y
flag = Y
mark = Y
of_pop_vlan = Y
of_push_vlan = Y
of_set_vlan_pcp = Y
of_set_vlan_vid = Y
pf = Y
port_id = Y
port_representor = Y
queue = Y
rss = Y
security = Y
vf = Y


@@ -1,48 +0,0 @@
;
; Supported features of the 'octeontx2_vec' network poll mode driver.
;
; Refer to default.ini for the full list of available PMD features.
;
[Features]
Speed capabilities = Y
Lock-free Tx queue = Y
SR-IOV = Y
Multiprocess aware = Y
Link status = Y
Link status event = Y
Runtime Rx queue setup = Y
Runtime Tx queue setup = Y
Burst mode info = Y
Fast mbuf free = Y
Free Tx mbuf on demand = Y
Queue start/stop = Y
MTU update = Y
Promiscuous mode = Y
Allmulticast mode = Y
Unicast MAC filter = Y
Multicast MAC filter = Y
RSS hash = Y
RSS key update = Y
RSS reta update = Y
Inner RSS = Y
VLAN filter = Y
Flow control = Y
Rate limitation = Y
VLAN offload = Y
QinQ offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
Inner L3 checksum = Y
Inner L4 checksum = Y
Packet type parsing = Y
Rx descriptor status = Y
Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
Stats per queue = Y
FW version = Y
Module EEPROM dump = Y
Registers dump = Y
Linux = Y
ARMv8 = Y
Usage doc = Y


@@ -1,45 +0,0 @@
;
; Supported features of the 'octeontx2_vf' network poll mode driver.
;
; Refer to default.ini for the full list of available PMD features.
;
[Features]
Speed capabilities = Y
Lock-free Tx queue = Y
Multiprocess aware = Y
Rx interrupt = Y
Link status = Y
Link status event = Y
Runtime Rx queue setup = Y
Runtime Tx queue setup = Y
Burst mode info = Y
Fast mbuf free = Y
Free Tx mbuf on demand = Y
Queue start/stop = Y
TSO = Y
RSS hash = Y
RSS key update = Y
RSS reta update = Y
Inner RSS = Y
Inline protocol = Y
VLAN filter = Y
Rate limitation = Y
Scattered Rx = Y
VLAN offload = Y
QinQ offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
Inner L3 checksum = Y
Inner L4 checksum = Y
Packet type parsing = Y
Rx descriptor status = Y
Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
Stats per queue = Y
FW version = Y
Module EEPROM dump = Y
Registers dump = Y
Linux = Y
ARMv8 = Y
Usage doc = Y


@@ -52,7 +52,6 @@ Network Interface Controller Drivers
 ngbe
 null
 octeontx
-octeontx2
 octeontx_ep
 pfe
 qede


@@ -1,465 +0,0 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(C) 2019 Marvell International Ltd.
OCTEON TX2 Poll Mode driver
===========================
The OCTEON TX2 ETHDEV PMD (**librte_net_octeontx2**) provides poll mode ethdev
driver support for the inbuilt network device found in **Marvell OCTEON TX2**
SoC family as well as for their virtual functions (VF) in SR-IOV context.
More information can be found at `Marvell Official Website
<https://www.marvell.com/embedded-processors/infrastructure-processors>`_.
Features
--------
Features of the OCTEON TX2 Ethdev PMD are:
- Packet type information
- Promiscuous mode
- Jumbo frames
- SR-IOV VF
- Lock-free Tx queue
- Multiple queues for TX and RX
- Receiver Side Scaling (RSS)
- MAC/VLAN filtering
- Multicast MAC filtering
- Generic flow API
- Inner and Outer Checksum offload
- VLAN/QinQ stripping and insertion
- Port hardware statistics
- Link state information
- Link flow control
- MTU update
- Scatter-Gather IO support
- Vector Poll mode driver
- Debug utilities - Context dump and error interrupt support
- IEEE1588 timestamping
- HW offloaded `ethdev Rx queue` to `eventdev event queue` packet injection
- Support Rx interrupt
- Inline IPsec processing support
- :ref:`Traffic Management API <otx2_tmapi>`
Prerequisites
-------------
See :doc:`../platform/octeontx2` for setup information.
Driver compilation and testing
------------------------------
Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
for details.
#. Running testpmd:
Follow instructions available in the document
:ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
to run testpmd.
Example output:
.. code-block:: console
./<build_dir>/app/dpdk-testpmd -c 0x300 -a 0002:02:00.0 -- --portmask=0x1 --nb-cores=1 --port-topology=loop --rxq=1 --txq=1
EAL: Detected 24 lcore(s)
EAL: Detected 1 NUMA nodes
EAL: Multi-process socket /var/run/dpdk/rte/mp_socket
EAL: No available hugepages reported in hugepages-2048kB
EAL: Probing VFIO support...
EAL: VFIO support initialized
EAL: PCI device 0002:02:00.0 on NUMA socket 0
EAL: probe driver: 177d:a063 net_octeontx2
EAL: using IOMMU type 1 (Type 1)
testpmd: create a new mbuf pool <mbuf_pool_socket_0>: n=267456, size=2176, socket=0
testpmd: preferred mempool ops selected: octeontx2_npa
Configuring Port 0 (socket 0)
PMD: Port 0: Link Up - speed 40000 Mbps - full-duplex
Port 0: link state change event
Port 0: 36:10:66:88:7A:57
Checking link statuses...
Done
No commandline core given, start packet forwarding
io packet forwarding - ports=1 - cores=1 - streams=1 - NUMA support enabled, MP allocation mode: native
Logical Core 9 (socket 0) forwards packets on 1 streams:
RX P=0/Q=0 (socket 0) -> TX P=0/Q=0 (socket 0) peer=02:00:00:00:00:00
io packet forwarding packets/burst=32
nb forwarding cores=1 - nb forwarding ports=1
port 0: RX queue number: 1 Tx queue number: 1
Rx offloads=0x0 Tx offloads=0x10000
RX queue: 0
RX desc=512 - RX free threshold=0
RX threshold registers: pthresh=0 hthresh=0 wthresh=0
RX Offloads=0x0
TX queue: 0
TX desc=512 - TX free threshold=0
TX threshold registers: pthresh=0 hthresh=0 wthresh=0
TX offloads=0x10000 - TX RS bit threshold=0
Press enter to exit
Runtime Config Options
----------------------
- ``Rx&Tx scalar mode enable`` (default ``0``)
Ethdev supports both scalar and vector mode, it may be selected at runtime
using ``scalar_enable`` ``devargs`` parameter.
- ``RSS reta size`` (default ``64``)
RSS redirection table size may be configured during runtime using ``reta_size``
``devargs`` parameter.
For example::
-a 0002:02:00.0,reta_size=256
With the above configuration, reta table of size 256 is populated.
- ``Flow priority levels`` (default ``3``)
RTE Flow priority levels can be configured during runtime using
``flow_max_priority`` ``devargs`` parameter.
For example::
-a 0002:02:00.0,flow_max_priority=10
With the above configuration, priority level was set to 10 (0-9). Max
priority level supported is 32.
- ``Reserve Flow entries`` (default ``8``)
RTE flow entries can be pre allocated and the size of pre allocation can be
selected runtime using ``flow_prealloc_size`` ``devargs`` parameter.
For example::
-a 0002:02:00.0,flow_prealloc_size=4
With the above configuration, pre alloc size was set to 4. Max pre alloc
size supported is 32.
- ``Max SQB buffer count`` (default ``512``)
Send queue descriptor buffer count may be limited during runtime using
``max_sqb_count`` ``devargs`` parameter.
For example::
-a 0002:02:00.0,max_sqb_count=64
With the above configuration, each send queue's descriptor buffer count is
limited to a maximum of 64 buffers.
- ``Switch header enable`` (default ``none``)
A port can be configured to a specific switch header type by using
``switch_header`` ``devargs`` parameter.
For example::
-a 0002:02:00.0,switch_header="higig2"
With the above configuration, higig2 will be enabled on that port and the
traffic on this port should be higig2 traffic only. Supported switch header
types are "chlen24b", "chlen90b", "dsa", "exdsa", "higig2" and "vlan_exdsa".
- ``RSS tag as XOR`` (default ``0``)
C0 HW revision onward, The HW gives an option to configure the RSS adder as
* ``rss_adder<7:0> = flow_tag<7:0> ^ flow_tag<15:8> ^ flow_tag<23:16> ^ flow_tag<31:24>``
* ``rss_adder<7:0> = flow_tag<7:0>``
Latter one aligns with standard NIC behavior vs former one is a legacy
RSS adder scheme used in OCTEON TX2 products.
By default, the driver runs in the latter mode from C0 HW revision onward.
Setting this flag to 1 to select the legacy mode.
For example to select the legacy mode(RSS tag adder as XOR)::
-a 0002:02:00.0,tag_as_xor=1
- ``Max SPI for inbound inline IPsec`` (default ``1``)
Max SPI supported for inbound inline IPsec processing can be specified by
``ipsec_in_max_spi`` ``devargs`` parameter.
For example::
-a 0002:02:00.0,ipsec_in_max_spi=128
With the above configuration, application can enable inline IPsec processing
on 128 SAs (SPI 0-127).
- ``Lock Rx contexts in NDC cache``
Lock Rx contexts in NDC cache by using ``lock_rx_ctx`` parameter.
For example::
-a 0002:02:00.0,lock_rx_ctx=1
- ``Lock Tx contexts in NDC cache``
Lock Tx contexts in NDC cache by using ``lock_tx_ctx`` parameter.
For example::
-a 0002:02:00.0,lock_tx_ctx=1
.. note::
Above devarg parameters are configurable per device, user needs to pass the
parameters to all the PCIe devices if application requires to configure on
all the ethdev ports.
- ``Lock NPA contexts in NDC``
Lock NPA aura and pool contexts in NDC cache.
The device args take hexadecimal bitmask where each bit represent the
corresponding aura/pool id.
For example::
-a 0002:02:00.0,npa_lock_mask=0xf
.. _otx2_tmapi:
Traffic Management API
----------------------
OCTEON TX2 PMD supports generic DPDK Traffic Management API which allows to
configure the following features:
#. Hierarchical scheduling
#. Single rate - Two color, Two rate - Three color shaping
Both DWRR and Static Priority(SP) hierarchical scheduling is supported.
Every parent can have atmost 10 SP Children and unlimited DWRR children.
Both PF & VF supports traffic management API with PF supporting 6 levels
and VF supporting 5 levels of topology.
Limitations
-----------
``mempool_octeontx2`` external mempool handler dependency
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The OCTEON TX2 SoC family NIC has inbuilt HW assisted external mempool manager.
``net_octeontx2`` PMD only works with ``mempool_octeontx2`` mempool handler
as it is performance wise most effective way for packet allocation and Tx buffer
recycling on OCTEON TX2 SoC platform.
CRC stripping
~~~~~~~~~~~~~
The OCTEON TX2 SoC family NICs strip the CRC for every packet being received by
the host interface irrespective of the offload configuration.
Multicast MAC filtering
~~~~~~~~~~~~~~~~~~~~~~~
``net_octeontx2`` PMD supports multicast mac filtering feature only on physical
function devices.
SDP interface support
~~~~~~~~~~~~~~~~~~~~~
OCTEON TX2 SDP interface support is limited to PF device, No VF support.
Inline Protocol Processing
~~~~~~~~~~~~~~~~~~~~~~~~~~
``net_octeontx2`` PMD doesn't support the following features for packets to be
inline protocol processed.
- TSO offload
- VLAN/QinQ offload
- Fragmentation
Debugging Options
-----------------
.. _table_octeontx2_ethdev_debug_options:
.. table:: OCTEON TX2 ethdev debug options
+---+------------+-------------------------------------------------------+
| # | Component | EAL log command |
+===+============+=======================================================+
| 1 | NIX | --log-level='pmd\.net.octeontx2,8' |
+---+------------+-------------------------------------------------------+
| 2 | NPC | --log-level='pmd\.net.octeontx2\.flow,8' |
+---+------------+-------------------------------------------------------+
RTE Flow Support
----------------
The OCTEON TX2 SoC family NIC has support for the following patterns and
actions.
Patterns:
.. _table_octeontx2_supported_flow_item_types:
.. table:: Item types
+----+--------------------------------+
| # | Pattern Type |
+====+================================+
| 1 | RTE_FLOW_ITEM_TYPE_ETH |
+----+--------------------------------+
| 2 | RTE_FLOW_ITEM_TYPE_VLAN |
+----+--------------------------------+
| 3 | RTE_FLOW_ITEM_TYPE_E_TAG |
+----+--------------------------------+
| 4 | RTE_FLOW_ITEM_TYPE_IPV4 |
+----+--------------------------------+
| 5 | RTE_FLOW_ITEM_TYPE_IPV6 |
+----+--------------------------------+
| 6 | RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4|
+----+--------------------------------+
| 7 | RTE_FLOW_ITEM_TYPE_MPLS |
+----+--------------------------------+
| 8 | RTE_FLOW_ITEM_TYPE_ICMP |
+----+--------------------------------+
| 9 | RTE_FLOW_ITEM_TYPE_UDP |
+----+--------------------------------+
| 10 | RTE_FLOW_ITEM_TYPE_TCP |
+----+--------------------------------+
| 11 | RTE_FLOW_ITEM_TYPE_SCTP |
+----+--------------------------------+
| 12 | RTE_FLOW_ITEM_TYPE_ESP |
+----+--------------------------------+
| 13 | RTE_FLOW_ITEM_TYPE_GRE |
+----+--------------------------------+
| 14 | RTE_FLOW_ITEM_TYPE_NVGRE |
+----+--------------------------------+
| 15 | RTE_FLOW_ITEM_TYPE_VXLAN |
+----+--------------------------------+
| 16 | RTE_FLOW_ITEM_TYPE_GTPC |
+----+--------------------------------+
| 17 | RTE_FLOW_ITEM_TYPE_GTPU |
+----+--------------------------------+
| 18 | RTE_FLOW_ITEM_TYPE_GENEVE |
+----+--------------------------------+
| 19 | RTE_FLOW_ITEM_TYPE_VXLAN_GPE |
+----+--------------------------------+
| 20 | RTE_FLOW_ITEM_TYPE_IPV6_EXT |
+----+--------------------------------+
| 21 | RTE_FLOW_ITEM_TYPE_VOID |
+----+--------------------------------+
| 22 | RTE_FLOW_ITEM_TYPE_ANY |
+----+--------------------------------+
| 23 | RTE_FLOW_ITEM_TYPE_GRE_KEY |
+----+--------------------------------+
| 24 | RTE_FLOW_ITEM_TYPE_HIGIG2 |
+----+--------------------------------+
| 25 | RTE_FLOW_ITEM_TYPE_RAW |
+----+--------------------------------+
.. note::
``RTE_FLOW_ITEM_TYPE_GRE_KEY`` works only when checksum and routing
bits in the GRE header are equal to 0.
Actions:
.. _table_octeontx2_supported_ingress_action_types:
.. table:: Ingress action types
+----+-----------------------------------------+
| # | Action Type |
+====+=========================================+
| 1 | RTE_FLOW_ACTION_TYPE_VOID |
+----+-----------------------------------------+
| 2 | RTE_FLOW_ACTION_TYPE_MARK |
+----+-----------------------------------------+
| 3 | RTE_FLOW_ACTION_TYPE_FLAG |
+----+-----------------------------------------+
| 4 | RTE_FLOW_ACTION_TYPE_COUNT |
+----+-----------------------------------------+
| 5 | RTE_FLOW_ACTION_TYPE_DROP |
+----+-----------------------------------------+
| 6 | RTE_FLOW_ACTION_TYPE_QUEUE |
+----+-----------------------------------------+
| 7 | RTE_FLOW_ACTION_TYPE_RSS |
+----+-----------------------------------------+
| 8 | RTE_FLOW_ACTION_TYPE_SECURITY |
+----+-----------------------------------------+
| 9 | RTE_FLOW_ACTION_TYPE_PF |
+----+-----------------------------------------+
| 10 | RTE_FLOW_ACTION_TYPE_VF |
+----+-----------------------------------------+
| 11 | RTE_FLOW_ACTION_TYPE_OF_POP_VLAN |
+----+-----------------------------------------+
| 12 | RTE_FLOW_ACTION_TYPE_PORT_ID |
+----+-----------------------------------------+
| 13 | RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR |
+----+-----------------------------------------+
.. note::
``RTE_FLOW_ACTION_TYPE_PORT_ID``, ``RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR``
are only supported between PF and its VFs.
.. _table_octeontx2_supported_egress_action_types:
.. table:: Egress action types
+----+-----------------------------------------+
| # | Action Type |
+====+=========================================+
| 1 | RTE_FLOW_ACTION_TYPE_COUNT |
+----+-----------------------------------------+
| 2 | RTE_FLOW_ACTION_TYPE_DROP |
+----+-----------------------------------------+
| 3 | RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN |
+----+-----------------------------------------+
| 4 | RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID |
+----+-----------------------------------------+
| 5 | RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP |
+----+-----------------------------------------+
Custom protocols supported in RTE Flow
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``RTE_FLOW_ITEM_TYPE_RAW`` can be used to parse the below custom protocols.
* ``vlan_exdsa`` and ``exdsa`` can be parsed at L2 level.
* ``NGIO`` can be parsed at L3 level.
For ``vlan_exdsa`` and ``exdsa``, the port has to be configured with the
respective switch header.
For example::
-a 0002:02:00.0,switch_header="vlan_exdsa"
The below fields of ``struct rte_flow_item_raw`` shall be used to specify the
pattern.
- ``relative`` Selects the layer at which parsing is done.
- 0 for ``exdsa`` and ``vlan_exdsa``.
- 1 for ``NGIO``.
- ``offset`` The offset in the header where the pattern should be matched.
- ``length`` Length of the pattern.
- ``pattern`` Pattern as a byte string.
Example usage in testpmd::
./dpdk-testpmd -c 3 -w 0002:02:00.0,switch_header=exdsa -- -i \
--rx-offloads=0x00080000 --rxq 8 --txq 8
testpmd> flow create 0 ingress pattern eth / raw relative is 0 pattern \
spec ab pattern mask ab offset is 4 / end actions queue index 1 / end


@@ -5,7 +5,7 @@ OCTEON TX EP Poll Mode driver
 =============================
 The OCTEON TX EP ETHDEV PMD (**librte_pmd_octeontx_ep**) provides poll mode
-ethdev driver support for the virtual functions (VF) of **Marvell OCTEON TX2**
+ethdev driver support for the virtual functions (VF) of **Marvell OCTEON 9**
 and **Cavium OCTEON TX** families of adapters in SR-IOV context.
 More information can be found at `Marvell Official Website

@@ -24,4 +24,4 @@ must be installed separately:
 allocates resources such as number of VFs, input/output queues for itself and
 the number of i/o queues each VF can use.
-See :doc:`../platform/octeontx2` for SDP interface information which provides PCIe endpoint support for a remote host.
+See :doc:`../platform/cnxk` for SDP interface information which provides PCIe endpoint support for a remote host.


@@ -13,6 +13,9 @@ More information about CN9K and CN10K SoC can be found at `Marvell Official Webs
 Supported OCTEON cnxk SoCs
 --------------------------
+- CN93xx
+- CN96xx
+- CN98xx
 - CN106xx
 - CNF105xx

@@ -583,6 +586,15 @@ Cross Compilation
 Refer to :doc:`../linux_gsg/cross_build_dpdk_for_arm64` for generic arm64 details.
+CN9K:
+.. code-block:: console
+meson build --cross-file config/arm/arm64_cn9k_linux_gcc
+ninja -C build
+CN10K:
 .. code-block:: console
 meson build --cross-file config/arm/arm64_cn10k_linux_gcc

(Diffs suppressed for two deleted image files under doc/guides/platform/img/octeontx2_*: 119 KiB and 106 KiB.)


@@ -15,4 +15,3 @@ The following are platform specific guides and setup information.
 dpaa
 dpaa2
 octeontx
-octeontx2


@@ -1,520 +0,0 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2019 Marvell International Ltd.
Marvell OCTEON TX2 Platform Guide
=================================
This document gives an overview of **Marvell OCTEON TX2** RVU H/W block,
packet flow and procedure to build DPDK on OCTEON TX2 platform.
More information about OCTEON TX2 SoC can be found at `Marvell Official Website
<https://www.marvell.com/embedded-processors/infrastructure-processors/>`_.
Supported OCTEON TX2 SoCs
-------------------------
- CN98xx
- CN96xx
- CN93xx
OCTEON TX2 Resource Virtualization Unit architecture
----------------------------------------------------
The :numref:`figure_octeontx2_resource_virtualization` diagram depicts the
RVU architecture and a resource provisioning example.
.. _figure_octeontx2_resource_virtualization:
.. figure:: img/octeontx2_resource_virtualization.*
OCTEON TX2 Resource virtualization architecture and provisioning example
Resource Virtualization Unit (RVU) on Marvell's OCTEON TX2 SoC maps HW
resources belonging to the network, crypto and other functional blocks onto
PCI-compatible physical and virtual functions.
Each functional block has multiple local functions (LFs) for
provisioning to different PCIe devices. RVU supports multiple PCIe SRIOV
physical functions (PFs) and virtual functions (VFs).
The :numref:`table_octeontx2_rvu_dpdk_mapping` shows the various local
functions (LFs) provided by the RVU and its functional mapping to
DPDK subsystem.
.. _table_octeontx2_rvu_dpdk_mapping:
.. table:: RVU managed functional blocks and its mapping to DPDK subsystem
+---+-----+--------------------------------------------------------------+
| # | LF | DPDK subsystem mapping |
+===+=====+==============================================================+
| 1 | NIX | rte_ethdev, rte_tm, rte_event_eth_[rt]x_adapter, rte_security|
+---+-----+--------------------------------------------------------------+
| 2 | NPA | rte_mempool |
+---+-----+--------------------------------------------------------------+
| 3 | NPC | rte_flow |
+---+-----+--------------------------------------------------------------+
| 4 | CPT | rte_cryptodev, rte_event_crypto_adapter |
+---+-----+--------------------------------------------------------------+
| 5 | SSO | rte_eventdev |
+---+-----+--------------------------------------------------------------+
| 6 | TIM | rte_event_timer_adapter |
+---+-----+--------------------------------------------------------------+
| 7 | LBK | rte_ethdev |
+---+-----+--------------------------------------------------------------+
| 8 | DPI | rte_rawdev |
+---+-----+--------------------------------------------------------------+
| 9 | SDP | rte_ethdev |
+---+-----+--------------------------------------------------------------+
| 10| REE | rte_regexdev |
+---+-----+--------------------------------------------------------------+
PF0 is called the administrative / admin function (AF) and has exclusive
privileges to provision RVU functional block's LFs to each of the PF/VF.
PF/VFs communicates with AF via a shared memory region (mailbox).Upon receiving
requests from PF/VF, AF does resource provisioning and other HW configuration.
AF is always attached to host, but PF/VFs may be used by host kernel itself,
or attached to VMs or to userspace applications like DPDK, etc. So, AF has to
handle provisioning/configuration requests sent by any device from any domain.
The AF driver does not receive or process any data.
It is only a configuration driver used in control path.
The :numref:`figure_octeontx2_resource_virtualization` diagram also shows a
resource provisioning example where,
1. PFx and PFx-VF0 bound to Linux netdev driver.
2. PFx-VF1 ethdev driver bound to the first DPDK application.
3. PFy ethdev driver, PFy-VF0 ethdev driver, PFz eventdev driver, PFm-VF0 cryptodev driver bound to the second DPDK application.
LBK HW Access
-------------
Loopback HW Unit (LBK) receives packets from NIX-RX and sends packets back to NIX-TX.
The loopback block has N channels and contains data buffering that is shared across
all channels. The LBK HW Unit is abstracted using ethdev subsystem, Where PF0's
VFs are exposed as ethdev device and odd-even pairs of VFs are tied together,
that is, packets sent on odd VF end up received on even VF and vice versa.
This would enable HW accelerated means of communication between two domains
where even VF bound to the first domain and odd VF bound to the second domain.
Typical application usage models are,
#. Communication between the Linux kernel and DPDK application.
#. Exception path to Linux kernel from DPDK application as SW ``KNI`` replacement.
#. Communication between two different DPDK applications.
SDP interface
-------------
System DPI Packet Interface unit(SDP) provides PCIe endpoint support for remote host
to DMA packets into and out of OCTEON TX2 SoC. SDP interface comes in to live only when
OCTEON TX2 SoC is connected in PCIe endpoint mode. It can be used to send/receive
packets to/from remote host machine using input/output queue pairs exposed to it.
SDP interface receives input packets from remote host from NIX-RX and sends packets
to remote host using NIX-TX. Remote host machine need to use corresponding driver
(kernel/user mode) to communicate with SDP interface on OCTEON TX2 SoC. SDP supports
single PCIe SRIOV physical function(PF) and multiple virtual functions(VF's). Users
can bind PF or VF to use SDP interface and it will be enumerated as ethdev ports.
The primary use case for SDP is to enable the smart NIC use case. Typical usage models are,
#. Communication channel between remote host and OCTEON TX2 SoC over PCIe.
#. Transfer packets received from network interface to remote host over PCIe and
vice-versa.
OCTEON TX2 packet flow
----------------------
The :numref:`figure_octeontx2_packet_flow_hw_accelerators` diagram depicts
the packet flow on OCTEON TX2 SoC in conjunction with use of various HW accelerators.
.. _figure_octeontx2_packet_flow_hw_accelerators:
.. figure:: img/octeontx2_packet_flow_hw_accelerators.*
OCTEON TX2 packet flow in conjunction with use of HW accelerators
HW Offload Drivers
------------------
This section lists dataplane H/W block(s) available in OCTEON TX2 SoC.
#. **Ethdev Driver**
See :doc:`../nics/octeontx2` for NIX Ethdev driver information.
#. **Mempool Driver**
See :doc:`../mempool/octeontx2` for NPA mempool driver information.
#. **Event Device Driver**
See :doc:`../eventdevs/octeontx2` for SSO event device driver information.
#. **Crypto Device Driver**
See :doc:`../cryptodevs/octeontx2` for CPT crypto device driver information.
Procedure to Setup Platform
---------------------------
There are three main prerequisites for setting up DPDK on an OCTEON TX2
compatible board:
1. **OCTEON TX2 Linux kernel driver**
The dependent kernel drivers can be obtained from the
`kernel.org <https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/net/ethernet/marvell/octeontx2>`_.
Alternatively, the Marvell SDK also provides the required kernel drivers.
Linux kernel should be configured with the following features enabled:
.. code-block:: console
# 64K pages enabled for better performance
CONFIG_ARM64_64K_PAGES=y
CONFIG_ARM64_VA_BITS_48=y
# huge pages support enabled
CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
# VFIO enabled with TYPE1 IOMMU at minimum
CONFIG_VFIO_IOMMU_TYPE1=y
CONFIG_VFIO_VIRQFD=y
CONFIG_VFIO=y
CONFIG_VFIO_NOIOMMU=y
CONFIG_VFIO_PCI=y
CONFIG_VFIO_PCI_MMAP=y
# SMMUv3 driver
CONFIG_ARM_SMMU_V3=y
# ARMv8.1 LSE atomics
CONFIG_ARM64_LSE_ATOMICS=y
# OCTEONTX2 drivers
CONFIG_OCTEONTX2_MBOX=y
CONFIG_OCTEONTX2_AF=y
# Enable if netdev PF driver required
CONFIG_OCTEONTX2_PF=y
# Enable if netdev VF driver required
CONFIG_OCTEONTX2_VF=y
CONFIG_CRYPTO_DEV_OCTEONTX2_CPT=y
# Enable if OCTEONTX2 DMA PF driver required
CONFIG_OCTEONTX2_DPI_PF=n
2. **ARM64 Linux Tool Chain**
For example, the *aarch64* Linaro Toolchain, which can be obtained from
`here <https://releases.linaro.org/components/toolchain/binaries/7.4-2019.02/aarch64-linux-gnu/>`_.
Alternatively, the Marvell SDK also provides a GNU GCC toolchain, which is
optimized for the OCTEON TX2 CPU.
3. **Root file system**
Any *aarch64* supporting root file system may be used. For example,
Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland, which can be obtained
from `<http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
Alternatively, the Marvell SDK provides a buildroot-based root file system.
The SDK includes all the above prerequisites necessary to bring up the OCTEON TX2 board.
- Follow the DPDK :doc:`../linux_gsg/index` to setup the basic DPDK environment.
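As a rough sketch of that basic environment setup (the paths and counts are examples; with 64K kernel pages the default huge page size is typically 512 MB, and module names may vary with the kernel configuration):
.. code-block:: console
# Verify the OCTEON TX2 RVU kernel drivers are present (built-in drivers
# will not show up in lsmod)
lsmod | grep -i octeontx2
# Reserve huge pages and mount hugetlbfs
echo 16 > /sys/kernel/mm/hugepages/hugepages-524288kB/nr_hugepages
mkdir -p /dev/huge
mount -t hugetlbfs nodev /dev/huge
# Load the VFIO module used to attach devices to DPDK
modprobe vfio-pci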
Debugging Options
-----------------
.. _table_octeontx2_common_debug_options:
.. table:: OCTEON TX2 common debug options
+---+------------+-------------------------------------------------------+
| # | Component | EAL log command |
+===+============+=======================================================+
| 1 | Common | --log-level='pmd\.octeontx2\.base,8' |
+---+------------+-------------------------------------------------------+
| 2 | Mailbox | --log-level='pmd\.octeontx2\.mbox,8' |
+---+------------+-------------------------------------------------------+
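The above log levels can be passed to any DPDK application on the command line; the invocation below is illustrative and assumes a ``dpdk-testpmd`` binary built in the ``build`` directory.
.. code-block:: console
# Enable verbose common and mailbox logs for the OCTEON TX2 PMDs
./build/app/dpdk-testpmd --log-level='pmd\.octeontx2\.base,8' \
--log-level='pmd\.octeontx2\.mbox,8' -- -i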
Debugfs support
~~~~~~~~~~~~~~~
The **OCTEON TX2 Linux kernel driver** provides support for dumping RVU block
context or stats using debugfs.
To enable ``debugfs``:
1. Compile the kernel with debugfs enabled, i.e. ``CONFIG_DEBUGFS=y``.
2. Boot OCTEON TX2 with a debugfs-enabled kernel.
3. Verify that ``debugfs`` is mounted by default with ``mount | grep -i debugfs``, or mount it manually by using:
.. code-block:: console
# mount -t debugfs none /sys/kernel/debug
Currently ``debugfs`` supports the following RVU blocks: NIX, NPA, NPC, NDC,
SSO and CGX.
The file structure under ``/sys/kernel/debug`` is as follows:
.. code-block:: console
octeontx2/
|-- cgx
| |-- cgx0
| | '-- lmac0
| | '-- stats
| |-- cgx1
| | |-- lmac0
| | | '-- stats
| | '-- lmac1
| | '-- stats
| '-- cgx2
| '-- lmac0
| '-- stats
|-- cpt
| |-- cpt_engines_info
| |-- cpt_engines_sts
| |-- cpt_err_info
| |-- cpt_lfs_info
| '-- cpt_pc
|-- nix
| |-- cq_ctx
| |-- ndc_rx_cache
| |-- ndc_rx_hits_miss
| |-- ndc_tx_cache
| |-- ndc_tx_hits_miss
| |-- qsize
| |-- rq_ctx
| |-- sq_ctx
| '-- tx_stall_hwissue
|-- npa
| |-- aura_ctx
| |-- ndc_cache
| |-- ndc_hits_miss
| |-- pool_ctx
| '-- qsize
|-- npc
| |-- mcam_info
| '-- rx_miss_act_stats
|-- rsrc_alloc
'-- sso
|-- hws
| '-- sso_hws_info
'-- hwgrp
|-- sso_hwgrp_aq_thresh
|-- sso_hwgrp_iaq_walk
|-- sso_hwgrp_pc
|-- sso_hwgrp_free_list_walk
|-- sso_hwgrp_ient_walk
'-- sso_hwgrp_taq_walk
RVU block LF allocation:
.. code-block:: console
cat /sys/kernel/debug/octeontx2/rsrc_alloc
pcifunc NPA NIX SSO GROUP SSOWS TIM CPT
PF1 0 0
PF4 1
PF13 0, 1 0, 1 0
CGX example usage:
.. code-block:: console
cat /sys/kernel/debug/octeontx2/cgx/cgx2/lmac0/stats
=======Link Status======
Link is UP 40000 Mbps
=======RX_STATS======
Received packets: 0
Octets of received packets: 0
Received PAUSE packets: 0
Received PAUSE and control packets: 0
Filtered DMAC0 (NIX-bound) packets: 0
Filtered DMAC0 (NIX-bound) octets: 0
Packets dropped due to RX FIFO full: 0
Octets dropped due to RX FIFO full: 0
Error packets: 0
Filtered DMAC1 (NCSI-bound) packets: 0
Filtered DMAC1 (NCSI-bound) octets: 0
NCSI-bound packets dropped: 0
NCSI-bound octets dropped: 0
=======TX_STATS======
Packets dropped due to excessive collisions: 0
Packets dropped due to excessive deferral: 0
Multiple collisions before successful transmission: 0
Single collisions before successful transmission: 0
Total octets sent on the interface: 0
Total frames sent on the interface: 0
Packets sent with an octet count < 64: 0
Packets sent with an octet count == 64: 0
Packets sent with an octet count of 65-127: 0
Packets sent with an octet count of 128-255: 0
Packets sent with an octet count of 256-511: 0
Packets sent with an octet count of 512-1023: 0
Packets sent with an octet count of 1024-1518: 0
Packets sent with an octet count of > 1518: 0
Packets sent to a broadcast DMAC: 0
Packets sent to the multicast DMAC: 0
Transmit underflow and were truncated: 0
Control/PAUSE packets sent: 0
CPT example usage:
.. code-block:: console
cat /sys/kernel/debug/octeontx2/cpt/cpt_pc
CPT instruction requests 0
CPT instruction latency 0
CPT NCB read requests 0
CPT NCB read latency 0
CPT read requests caused by UC fills 0
CPT active cycles pc 1395642
CPT clock count pc 5579867595493
NIX example usage:
.. code-block:: console
Usage: echo <nixlf> [cq number/all] > /sys/kernel/debug/octeontx2/nix/cq_ctx
cat /sys/kernel/debug/octeontx2/nix/cq_ctx
echo 0 0 > /sys/kernel/debug/octeontx2/nix/cq_ctx
cat /sys/kernel/debug/octeontx2/nix/cq_ctx
=====cq_ctx for nixlf:0 and qidx:0 is=====
W0: base 158ef1a00
W1: wrptr 0
W1: avg_con 0
W1: cint_idx 0
W1: cq_err 0
W1: qint_idx 0
W1: bpid 0
W1: bp_ena 0
W2: update_time 31043
W2:avg_level 255
W2: head 0
W2:tail 0
W3: cq_err_int_ena 5
W3:cq_err_int 0
W3: qsize 4
W3:caching 1
W3: substream 0x000
W3: ena 1
W3: drop_ena 1
W3: drop 64
W3: bp 0
NPA example usage:
.. code-block:: console
Usage: echo <npalf> [pool number/all] > /sys/kernel/debug/octeontx2/npa/pool_ctx
cat /sys/kernel/debug/octeontx2/npa/pool_ctx
echo 0 0 > /sys/kernel/debug/octeontx2/npa/pool_ctx
cat /sys/kernel/debug/octeontx2/npa/pool_ctx
======POOL : 0=======
W0: Stack base 1375bff00
W1: ena 1
W1: nat_align 1
W1: stack_caching 1
W1: stack_way_mask 0
W1: buf_offset 1
W1: buf_size 19
W2: stack_max_pages 24315
W2: stack_pages 24314
W3: op_pc 267456
W4: stack_offset 2
W4: shift 5
W4: avg_level 255
W4: avg_con 0
W4: fc_ena 0
W4: fc_stype 0
W4: fc_hyst_bits 0
W4: fc_up_crossing 0
W4: update_time 62993
W5: fc_addr 0
W6: ptr_start 1593adf00
W7: ptr_end 180000000
W8: err_int 0
W8: err_int_ena 7
W8: thresh_int 0
W8: thresh_int_ena 0
W8: thresh_up 0
W8: thresh_qint_idx 0
W8: err_qint_idx 0
NPC example usage:
.. code-block:: console
cat /sys/kernel/debug/octeontx2/npc/mcam_info
NPC MCAM info:
RX keywidth : 224bits
TX keywidth : 224bits
MCAM entries : 2048
Reserved : 158
Available : 1890
MCAM counters : 512
Reserved : 1
Available : 511
SSO example usage:
.. code-block:: console
Usage: echo [<hws>/all] > /sys/kernel/debug/octeontx2/sso/hws/sso_hws_info
echo 0 > /sys/kernel/debug/octeontx2/sso/hws/sso_hws_info
==================================================
SSOW HWS[0] Arbitration State 0x0
SSOW HWS[0] Guest Machine Control 0x0
SSOW HWS[0] SET[0] Group Mask[0] 0xffffffffffffffff
SSOW HWS[0] SET[0] Group Mask[1] 0xffffffffffffffff
SSOW HWS[0] SET[0] Group Mask[2] 0xffffffffffffffff
SSOW HWS[0] SET[0] Group Mask[3] 0xffffffffffffffff
SSOW HWS[0] SET[1] Group Mask[0] 0xffffffffffffffff
SSOW HWS[0] SET[1] Group Mask[1] 0xffffffffffffffff
SSOW HWS[0] SET[1] Group Mask[2] 0xffffffffffffffff
SSOW HWS[0] SET[1] Group Mask[3] 0xffffffffffffffff
==================================================
Compile DPDK
------------
DPDK may be compiled either natively on the OCTEON TX2 platform or cross-compiled on
an x86-based platform.
Native Compilation
~~~~~~~~~~~~~~~~~~
.. code-block:: console
meson build
ninja -C build
Cross Compilation
~~~~~~~~~~~~~~~~~
Refer to :doc:`../linux_gsg/cross_build_dpdk_for_arm64` for generic arm64 details.
.. code-block:: console
meson build --cross-file config/arm/arm64_octeontx2_linux_gcc
ninja -C build
.. note::
By default, meson cross compilation uses the ``aarch64-linux-gnu-gcc`` toolchain.
If the Marvell toolchain is available, it can be used by overriding the
c, cpp, ar and strip ``binaries`` attributes to the respective Marvell
toolchain binaries in the ``config/arm/arm64_octeontx2_linux_gcc`` file.
View File
@ -125,20 +125,3 @@ Deprecation Notices
applications should be updated to use the ``dmadev`` library instead, applications should be updated to use the ``dmadev`` library instead,
with the underlying HW-functionality being provided by the ``ioat`` or with the underlying HW-functionality being provided by the ``ioat`` or
``idxd`` dma drivers ``idxd`` dma drivers
* drivers/octeontx2: remove octeontx2 drivers
In view of enabling a unified driver for ``octeontx2(cn9k)``/``octeontx3(cn10k)``,
remove the ``drivers/octeontx2`` drivers and replace them with ``drivers/cnxk/``, which
supports both ``octeontx2(cn9k)`` and ``octeontx3(cn10k)`` SoCs.
This deprecation notice is to do the following actions in the DPDK v22.02 release.
#. Replace ``drivers/common/octeontx2/`` with ``drivers/common/cnxk/``
#. Replace ``drivers/mempool/octeontx2/`` with ``drivers/mempool/cnxk/``
#. Replace ``drivers/net/octeontx2/`` with ``drivers/net/cnxk/``
#. Replace ``drivers/event/octeontx2/`` with ``drivers/event/cnxk/``
#. Replace ``drivers/crypto/octeontx2/`` with ``drivers/crypto/cnxk/``
#. Rename ``drivers/regex/octeontx2/`` as ``drivers/regex/cn9k/``
#. Rename ``config/arm/arm64_octeontx2_linux_gcc`` as ``config/arm/arm64_cn9k_linux_gcc``
The last two actions are to align the naming convention with the cnxk scheme.
View File
@ -152,11 +152,11 @@ New Features
``eventdev Tx adapter``, ``eventdev Timer adapter`` and ``rawdev DMA`` ``eventdev Tx adapter``, ``eventdev Timer adapter`` and ``rawdev DMA``
drivers for various HW co-processors available in ``OCTEON TX2`` SoC. drivers for various HW co-processors available in ``OCTEON TX2`` SoC.
See :doc:`../platform/octeontx2` and driver information: See ``platform/octeontx2`` and driver information:
* :doc:`../nics/octeontx2` * ``nics/octeontx2``
* :doc:`../mempool/octeontx2` * ``mempool/octeontx2``
* :doc:`../eventdevs/octeontx2` * ``eventdevs/octeontx2``
* ``rawdevs/octeontx2_dma`` * ``rawdevs/octeontx2_dma``
* **Introduced the Intel NTB PMD.** * **Introduced the Intel NTB PMD.**
View File
@ -192,7 +192,7 @@ New Features
Added a new PMD for hardware crypto offload block on ``OCTEON TX2`` Added a new PMD for hardware crypto offload block on ``OCTEON TX2``
SoC. SoC.
See :doc:`../cryptodevs/octeontx2` for more details See ``cryptodevs/octeontx2`` for more details
* **Updated NXP crypto PMDs for PDCP support.** * **Updated NXP crypto PMDs for PDCP support.**
View File
@ -157,7 +157,6 @@ The following are the application command-line options:
crypto_mvsam crypto_mvsam
crypto_null crypto_null
crypto_octeontx crypto_octeontx
crypto_octeontx2
crypto_openssl crypto_openssl
crypto_qat crypto_qat
crypto_scheduler crypto_scheduler
View File
@ -8,5 +8,4 @@ drivers = [
'iavf', 'iavf',
'mvep', 'mvep',
'octeontx', 'octeontx',
'octeontx2',
] ]
File diff suppressed because it is too large
View File
@ -1,305 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef __OTX2_NPA_HW_H__
#define __OTX2_NPA_HW_H__
/* Register offsets */
#define NPA_AF_BLK_RST (0x0ull)
#define NPA_AF_CONST (0x10ull)
#define NPA_AF_CONST1 (0x18ull)
#define NPA_AF_LF_RST (0x20ull)
#define NPA_AF_GEN_CFG (0x30ull)
#define NPA_AF_NDC_CFG (0x40ull)
#define NPA_AF_NDC_SYNC (0x50ull)
#define NPA_AF_INP_CTL (0xd0ull)
#define NPA_AF_ACTIVE_CYCLES_PC (0xf0ull)
#define NPA_AF_AVG_DELAY (0x100ull)
#define NPA_AF_GEN_INT (0x140ull)
#define NPA_AF_GEN_INT_W1S (0x148ull)
#define NPA_AF_GEN_INT_ENA_W1S (0x150ull)
#define NPA_AF_GEN_INT_ENA_W1C (0x158ull)
#define NPA_AF_RVU_INT (0x160ull)
#define NPA_AF_RVU_INT_W1S (0x168ull)
#define NPA_AF_RVU_INT_ENA_W1S (0x170ull)
#define NPA_AF_RVU_INT_ENA_W1C (0x178ull)
#define NPA_AF_ERR_INT (0x180ull)
#define NPA_AF_ERR_INT_W1S (0x188ull)
#define NPA_AF_ERR_INT_ENA_W1S (0x190ull)
#define NPA_AF_ERR_INT_ENA_W1C (0x198ull)
#define NPA_AF_RAS (0x1a0ull)
#define NPA_AF_RAS_W1S (0x1a8ull)
#define NPA_AF_RAS_ENA_W1S (0x1b0ull)
#define NPA_AF_RAS_ENA_W1C (0x1b8ull)
#define NPA_AF_AQ_CFG (0x600ull)
#define NPA_AF_AQ_BASE (0x610ull)
#define NPA_AF_AQ_STATUS (0x620ull)
#define NPA_AF_AQ_DOOR (0x630ull)
#define NPA_AF_AQ_DONE_WAIT (0x640ull)
#define NPA_AF_AQ_DONE (0x650ull)
#define NPA_AF_AQ_DONE_ACK (0x660ull)
#define NPA_AF_AQ_DONE_TIMER (0x670ull)
#define NPA_AF_AQ_DONE_INT (0x680ull)
#define NPA_AF_AQ_DONE_ENA_W1S (0x690ull)
#define NPA_AF_AQ_DONE_ENA_W1C (0x698ull)
#define NPA_AF_LFX_AURAS_CFG(a) (0x4000ull | (uint64_t)(a) << 18)
#define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010ull | (uint64_t)(a) << 18)
#define NPA_AF_LFX_QINTS_CFG(a) (0x4100ull | (uint64_t)(a) << 18)
#define NPA_AF_LFX_QINTS_BASE(a) (0x4110ull | (uint64_t)(a) << 18)
#define NPA_PRIV_AF_INT_CFG (0x10000ull)
#define NPA_PRIV_LFX_CFG(a) (0x10010ull | (uint64_t)(a) << 8)
#define NPA_PRIV_LFX_INT_CFG(a) (0x10020ull | (uint64_t)(a) << 8)
#define NPA_AF_RVU_LF_CFG_DEBUG (0x10030ull)
#define NPA_AF_DTX_FILTER_CTL (0x10040ull)
#define NPA_LF_AURA_OP_ALLOCX(a) (0x10ull | (uint64_t)(a) << 3)
#define NPA_LF_AURA_OP_FREE0 (0x20ull)
#define NPA_LF_AURA_OP_FREE1 (0x28ull)
#define NPA_LF_AURA_OP_CNT (0x30ull)
#define NPA_LF_AURA_OP_LIMIT (0x50ull)
#define NPA_LF_AURA_OP_INT (0x60ull)
#define NPA_LF_AURA_OP_THRESH (0x70ull)
#define NPA_LF_POOL_OP_PC (0x100ull)
#define NPA_LF_POOL_OP_AVAILABLE (0x110ull)
#define NPA_LF_POOL_OP_PTR_START0 (0x120ull)
#define NPA_LF_POOL_OP_PTR_START1 (0x128ull)
#define NPA_LF_POOL_OP_PTR_END0 (0x130ull)
#define NPA_LF_POOL_OP_PTR_END1 (0x138ull)
#define NPA_LF_POOL_OP_INT (0x160ull)
#define NPA_LF_POOL_OP_THRESH (0x170ull)
#define NPA_LF_ERR_INT (0x200ull)
#define NPA_LF_ERR_INT_W1S (0x208ull)
#define NPA_LF_ERR_INT_ENA_W1C (0x210ull)
#define NPA_LF_ERR_INT_ENA_W1S (0x218ull)
#define NPA_LF_RAS (0x220ull)
#define NPA_LF_RAS_W1S (0x228ull)
#define NPA_LF_RAS_ENA_W1C (0x230ull)
#define NPA_LF_RAS_ENA_W1S (0x238ull)
#define NPA_LF_QINTX_CNT(a) (0x300ull | (uint64_t)(a) << 12)
#define NPA_LF_QINTX_INT(a) (0x310ull | (uint64_t)(a) << 12)
#define NPA_LF_QINTX_ENA_W1S(a) (0x320ull | (uint64_t)(a) << 12)
#define NPA_LF_QINTX_ENA_W1C(a) (0x330ull | (uint64_t)(a) << 12)
/* Enum offsets */
#define NPA_AQ_COMP_NOTDONE (0x0ull)
#define NPA_AQ_COMP_GOOD (0x1ull)
#define NPA_AQ_COMP_SWERR (0x2ull)
#define NPA_AQ_COMP_CTX_POISON (0x3ull)
#define NPA_AQ_COMP_CTX_FAULT (0x4ull)
#define NPA_AQ_COMP_LOCKERR (0x5ull)
#define NPA_AF_INT_VEC_RVU (0x0ull)
#define NPA_AF_INT_VEC_GEN (0x1ull)
#define NPA_AF_INT_VEC_AQ_DONE (0x2ull)
#define NPA_AF_INT_VEC_AF_ERR (0x3ull)
#define NPA_AF_INT_VEC_POISON (0x4ull)
#define NPA_AQ_INSTOP_NOP (0x0ull)
#define NPA_AQ_INSTOP_INIT (0x1ull)
#define NPA_AQ_INSTOP_WRITE (0x2ull)
#define NPA_AQ_INSTOP_READ (0x3ull)
#define NPA_AQ_INSTOP_LOCK (0x4ull)
#define NPA_AQ_INSTOP_UNLOCK (0x5ull)
#define NPA_AQ_CTYPE_AURA (0x0ull)
#define NPA_AQ_CTYPE_POOL (0x1ull)
#define NPA_BPINTF_NIX0_RX (0x0ull)
#define NPA_BPINTF_NIX1_RX (0x1ull)
#define NPA_AURA_ERR_INT_AURA_FREE_UNDER (0x0ull)
#define NPA_AURA_ERR_INT_AURA_ADD_OVER (0x1ull)
#define NPA_AURA_ERR_INT_AURA_ADD_UNDER (0x2ull)
#define NPA_AURA_ERR_INT_POOL_DIS (0x3ull)
#define NPA_AURA_ERR_INT_R4 (0x4ull)
#define NPA_AURA_ERR_INT_R5 (0x5ull)
#define NPA_AURA_ERR_INT_R6 (0x6ull)
#define NPA_AURA_ERR_INT_R7 (0x7ull)
#define NPA_LF_INT_VEC_ERR_INT (0x40ull)
#define NPA_LF_INT_VEC_POISON (0x41ull)
#define NPA_LF_INT_VEC_QINT_END (0x3full)
#define NPA_LF_INT_VEC_QINT_START (0x0ull)
#define NPA_INPQ_SSO (0x4ull)
#define NPA_INPQ_TIM (0x5ull)
#define NPA_INPQ_DPI (0x6ull)
#define NPA_INPQ_AURA_OP (0xeull)
#define NPA_INPQ_INTERNAL_RSV (0xfull)
#define NPA_INPQ_NIX0_RX (0x0ull)
#define NPA_INPQ_NIX1_RX (0x2ull)
#define NPA_INPQ_NIX0_TX (0x1ull)
#define NPA_INPQ_NIX1_TX (0x3ull)
#define NPA_INPQ_R_END (0xdull)
#define NPA_INPQ_R_START (0x7ull)
#define NPA_POOL_ERR_INT_OVFLS (0x0ull)
#define NPA_POOL_ERR_INT_RANGE (0x1ull)
#define NPA_POOL_ERR_INT_PERR (0x2ull)
#define NPA_POOL_ERR_INT_R3 (0x3ull)
#define NPA_POOL_ERR_INT_R4 (0x4ull)
#define NPA_POOL_ERR_INT_R5 (0x5ull)
#define NPA_POOL_ERR_INT_R6 (0x6ull)
#define NPA_POOL_ERR_INT_R7 (0x7ull)
#define NPA_NDC0_PORT_AURA0 (0x0ull)
#define NPA_NDC0_PORT_AURA1 (0x1ull)
#define NPA_NDC0_PORT_POOL0 (0x2ull)
#define NPA_NDC0_PORT_POOL1 (0x3ull)
#define NPA_NDC0_PORT_STACK0 (0x4ull)
#define NPA_NDC0_PORT_STACK1 (0x5ull)
#define NPA_LF_ERR_INT_AURA_DIS (0x0ull)
#define NPA_LF_ERR_INT_AURA_OOR (0x1ull)
#define NPA_LF_ERR_INT_AURA_FAULT (0xcull)
#define NPA_LF_ERR_INT_POOL_FAULT (0xdull)
#define NPA_LF_ERR_INT_STACK_FAULT (0xeull)
#define NPA_LF_ERR_INT_QINT_FAULT (0xfull)
/* Structures definitions */
/* NPA admin queue instruction structure */
struct npa_aq_inst_s {
uint64_t op : 4;
uint64_t ctype : 4;
uint64_t lf : 9;
uint64_t rsvd_23_17 : 7;
uint64_t cindex : 20;
uint64_t rsvd_62_44 : 19;
uint64_t doneint : 1;
uint64_t res_addr : 64; /* W1 */
};
/* NPA admin queue result structure */
struct npa_aq_res_s {
uint64_t op : 4;
uint64_t ctype : 4;
uint64_t compcode : 8;
uint64_t doneint : 1;
uint64_t rsvd_63_17 : 47;
uint64_t rsvd_127_64 : 64; /* W1 */
};
/* NPA aura operation write data structure */
struct npa_aura_op_wdata_s {
uint64_t aura : 20;
uint64_t rsvd_62_20 : 43;
uint64_t drop : 1;
};
/* NPA aura context structure */
struct npa_aura_s {
uint64_t pool_addr : 64;/* W0 */
uint64_t ena : 1;
uint64_t rsvd_66_65 : 2;
uint64_t pool_caching : 1;
uint64_t pool_way_mask : 16;
uint64_t avg_con : 9;
uint64_t rsvd_93 : 1;
uint64_t pool_drop_ena : 1;
uint64_t aura_drop_ena : 1;
uint64_t bp_ena : 2;
uint64_t rsvd_103_98 : 6;
uint64_t aura_drop : 8;
uint64_t shift : 6;
uint64_t rsvd_119_118 : 2;
uint64_t avg_level : 8;
uint64_t count : 36;
uint64_t rsvd_167_164 : 4;
uint64_t nix0_bpid : 9;
uint64_t rsvd_179_177 : 3;
uint64_t nix1_bpid : 9;
uint64_t rsvd_191_189 : 3;
uint64_t limit : 36;
uint64_t rsvd_231_228 : 4;
uint64_t bp : 8;
uint64_t rsvd_243_240 : 4;
uint64_t fc_ena : 1;
uint64_t fc_up_crossing : 1;
uint64_t fc_stype : 2;
uint64_t fc_hyst_bits : 4;
uint64_t rsvd_255_252 : 4;
uint64_t fc_addr : 64;/* W4 */
uint64_t pool_drop : 8;
uint64_t update_time : 16;
uint64_t err_int : 8;
uint64_t err_int_ena : 8;
uint64_t thresh_int : 1;
uint64_t thresh_int_ena : 1;
uint64_t thresh_up : 1;
uint64_t rsvd_363 : 1;
uint64_t thresh_qint_idx : 7;
uint64_t rsvd_371 : 1;
uint64_t err_qint_idx : 7;
uint64_t rsvd_383_379 : 5;
uint64_t thresh : 36;
uint64_t rsvd_447_420 : 28;
uint64_t rsvd_511_448 : 64;/* W7 */
};
/* NPA pool context structure */
struct npa_pool_s {
uint64_t stack_base : 64;/* W0 */
uint64_t ena : 1;
uint64_t nat_align : 1;
uint64_t rsvd_67_66 : 2;
uint64_t stack_caching : 1;
uint64_t rsvd_71_69 : 3;
uint64_t stack_way_mask : 16;
uint64_t buf_offset : 12;
uint64_t rsvd_103_100 : 4;
uint64_t buf_size : 11;
uint64_t rsvd_127_115 : 13;
uint64_t stack_max_pages : 32;
uint64_t stack_pages : 32;
uint64_t op_pc : 48;
uint64_t rsvd_255_240 : 16;
uint64_t stack_offset : 4;
uint64_t rsvd_263_260 : 4;
uint64_t shift : 6;
uint64_t rsvd_271_270 : 2;
uint64_t avg_level : 8;
uint64_t avg_con : 9;
uint64_t fc_ena : 1;
uint64_t fc_stype : 2;
uint64_t fc_hyst_bits : 4;
uint64_t fc_up_crossing : 1;
uint64_t rsvd_299_297 : 3;
uint64_t update_time : 16;
uint64_t rsvd_319_316 : 4;
uint64_t fc_addr : 64;/* W5 */
uint64_t ptr_start : 64;/* W6 */
uint64_t ptr_end : 64;/* W7 */
uint64_t rsvd_535_512 : 24;
uint64_t err_int : 8;
uint64_t err_int_ena : 8;
uint64_t thresh_int : 1;
uint64_t thresh_int_ena : 1;
uint64_t thresh_up : 1;
uint64_t rsvd_555 : 1;
uint64_t thresh_qint_idx : 7;
uint64_t rsvd_563 : 1;
uint64_t err_qint_idx : 7;
uint64_t rsvd_575_571 : 5;
uint64_t thresh : 36;
uint64_t rsvd_639_612 : 28;
uint64_t rsvd_703_640 : 64;/* W10 */
uint64_t rsvd_767_704 : 64;/* W11 */
uint64_t rsvd_831_768 : 64;/* W12 */
uint64_t rsvd_895_832 : 64;/* W13 */
uint64_t rsvd_959_896 : 64;/* W14 */
uint64_t rsvd_1023_960 : 64;/* W15 */
};
/* NPA queue interrupt context hardware structure */
struct npa_qint_hw_s {
uint32_t count : 22;
uint32_t rsvd_30_22 : 9;
uint32_t ena : 1;
};
#endif /* __OTX2_NPA_HW_H__ */
View File
@ -1,503 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef __OTX2_NPC_HW_H__
#define __OTX2_NPC_HW_H__
/* Register offsets */
#define NPC_AF_CFG (0x0ull)
#define NPC_AF_ACTIVE_PC (0x10ull)
#define NPC_AF_CONST (0x20ull)
#define NPC_AF_CONST1 (0x30ull)
#define NPC_AF_BLK_RST (0x40ull)
#define NPC_AF_MCAM_SCRUB_CTL (0xa0ull)
#define NPC_AF_KCAM_SCRUB_CTL (0xb0ull)
#define NPC_AF_KPUX_CFG(a) \
(0x500ull | (uint64_t)(a) << 3)
#define NPC_AF_PCK_CFG (0x600ull)
#define NPC_AF_PCK_DEF_OL2 (0x610ull)
#define NPC_AF_PCK_DEF_OIP4 (0x620ull)
#define NPC_AF_PCK_DEF_OIP6 (0x630ull)
#define NPC_AF_PCK_DEF_IIP4 (0x640ull)
#define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) \
(0x800ull | (uint64_t)(a) << 3)
#define NPC_AF_INTFX_KEX_CFG(a) \
(0x1010ull | (uint64_t)(a) << 8)
#define NPC_AF_PKINDX_ACTION0(a) \
(0x80000ull | (uint64_t)(a) << 6)
#define NPC_AF_PKINDX_ACTION1(a) \
(0x80008ull | (uint64_t)(a) << 6)
#define NPC_AF_PKINDX_CPI_DEFX(a, b) \
(0x80020ull | (uint64_t)(a) << 6 | (uint64_t)(b) << 3)
#define NPC_AF_CHLEN90B_PKIND (0x3bull)
#define NPC_AF_KPUX_ENTRYX_CAMX(a, b, c) \
(0x100000ull | (uint64_t)(a) << 14 | (uint64_t)(b) << 6 | \
(uint64_t)(c) << 3)
#define NPC_AF_KPUX_ENTRYX_ACTION0(a, b) \
(0x100020ull | (uint64_t)(a) << 14 | (uint64_t)(b) << 6)
#define NPC_AF_KPUX_ENTRYX_ACTION1(a, b) \
(0x100028ull | (uint64_t)(a) << 14 | (uint64_t)(b) << 6)
#define NPC_AF_KPUX_ENTRY_DISX(a, b) \
(0x180000ull | (uint64_t)(a) << 6 | (uint64_t)(b) << 3)
#define NPC_AF_CPIX_CFG(a) \
(0x200000ull | (uint64_t)(a) << 3)
#define NPC_AF_INTFX_LIDX_LTX_LDX_CFG(a, b, c, d) \
(0x900000ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 12 | \
(uint64_t)(c) << 5 | (uint64_t)(d) << 3)
#define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \
(0x980000ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 12 | \
(uint64_t)(c) << 3)
#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) \
(0x1000000ull | (uint64_t)(a) << 10 | (uint64_t)(b) << 6 | \
(uint64_t)(c) << 3)
#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) \
(0x1000010ull | (uint64_t)(a) << 10 | (uint64_t)(b) << 6 | \
(uint64_t)(c) << 3)
#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) \
(0x1000020ull | (uint64_t)(a) << 10 | (uint64_t)(b) << 6 | \
(uint64_t)(c) << 3)
#define NPC_AF_MCAMEX_BANKX_CFG(a, b) \
(0x1800000ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) \
(0x1880000ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
#define NPC_AF_MATCH_STATX(a) \
(0x1880008ull | (uint64_t)(a) << 8)
#define NPC_AF_INTFX_MISS_STAT_ACT(a) \
(0x1880040ull + (uint64_t)(a) * 0x8)
#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) \
(0x1900000ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) \
(0x1900008ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
#define NPC_AF_INTFX_MISS_ACT(a) \
(0x1a00000ull | (uint64_t)(a) << 4)
#define NPC_AF_INTFX_MISS_TAG_ACT(a) \
(0x1b00008ull | (uint64_t)(a) << 4)
#define NPC_AF_MCAM_BANKX_HITX(a, b) \
(0x1c80000ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
#define NPC_AF_LKUP_CTL (0x2000000ull)
#define NPC_AF_LKUP_DATAX(a) \
(0x2000200ull | (uint64_t)(a) << 4)
#define NPC_AF_LKUP_RESULTX(a) \
(0x2000400ull | (uint64_t)(a) << 4)
#define NPC_AF_INTFX_STAT(a) \
(0x2000800ull | (uint64_t)(a) << 4)
#define NPC_AF_DBG_CTL (0x3000000ull)
#define NPC_AF_DBG_STATUS (0x3000010ull)
#define NPC_AF_KPUX_DBG(a) \
(0x3000020ull | (uint64_t)(a) << 8)
#define NPC_AF_IKPU_ERR_CTL (0x3000080ull)
#define NPC_AF_KPUX_ERR_CTL(a) \
(0x30000a0ull | (uint64_t)(a) << 8)
#define NPC_AF_MCAM_DBG (0x3001000ull)
#define NPC_AF_DBG_DATAX(a) \
(0x3001400ull | (uint64_t)(a) << 4)
#define NPC_AF_DBG_RESULTX(a) \
(0x3001800ull | (uint64_t)(a) << 4)
/* Enum offsets */
#define NPC_INTF_NIX0_RX (0x0ull)
#define NPC_INTF_NIX0_TX (0x1ull)
#define NPC_LKUPOP_PKT (0x0ull)
#define NPC_LKUPOP_KEY (0x1ull)
#define NPC_MCAM_KEY_X1 (0x0ull)
#define NPC_MCAM_KEY_X2 (0x1ull)
#define NPC_MCAM_KEY_X4 (0x2ull)
enum NPC_ERRLEV_E {
NPC_ERRLEV_RE = 0,
NPC_ERRLEV_LA = 1,
NPC_ERRLEV_LB = 2,
NPC_ERRLEV_LC = 3,
NPC_ERRLEV_LD = 4,
NPC_ERRLEV_LE = 5,
NPC_ERRLEV_LF = 6,
NPC_ERRLEV_LG = 7,
NPC_ERRLEV_LH = 8,
NPC_ERRLEV_R9 = 9,
NPC_ERRLEV_R10 = 10,
NPC_ERRLEV_R11 = 11,
NPC_ERRLEV_R12 = 12,
NPC_ERRLEV_R13 = 13,
NPC_ERRLEV_R14 = 14,
NPC_ERRLEV_NIX = 15,
NPC_ERRLEV_ENUM_LAST = 16,
};
enum npc_kpu_err_code {
NPC_EC_NOERR = 0, /* has to be zero */
NPC_EC_UNK,
NPC_EC_IH_LENGTH,
NPC_EC_EDSA_UNK,
NPC_EC_L2_K1,
NPC_EC_L2_K2,
NPC_EC_L2_K3,
NPC_EC_L2_K3_ETYPE_UNK,
NPC_EC_L2_K4,
NPC_EC_MPLS_2MANY,
NPC_EC_MPLS_UNK,
NPC_EC_NSH_UNK,
NPC_EC_IP_TTL_0,
NPC_EC_IP_FRAG_OFFSET_1,
NPC_EC_IP_VER,
NPC_EC_IP6_HOP_0,
NPC_EC_IP6_VER,
NPC_EC_TCP_FLAGS_FIN_ONLY,
NPC_EC_TCP_FLAGS_ZERO,
NPC_EC_TCP_FLAGS_RST_FIN,
NPC_EC_TCP_FLAGS_URG_SYN,
NPC_EC_TCP_FLAGS_RST_SYN,
NPC_EC_TCP_FLAGS_SYN_FIN,
NPC_EC_VXLAN,
NPC_EC_NVGRE,
NPC_EC_GRE,
NPC_EC_GRE_VER1,
NPC_EC_L4,
NPC_EC_OIP4_CSUM,
NPC_EC_IIP4_CSUM,
NPC_EC_LAST /* has to be the last item */
};
enum NPC_LID_E {
NPC_LID_LA = 0,
NPC_LID_LB,
NPC_LID_LC,
NPC_LID_LD,
NPC_LID_LE,
NPC_LID_LF,
NPC_LID_LG,
NPC_LID_LH,
};
#define NPC_LT_NA 0
enum npc_kpu_la_ltype {
NPC_LT_LA_8023 = 1,
NPC_LT_LA_ETHER,
NPC_LT_LA_IH_NIX_ETHER,
NPC_LT_LA_IH_8_ETHER,
NPC_LT_LA_IH_4_ETHER,
NPC_LT_LA_IH_2_ETHER,
NPC_LT_LA_HIGIG2_ETHER,
NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
NPC_LT_LA_CUSTOM_L2_90B_ETHER,
NPC_LT_LA_CPT_HDR,
NPC_LT_LA_CUSTOM_L2_24B_ETHER,
NPC_LT_LA_CUSTOM0 = 0xE,
NPC_LT_LA_CUSTOM1 = 0xF,
};
enum npc_kpu_lb_ltype {
NPC_LT_LB_ETAG = 1,
NPC_LT_LB_CTAG,
NPC_LT_LB_STAG_QINQ,
NPC_LT_LB_BTAG,
NPC_LT_LB_ITAG,
NPC_LT_LB_DSA,
NPC_LT_LB_DSA_VLAN,
NPC_LT_LB_EDSA,
NPC_LT_LB_EDSA_VLAN,
NPC_LT_LB_EXDSA,
NPC_LT_LB_EXDSA_VLAN,
NPC_LT_LB_FDSA,
NPC_LT_LB_VLAN_EXDSA,
NPC_LT_LB_CUSTOM0 = 0xE,
NPC_LT_LB_CUSTOM1 = 0xF,
};
enum npc_kpu_lc_ltype {
NPC_LT_LC_PTP = 1,
NPC_LT_LC_IP,
NPC_LT_LC_IP_OPT,
NPC_LT_LC_IP6,
NPC_LT_LC_IP6_EXT,
NPC_LT_LC_ARP,
NPC_LT_LC_RARP,
NPC_LT_LC_MPLS,
NPC_LT_LC_NSH,
NPC_LT_LC_FCOE,
NPC_LT_LC_NGIO,
NPC_LT_LC_CUSTOM0 = 0xE,
NPC_LT_LC_CUSTOM1 = 0xF,
};
/* Don't modify Ltypes up to SCTP, otherwise it will
* effect flow tag calculation and thus RSS.
*/
enum npc_kpu_ld_ltype {
NPC_LT_LD_TCP = 1,
NPC_LT_LD_UDP,
NPC_LT_LD_ICMP,
NPC_LT_LD_SCTP,
NPC_LT_LD_ICMP6,
NPC_LT_LD_CUSTOM0,
NPC_LT_LD_CUSTOM1,
NPC_LT_LD_IGMP = 8,
NPC_LT_LD_AH,
NPC_LT_LD_GRE,
NPC_LT_LD_NVGRE,
NPC_LT_LD_NSH,
NPC_LT_LD_TU_MPLS_IN_NSH,
NPC_LT_LD_TU_MPLS_IN_IP,
};
enum npc_kpu_le_ltype {
NPC_LT_LE_VXLAN = 1,
NPC_LT_LE_GENEVE,
NPC_LT_LE_ESP,
NPC_LT_LE_GTPU = 4,
NPC_LT_LE_VXLANGPE,
NPC_LT_LE_GTPC,
NPC_LT_LE_NSH,
NPC_LT_LE_TU_MPLS_IN_GRE,
NPC_LT_LE_TU_NSH_IN_GRE,
NPC_LT_LE_TU_MPLS_IN_UDP,
NPC_LT_LE_CUSTOM0 = 0xE,
NPC_LT_LE_CUSTOM1 = 0xF,
};
enum npc_kpu_lf_ltype {
NPC_LT_LF_TU_ETHER = 1,
NPC_LT_LF_TU_PPP,
NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
NPC_LT_LF_TU_NSH_IN_VXLANGPE,
NPC_LT_LF_TU_MPLS_IN_NSH,
NPC_LT_LF_TU_3RD_NSH,
NPC_LT_LF_CUSTOM0 = 0xE,
NPC_LT_LF_CUSTOM1 = 0xF,
};
enum npc_kpu_lg_ltype {
NPC_LT_LG_TU_IP = 1,
NPC_LT_LG_TU_IP6,
NPC_LT_LG_TU_ARP,
NPC_LT_LG_TU_ETHER_IN_NSH,
NPC_LT_LG_CUSTOM0 = 0xE,
NPC_LT_LG_CUSTOM1 = 0xF,
};
/* Don't modify Ltypes up to SCTP, otherwise it will
* effect flow tag calculation and thus RSS.
*/
enum npc_kpu_lh_ltype {
NPC_LT_LH_TU_TCP = 1,
NPC_LT_LH_TU_UDP,
NPC_LT_LH_TU_ICMP,
NPC_LT_LH_TU_SCTP,
NPC_LT_LH_TU_ICMP6,
NPC_LT_LH_TU_IGMP = 8,
NPC_LT_LH_TU_ESP,
NPC_LT_LH_TU_AH,
NPC_LT_LH_CUSTOM0 = 0xE,
NPC_LT_LH_CUSTOM1 = 0xF,
};
/* Structures definitions */
struct npc_kpu_profile_cam {
uint8_t state;
uint8_t state_mask;
uint16_t dp0;
uint16_t dp0_mask;
uint16_t dp1;
uint16_t dp1_mask;
uint16_t dp2;
uint16_t dp2_mask;
};
struct npc_kpu_profile_action {
uint8_t errlev;
uint8_t errcode;
uint8_t dp0_offset;
uint8_t dp1_offset;
uint8_t dp2_offset;
uint8_t bypass_count;
uint8_t parse_done;
uint8_t next_state;
uint8_t ptr_advance;
uint8_t cap_ena;
uint8_t lid;
uint8_t ltype;
uint8_t flags;
uint8_t offset;
uint8_t mask;
uint8_t right;
uint8_t shift;
};
struct npc_kpu_profile {
int cam_entries;
int action_entries;
struct npc_kpu_profile_cam *cam;
struct npc_kpu_profile_action *action;
};
/* NPC KPU register formats */
struct npc_kpu_cam {
uint64_t dp0_data : 16;
uint64_t dp1_data : 16;
uint64_t dp2_data : 16;
uint64_t state : 8;
uint64_t rsvd_63_56 : 8;
};
struct npc_kpu_action0 {
uint64_t var_len_shift : 3;
uint64_t var_len_right : 1;
uint64_t var_len_mask : 8;
uint64_t var_len_offset : 8;
uint64_t ptr_advance : 8;
uint64_t capture_flags : 8;
uint64_t capture_ltype : 4;
uint64_t capture_lid : 3;
uint64_t rsvd_43 : 1;
uint64_t next_state : 8;
uint64_t parse_done : 1;
uint64_t capture_ena : 1;
uint64_t byp_count : 3;
uint64_t rsvd_63_57 : 7;
};
struct npc_kpu_action1 {
uint64_t dp0_offset : 8;
uint64_t dp1_offset : 8;
uint64_t dp2_offset : 8;
uint64_t errcode : 8;
uint64_t errlev : 4;
uint64_t rsvd_63_36 : 28;
};
struct npc_kpu_pkind_cpi_def {
uint64_t cpi_base : 10;
uint64_t rsvd_11_10 : 2;
uint64_t add_shift : 3;
uint64_t rsvd_15 : 1;
uint64_t add_mask : 8;
uint64_t add_offset : 8;
uint64_t flags_mask : 8;
uint64_t flags_match : 8;
uint64_t ltype_mask : 4;
uint64_t ltype_match : 4;
uint64_t lid : 3;
uint64_t rsvd_62_59 : 4;
uint64_t ena : 1;
};
struct nix_rx_action {
uint64_t op :4;
uint64_t pf_func :16;
uint64_t index :20;
uint64_t match_id :16;
uint64_t flow_key_alg :5;
uint64_t rsvd_63_61 :3;
};
struct nix_tx_action {
uint64_t op :4;
uint64_t rsvd_11_4 :8;
uint64_t index :20;
uint64_t match_id :16;
uint64_t rsvd_63_48 :16;
};
/* NPC layer parse information structure */
struct npc_layer_info_s {
uint32_t lptr : 8;
uint32_t flags : 8;
uint32_t ltype : 4;
uint32_t rsvd_31_20 : 12;
};
/* NPC layer mcam search key extract structure */
struct npc_layer_kex_s {
uint16_t flags : 8;
uint16_t ltype : 4;
uint16_t rsvd_15_12 : 4;
};
/* NPC mcam search key x1 structure */
struct npc_mcam_key_x1_s {
uint64_t intf : 2;
uint64_t rsvd_63_2 : 62;
uint64_t kw0 : 64; /* W1 */
uint64_t kw1 : 48;
uint64_t rsvd_191_176 : 16;
};
/* NPC mcam search key x2 structure */
struct npc_mcam_key_x2_s {
uint64_t intf : 2;
uint64_t rsvd_63_2 : 62;
uint64_t kw0 : 64; /* W1 */
uint64_t kw1 : 64; /* W2 */
uint64_t kw2 : 64; /* W3 */
uint64_t kw3 : 32;
uint64_t rsvd_319_288 : 32;
};
/* NPC mcam search key x4 structure */
struct npc_mcam_key_x4_s {
uint64_t intf : 2;
uint64_t rsvd_63_2 : 62;
uint64_t kw0 : 64; /* W1 */
uint64_t kw1 : 64; /* W2 */
uint64_t kw2 : 64; /* W3 */
uint64_t kw3 : 64; /* W4 */
uint64_t kw4 : 64; /* W5 */
uint64_t kw5 : 64; /* W6 */
uint64_t kw6 : 64; /* W7 */
};
/* NPC parse key extract structure */
struct npc_parse_kex_s {
uint64_t chan : 12;
uint64_t errlev : 4;
uint64_t errcode : 8;
uint64_t l2m : 1;
uint64_t l2b : 1;
uint64_t l3m : 1;
uint64_t l3b : 1;
uint64_t la : 12;
uint64_t lb : 12;
uint64_t lc : 12;
uint64_t ld : 12;
uint64_t le : 12;
uint64_t lf : 12;
uint64_t lg : 12;
uint64_t lh : 12;
uint64_t rsvd_127_124 : 4;
};
/* NPC result structure */
struct npc_result_s {
uint64_t intf : 2;
uint64_t pkind : 6;
uint64_t chan : 12;
uint64_t errlev : 4;
uint64_t errcode : 8;
uint64_t l2m : 1;
uint64_t l2b : 1;
uint64_t l3m : 1;
uint64_t l3b : 1;
uint64_t eoh_ptr : 8;
uint64_t rsvd_63_44 : 20;
uint64_t action : 64; /* W1 */
uint64_t vtag_action : 64; /* W2 */
uint64_t la : 20;
uint64_t lb : 20;
uint64_t lc : 20;
uint64_t rsvd_255_252 : 4;
uint64_t ld : 20;
uint64_t le : 20;
uint64_t lf : 20;
uint64_t rsvd_319_316 : 4;
uint64_t lg : 20;
uint64_t lh : 20;
uint64_t rsvd_383_360 : 24;
};
#endif /* __OTX2_NPC_HW_H__ */
View File
@ -1,27 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __OTX2_REE_HW_H__
#define __OTX2_REE_HW_H__
/* REE BAR0*/
#define REE_AF_REEXM_MAX_MATCH (0x80c8)
/* REE BAR02 */
#define REE_LF_MISC_INT (0x300)
#define REE_LF_DONE_INT (0x120)
#define REE_AF_QUEX_GMCTL(a) (0x800 | (a) << 3)
#define REE_AF_INT_VEC_RAS (0x0ull)
#define REE_AF_INT_VEC_RVU (0x1ull)
#define REE_AF_INT_VEC_QUE_DONE (0x2ull)
#define REE_AF_INT_VEC_AQ (0x3ull)
/* ENUMS */
#define REE_LF_INT_VEC_QUE_DONE (0x0ull)
#define REE_LF_INT_VEC_MISC (0x1ull)
#endif /* __OTX2_REE_HW_H__*/
View File
@ -1,219 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef __OTX2_RVU_HW_H__
#define __OTX2_RVU_HW_H__
/* Register offsets */
#define RVU_AF_MSIXTR_BASE (0x10ull)
#define RVU_AF_BLK_RST (0x30ull)
#define RVU_AF_PF_BAR4_ADDR (0x40ull)
#define RVU_AF_RAS (0x100ull)
#define RVU_AF_RAS_W1S (0x108ull)
#define RVU_AF_RAS_ENA_W1S (0x110ull)
#define RVU_AF_RAS_ENA_W1C (0x118ull)
#define RVU_AF_GEN_INT (0x120ull)
#define RVU_AF_GEN_INT_W1S (0x128ull)
#define RVU_AF_GEN_INT_ENA_W1S (0x130ull)
#define RVU_AF_GEN_INT_ENA_W1C (0x138ull)
#define RVU_AF_AFPFX_MBOXX(a, b) \
(0x2000ull | (uint64_t)(a) << 4 | (uint64_t)(b) << 3)
#define RVU_AF_PFME_STATUS (0x2800ull)
#define RVU_AF_PFTRPEND (0x2810ull)
#define RVU_AF_PFTRPEND_W1S (0x2820ull)
#define RVU_AF_PF_RST (0x2840ull)
#define RVU_AF_HWVF_RST (0x2850ull)
#define RVU_AF_PFAF_MBOX_INT (0x2880ull)
#define RVU_AF_PFAF_MBOX_INT_W1S (0x2888ull)
#define RVU_AF_PFAF_MBOX_INT_ENA_W1S (0x2890ull)
#define RVU_AF_PFAF_MBOX_INT_ENA_W1C (0x2898ull)
#define RVU_AF_PFFLR_INT (0x28a0ull)
#define RVU_AF_PFFLR_INT_W1S (0x28a8ull)
#define RVU_AF_PFFLR_INT_ENA_W1S (0x28b0ull)
#define RVU_AF_PFFLR_INT_ENA_W1C (0x28b8ull)
#define RVU_AF_PFME_INT (0x28c0ull)
#define RVU_AF_PFME_INT_W1S (0x28c8ull)
#define RVU_AF_PFME_INT_ENA_W1S (0x28d0ull)
#define RVU_AF_PFME_INT_ENA_W1C (0x28d8ull)
#define RVU_PRIV_CONST (0x8000000ull)
#define RVU_PRIV_GEN_CFG (0x8000010ull)
#define RVU_PRIV_CLK_CFG (0x8000020ull)
#define RVU_PRIV_ACTIVE_PC (0x8000030ull)
#define RVU_PRIV_PFX_CFG(a) (0x8000100ull | (uint64_t)(a) << 16)
#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110ull | (uint64_t)(a) << 16)
#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120ull | (uint64_t)(a) << 16)
#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200ull | (uint64_t)(a) << 16)
#define RVU_PRIV_PFX_NIXX_CFG(a, b) \
(0x8000300ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
#define RVU_PRIV_PFX_NPA_CFG(a) (0x8000310ull | (uint64_t)(a) << 16)
#define RVU_PRIV_PFX_SSO_CFG(a) (0x8000320ull | (uint64_t)(a) << 16)
#define RVU_PRIV_PFX_SSOW_CFG(a) (0x8000330ull | (uint64_t)(a) << 16)
#define RVU_PRIV_PFX_TIM_CFG(a) (0x8000340ull | (uint64_t)(a) << 16)
#define RVU_PRIV_PFX_CPTX_CFG(a, b) \
(0x8000350ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400ull | (uint64_t)(a) << 3)
#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280ull | (uint64_t)(a) << 16)
#define RVU_PRIV_HWVFX_NIXX_CFG(a, b) \
(0x8001300ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
#define RVU_PRIV_HWVFX_NPA_CFG(a) (0x8001310ull | (uint64_t)(a) << 16)
#define RVU_PRIV_HWVFX_SSO_CFG(a) (0x8001320ull | (uint64_t)(a) << 16)
#define RVU_PRIV_HWVFX_SSOW_CFG(a) (0x8001330ull | (uint64_t)(a) << 16)
#define RVU_PRIV_HWVFX_TIM_CFG(a) (0x8001340ull | (uint64_t)(a) << 16)
#define RVU_PRIV_HWVFX_CPTX_CFG(a, b) \
(0x8001350ull | (uint64_t)(a) << 16 | (uint64_t)(b) << 3)
#define RVU_PF_VFX_PFVF_MBOXX(a, b) \
(0x0ull | (uint64_t)(a) << 12 | (uint64_t)(b) << 3)
#define RVU_PF_VF_BAR4_ADDR (0x10ull)
#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200ull | (uint64_t)(a) << 3)
#define RVU_PF_VFME_STATUSX(a) (0x800ull | (uint64_t)(a) << 3)
#define RVU_PF_VFTRPENDX(a) (0x820ull | (uint64_t)(a) << 3)
#define RVU_PF_VFTRPEND_W1SX(a) (0x840ull | (uint64_t)(a) << 3)
#define RVU_PF_VFPF_MBOX_INTX(a) (0x880ull | (uint64_t)(a) << 3)
#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8a0ull | (uint64_t)(a) << 3)
#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8c0ull | (uint64_t)(a) << 3)
#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8e0ull | (uint64_t)(a) << 3)
#define RVU_PF_VFFLR_INTX(a) (0x900ull | (uint64_t)(a) << 3)
#define RVU_PF_VFFLR_INT_W1SX(a) (0x920ull | (uint64_t)(a) << 3)
#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940ull | (uint64_t)(a) << 3)
#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960ull | (uint64_t)(a) << 3)
#define RVU_PF_VFME_INTX(a) (0x980ull | (uint64_t)(a) << 3)
#define RVU_PF_VFME_INT_W1SX(a) (0x9a0ull | (uint64_t)(a) << 3)
#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9c0ull | (uint64_t)(a) << 3)
#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9e0ull | (uint64_t)(a) << 3)
#define RVU_PF_PFAF_MBOXX(a) (0xc00ull | (uint64_t)(a) << 3)
#define RVU_PF_INT (0xc20ull)
#define RVU_PF_INT_W1S (0xc28ull)
#define RVU_PF_INT_ENA_W1S (0xc30ull)
#define RVU_PF_INT_ENA_W1C (0xc38ull)
#define RVU_PF_MSIX_VECX_ADDR(a) (0x80000ull | (uint64_t)(a) << 4)
#define RVU_PF_MSIX_VECX_CTL(a) (0x80008ull | (uint64_t)(a) << 4)
#define RVU_PF_MSIX_PBAX(a) (0xf0000ull | (uint64_t)(a) << 3)
#define RVU_VF_VFPF_MBOXX(a) (0x0ull | (uint64_t)(a) << 3)
#define RVU_VF_INT (0x20ull)
#define RVU_VF_INT_W1S (0x28ull)
#define RVU_VF_INT_ENA_W1S (0x30ull)
#define RVU_VF_INT_ENA_W1C (0x38ull)
#define RVU_VF_BLOCK_ADDRX_DISC(a) (0x200ull | (uint64_t)(a) << 3)
#define RVU_VF_MSIX_VECX_ADDR(a) (0x80000ull | (uint64_t)(a) << 4)
#define RVU_VF_MSIX_VECX_CTL(a) (0x80008ull | (uint64_t)(a) << 4)
#define RVU_VF_MSIX_PBAX(a) (0xf0000ull | (uint64_t)(a) << 3)
/* Enum offsets */
#define RVU_BAR_RVU_PF_END_BAR0 (0x84f000000000ull)
#define RVU_BAR_RVU_PF_START_BAR0 (0x840000000000ull)
#define RVU_BAR_RVU_PFX_FUNCX_BAR2(a, b) \
(0x840200000000ull | ((uint64_t)(a) << 36) | ((uint64_t)(b) << 25))
#define RVU_AF_INT_VEC_POISON (0x0ull)
#define RVU_AF_INT_VEC_PFFLR (0x1ull)
#define RVU_AF_INT_VEC_PFME (0x2ull)
#define RVU_AF_INT_VEC_GEN (0x3ull)
#define RVU_AF_INT_VEC_MBOX (0x4ull)
#define RVU_BLOCK_TYPE_RVUM (0x0ull)
#define RVU_BLOCK_TYPE_LMT (0x2ull)
#define RVU_BLOCK_TYPE_NIX (0x3ull)
#define RVU_BLOCK_TYPE_NPA (0x4ull)
#define RVU_BLOCK_TYPE_NPC (0x5ull)
#define RVU_BLOCK_TYPE_SSO (0x6ull)
#define RVU_BLOCK_TYPE_SSOW (0x7ull)
#define RVU_BLOCK_TYPE_TIM (0x8ull)
#define RVU_BLOCK_TYPE_CPT (0x9ull)
#define RVU_BLOCK_TYPE_NDC (0xaull)
#define RVU_BLOCK_TYPE_DDF (0xbull)
#define RVU_BLOCK_TYPE_ZIP (0xcull)
#define RVU_BLOCK_TYPE_RAD (0xdull)
#define RVU_BLOCK_TYPE_DFA (0xeull)
#define RVU_BLOCK_TYPE_HNA (0xfull)
#define RVU_BLOCK_TYPE_REE (0xeull)
#define RVU_BLOCK_ADDR_RVUM (0x0ull)
#define RVU_BLOCK_ADDR_LMT (0x1ull)
#define RVU_BLOCK_ADDR_NPA (0x3ull)
#define RVU_BLOCK_ADDR_NIX0 (0x4ull)
#define RVU_BLOCK_ADDR_NIX1 (0x5ull)
#define RVU_BLOCK_ADDR_NPC (0x6ull)
#define RVU_BLOCK_ADDR_SSO (0x7ull)
#define RVU_BLOCK_ADDR_SSOW (0x8ull)
#define RVU_BLOCK_ADDR_TIM (0x9ull)
#define RVU_BLOCK_ADDR_CPT0 (0xaull)
#define RVU_BLOCK_ADDR_CPT1 (0xbull)
#define RVU_BLOCK_ADDR_NDC0 (0xcull)
#define RVU_BLOCK_ADDR_NDC1 (0xdull)
#define RVU_BLOCK_ADDR_NDC2 (0xeull)
#define RVU_BLOCK_ADDR_R_END (0x1full)
#define RVU_BLOCK_ADDR_R_START (0x14ull)
#define RVU_BLOCK_ADDR_REE0 (0x14ull)
#define RVU_BLOCK_ADDR_REE1 (0x15ull)
#define RVU_VF_INT_VEC_MBOX (0x0ull)
#define RVU_PF_INT_VEC_AFPF_MBOX (0x6ull)
#define RVU_PF_INT_VEC_VFFLR0 (0x0ull)
#define RVU_PF_INT_VEC_VFFLR1 (0x1ull)
#define RVU_PF_INT_VEC_VFME0 (0x2ull)
#define RVU_PF_INT_VEC_VFME1 (0x3ull)
#define RVU_PF_INT_VEC_VFPF_MBOX0 (0x4ull)
#define RVU_PF_INT_VEC_VFPF_MBOX1 (0x5ull)
#define AF_BAR2_ALIASX_SIZE (0x100000ull)
#define TIM_AF_BAR2_SEL (0x9000000ull)
#define SSO_AF_BAR2_SEL (0x9000000ull)
#define NIX_AF_BAR2_SEL (0x9000000ull)
#define SSOW_AF_BAR2_SEL (0x9000000ull)
#define NPA_AF_BAR2_SEL (0x9000000ull)
#define CPT_AF_BAR2_SEL (0x9000000ull)
#define RVU_AF_BAR2_SEL (0x9000000ull)
#define REE_AF_BAR2_SEL (0x9000000ull)
#define AF_BAR2_ALIASX(a, b) \
(0x9100000ull | (uint64_t)(a) << 12 | (uint64_t)(b))
#define TIM_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
#define SSO_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
#define NIX_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(0, b)
#define SSOW_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
#define NPA_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(0, b)
#define CPT_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
#define RVU_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
#define REE_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
/* Structures definitions */
/* RVU admin function register address structure */
struct rvu_af_addr_s {
uint64_t addr : 28;
uint64_t block : 5;
uint64_t rsvd_63_33 : 31;
};
/* RVU function-unique address structure */
struct rvu_func_addr_s {
uint32_t addr : 12;
uint32_t lf_slot : 8;
uint32_t block : 5;
uint32_t rsvd_31_25 : 7;
};
/* RVU msi-x vector structure */
struct rvu_msix_vec_s {
uint64_t addr : 64; /* W0 */
uint64_t data : 32;
uint64_t mask : 1;
uint64_t pend : 1;
uint64_t rsvd_127_98 : 30;
};
/* RVU pf function identification structure */
struct rvu_pf_func_s {
uint16_t func : 10;
uint16_t pf : 6;
};
#endif /* __OTX2_RVU_HW_H__ */
View File
@ -1,184 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef __OTX2_SDP_HW_H_
#define __OTX2_SDP_HW_H_
/* SDP VF IOQs */
#define SDP_MIN_RINGS_PER_VF (1)
#define SDP_MAX_RINGS_PER_VF (8)
/* SDP VF IQ configuration */
#define SDP_VF_MAX_IQ_DESCRIPTORS (512)
#define SDP_VF_MIN_IQ_DESCRIPTORS (128)
#define SDP_VF_DB_MIN (1)
#define SDP_VF_DB_TIMEOUT (1)
#define SDP_VF_INTR_THRESHOLD (0xFFFFFFFF)
#define SDP_VF_64BYTE_INSTR (64)
#define SDP_VF_32BYTE_INSTR (32)
/* SDP VF OQ configuration */
#define SDP_VF_MAX_OQ_DESCRIPTORS (512)
#define SDP_VF_MIN_OQ_DESCRIPTORS (128)
#define SDP_VF_OQ_BUF_SIZE (2048)
#define SDP_VF_OQ_REFIL_THRESHOLD (16)
#define SDP_VF_OQ_INFOPTR_MODE (1)
#define SDP_VF_OQ_BUFPTR_MODE (0)
#define SDP_VF_OQ_INTR_PKT (1)
#define SDP_VF_OQ_INTR_TIME (10)
#define SDP_VF_CFG_IO_QUEUES SDP_MAX_RINGS_PER_VF
/* Wait time in milliseconds for FLR */
#define SDP_VF_PCI_FLR_WAIT (100)
#define SDP_VF_BUSY_LOOP_COUNT (10000)
#define SDP_VF_MAX_IO_QUEUES SDP_MAX_RINGS_PER_VF
#define SDP_VF_MIN_IO_QUEUES SDP_MIN_RINGS_PER_VF
/* SDP VF IOQs per rawdev */
#define SDP_VF_MAX_IOQS_PER_RAWDEV SDP_VF_MAX_IO_QUEUES
#define SDP_VF_DEFAULT_IOQS_PER_RAWDEV SDP_VF_MIN_IO_QUEUES
/* SDP VF Register definitions */
#define SDP_VF_RING_OFFSET (0x1ull << 17)
/* SDP VF IQ Registers */
#define SDP_VF_R_IN_CONTROL_START (0x10000)
#define SDP_VF_R_IN_ENABLE_START (0x10010)
#define SDP_VF_R_IN_INSTR_BADDR_START (0x10020)
#define SDP_VF_R_IN_INSTR_RSIZE_START (0x10030)
#define SDP_VF_R_IN_INSTR_DBELL_START (0x10040)
#define SDP_VF_R_IN_CNTS_START (0x10050)
#define SDP_VF_R_IN_INT_LEVELS_START (0x10060)
#define SDP_VF_R_IN_PKT_CNT_START (0x10080)
#define SDP_VF_R_IN_BYTE_CNT_START (0x10090)
#define SDP_VF_R_IN_CONTROL(ring) \
(SDP_VF_R_IN_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_IN_ENABLE(ring) \
(SDP_VF_R_IN_ENABLE_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_IN_INSTR_BADDR(ring) \
(SDP_VF_R_IN_INSTR_BADDR_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_IN_INSTR_RSIZE(ring) \
(SDP_VF_R_IN_INSTR_RSIZE_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_IN_INSTR_DBELL(ring) \
(SDP_VF_R_IN_INSTR_DBELL_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_IN_CNTS(ring) \
(SDP_VF_R_IN_CNTS_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_IN_INT_LEVELS(ring) \
(SDP_VF_R_IN_INT_LEVELS_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_IN_PKT_CNT(ring) \
(SDP_VF_R_IN_PKT_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_IN_BYTE_CNT(ring) \
(SDP_VF_R_IN_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
/* SDP VF IQ Masks */
#define SDP_VF_R_IN_CTL_RPVF_MASK (0xF)
#define SDP_VF_R_IN_CTL_RPVF_POS (48)
#define SDP_VF_R_IN_CTL_IDLE (0x1ull << 28)
#define SDP_VF_R_IN_CTL_RDSIZE (0x3ull << 25) /* Setting to max(4) */
#define SDP_VF_R_IN_CTL_IS_64B (0x1ull << 24)
#define SDP_VF_R_IN_CTL_D_NSR (0x1ull << 8)
#define SDP_VF_R_IN_CTL_D_ESR (0x1ull << 6)
#define SDP_VF_R_IN_CTL_D_ROR (0x1ull << 5)
#define SDP_VF_R_IN_CTL_NSR (0x1ull << 3)
#define SDP_VF_R_IN_CTL_ESR (0x1ull << 1)
#define SDP_VF_R_IN_CTL_ROR (0x1ull << 0)
#define SDP_VF_R_IN_CTL_MASK \
(SDP_VF_R_IN_CTL_RDSIZE | SDP_VF_R_IN_CTL_IS_64B)
/* SDP VF OQ Registers */
#define SDP_VF_R_OUT_CNTS_START (0x10100)
#define SDP_VF_R_OUT_INT_LEVELS_START (0x10110)
#define SDP_VF_R_OUT_SLIST_BADDR_START (0x10120)
#define SDP_VF_R_OUT_SLIST_RSIZE_START (0x10130)
#define SDP_VF_R_OUT_SLIST_DBELL_START (0x10140)
#define SDP_VF_R_OUT_CONTROL_START (0x10150)
#define SDP_VF_R_OUT_ENABLE_START (0x10160)
#define SDP_VF_R_OUT_PKT_CNT_START (0x10180)
#define SDP_VF_R_OUT_BYTE_CNT_START (0x10190)
#define SDP_VF_R_OUT_CONTROL(ring) \
(SDP_VF_R_OUT_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_OUT_ENABLE(ring) \
(SDP_VF_R_OUT_ENABLE_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_OUT_SLIST_BADDR(ring) \
(SDP_VF_R_OUT_SLIST_BADDR_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_OUT_SLIST_RSIZE(ring) \
(SDP_VF_R_OUT_SLIST_RSIZE_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_OUT_SLIST_DBELL(ring) \
(SDP_VF_R_OUT_SLIST_DBELL_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_OUT_CNTS(ring) \
(SDP_VF_R_OUT_CNTS_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_OUT_INT_LEVELS(ring) \
(SDP_VF_R_OUT_INT_LEVELS_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_OUT_PKT_CNT(ring) \
(SDP_VF_R_OUT_PKT_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
#define SDP_VF_R_OUT_BYTE_CNT(ring) \
(SDP_VF_R_OUT_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
/* SDP VF OQ Masks */
#define SDP_VF_R_OUT_CTL_IDLE (1ull << 40)
#define SDP_VF_R_OUT_CTL_ES_I (1ull << 34)
#define SDP_VF_R_OUT_CTL_NSR_I (1ull << 33)
#define SDP_VF_R_OUT_CTL_ROR_I (1ull << 32)
#define SDP_VF_R_OUT_CTL_ES_D (1ull << 30)
#define SDP_VF_R_OUT_CTL_NSR_D (1ull << 29)
#define SDP_VF_R_OUT_CTL_ROR_D (1ull << 28)
#define SDP_VF_R_OUT_CTL_ES_P (1ull << 26)
#define SDP_VF_R_OUT_CTL_NSR_P (1ull << 25)
#define SDP_VF_R_OUT_CTL_ROR_P (1ull << 24)
#define SDP_VF_R_OUT_CTL_IMODE (1ull << 23)
#define SDP_VF_R_OUT_INT_LEVELS_BMODE (1ull << 63)
#define SDP_VF_R_OUT_INT_LEVELS_TIMET (32)
/* SDP Instruction Header */
struct sdp_instr_ih {
/* Data Len */
uint64_t tlen:16;
/* Reserved1 */
uint64_t rsvd1:20;
/* PKIND for SDP */
uint64_t pkind:6;
/* Front Data size */
uint64_t fsz:6;
/* No. of entries in gather list */
uint64_t gsz:14;
/* Gather indicator */
uint64_t gather:1;
/* Reserved2 */
uint64_t rsvd2:1;
} __rte_packed;
View File

@ -1,209 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef __OTX2_SSO_HW_H__
#define __OTX2_SSO_HW_H__
/* Register offsets */
#define SSO_AF_CONST (0x1000ull)
#define SSO_AF_CONST1 (0x1008ull)
#define SSO_AF_WQ_INT_PC (0x1020ull)
#define SSO_AF_NOS_CNT (0x1050ull)
#define SSO_AF_AW_WE (0x1080ull)
#define SSO_AF_WS_CFG (0x1088ull)
#define SSO_AF_GWE_CFG (0x1098ull)
#define SSO_AF_GWE_RANDOM (0x10b0ull)
#define SSO_AF_LF_HWGRP_RST (0x10e0ull)
#define SSO_AF_AW_CFG (0x10f0ull)
#define SSO_AF_BLK_RST (0x10f8ull)
#define SSO_AF_ACTIVE_CYCLES0 (0x1100ull)
#define SSO_AF_ACTIVE_CYCLES1 (0x1108ull)
#define SSO_AF_ACTIVE_CYCLES2 (0x1110ull)
#define SSO_AF_ERR0 (0x1220ull)
#define SSO_AF_ERR0_W1S (0x1228ull)
#define SSO_AF_ERR0_ENA_W1C (0x1230ull)
#define SSO_AF_ERR0_ENA_W1S (0x1238ull)
#define SSO_AF_ERR2 (0x1260ull)
#define SSO_AF_ERR2_W1S (0x1268ull)
#define SSO_AF_ERR2_ENA_W1C (0x1270ull)
#define SSO_AF_ERR2_ENA_W1S (0x1278ull)
#define SSO_AF_UNMAP_INFO (0x12f0ull)
#define SSO_AF_UNMAP_INFO2 (0x1300ull)
#define SSO_AF_UNMAP_INFO3 (0x1310ull)
#define SSO_AF_RAS (0x1420ull)
#define SSO_AF_RAS_W1S (0x1430ull)
#define SSO_AF_RAS_ENA_W1C (0x1460ull)
#define SSO_AF_RAS_ENA_W1S (0x1470ull)
#define SSO_AF_AW_INP_CTL (0x2070ull)
#define SSO_AF_AW_ADD (0x2080ull)
#define SSO_AF_AW_READ_ARB (0x2090ull)
#define SSO_AF_XAQ_REQ_PC (0x20b0ull)
#define SSO_AF_XAQ_LATENCY_PC (0x20b8ull)
#define SSO_AF_TAQ_CNT (0x20c0ull)
#define SSO_AF_TAQ_ADD (0x20e0ull)
#define SSO_AF_POISONX(a) (0x2100ull | (uint64_t)(a) << 3)
#define SSO_AF_POISONX_W1S(a) (0x2200ull | (uint64_t)(a) << 3)
#define SSO_PRIV_AF_INT_CFG (0x3000ull)
#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800ull)
#define SSO_PRIV_LFX_HWGRP_CFG(a) (0x10000ull | (uint64_t)(a) << 3)
#define SSO_PRIV_LFX_HWGRP_INT_CFG(a) (0x20000ull | (uint64_t)(a) << 3)
#define SSO_AF_IU_ACCNTX_CFG(a) (0x50000ull | (uint64_t)(a) << 3)
#define SSO_AF_IU_ACCNTX_RST(a) (0x60000ull | (uint64_t)(a) << 3)
#define SSO_AF_XAQX_HEAD_PTR(a) (0x80000ull | (uint64_t)(a) << 3)
#define SSO_AF_XAQX_TAIL_PTR(a) (0x90000ull | (uint64_t)(a) << 3)
#define SSO_AF_XAQX_HEAD_NEXT(a) (0xa0000ull | (uint64_t)(a) << 3)
#define SSO_AF_XAQX_TAIL_NEXT(a) (0xb0000ull | (uint64_t)(a) << 3)
#define SSO_AF_TIAQX_STATUS(a) (0xc0000ull | (uint64_t)(a) << 3)
#define SSO_AF_TOAQX_STATUS(a) (0xd0000ull | (uint64_t)(a) << 3)
#define SSO_AF_XAQX_GMCTL(a) (0xe0000ull | (uint64_t)(a) << 3)
#define SSO_AF_HWGRPX_IAQ_THR(a) (0x200000ull | (uint64_t)(a) << 12)
#define SSO_AF_HWGRPX_TAQ_THR(a) (0x200010ull | (uint64_t)(a) << 12)
#define SSO_AF_HWGRPX_PRI(a) (0x200020ull | (uint64_t)(a) << 12)
#define SSO_AF_HWGRPX_WS_PC(a) (0x200050ull | (uint64_t)(a) << 12)
#define SSO_AF_HWGRPX_EXT_PC(a) (0x200060ull | (uint64_t)(a) << 12)
#define SSO_AF_HWGRPX_WA_PC(a) (0x200070ull | (uint64_t)(a) << 12)
#define SSO_AF_HWGRPX_TS_PC(a) (0x200080ull | (uint64_t)(a) << 12)
#define SSO_AF_HWGRPX_DS_PC(a) (0x200090ull | (uint64_t)(a) << 12)
#define SSO_AF_HWGRPX_DQ_PC(a) (0x2000A0ull | (uint64_t)(a) << 12)
#define SSO_AF_HWGRPX_PAGE_CNT(a) (0x200100ull | (uint64_t)(a) << 12)
#define SSO_AF_HWGRPX_AW_STATUS(a) (0x200110ull | (uint64_t)(a) << 12)
#define SSO_AF_HWGRPX_AW_CFG(a) (0x200120ull | (uint64_t)(a) << 12)
#define SSO_AF_HWGRPX_AW_TAGSPACE(a) (0x200130ull | (uint64_t)(a) << 12)
#define SSO_AF_HWGRPX_XAQ_AURA(a) (0x200140ull | (uint64_t)(a) << 12)
#define SSO_AF_HWGRPX_XAQ_LIMIT(a) (0x200220ull | (uint64_t)(a) << 12)
#define SSO_AF_HWGRPX_IU_ACCNT(a) (0x200230ull | (uint64_t)(a) << 12)
#define SSO_AF_HWSX_ARB(a) (0x400100ull | (uint64_t)(a) << 12)
#define SSO_AF_HWSX_INV(a) (0x400180ull | (uint64_t)(a) << 12)
#define SSO_AF_HWSX_GMCTL(a) (0x400200ull | (uint64_t)(a) << 12)
#define SSO_AF_HWSX_SX_GRPMSKX(a, b, c) \
(0x400400ull | (uint64_t)(a) << 12 | (uint64_t)(b) << 5 | \
(uint64_t)(c) << 3)
#define SSO_AF_IPL_FREEX(a) (0x800000ull | (uint64_t)(a) << 3)
#define SSO_AF_IPL_IAQX(a) (0x840000ull | (uint64_t)(a) << 3)
#define SSO_AF_IPL_DESCHEDX(a) (0x860000ull | (uint64_t)(a) << 3)
#define SSO_AF_IPL_CONFX(a) (0x880000ull | (uint64_t)(a) << 3)
#define SSO_AF_NPA_DIGESTX(a) (0x900000ull | (uint64_t)(a) << 3)
#define SSO_AF_NPA_DIGESTX_W1S(a) (0x900100ull | (uint64_t)(a) << 3)
#define SSO_AF_BFP_DIGESTX(a) (0x900200ull | (uint64_t)(a) << 3)
#define SSO_AF_BFP_DIGESTX_W1S(a) (0x900300ull | (uint64_t)(a) << 3)
#define SSO_AF_BFPN_DIGESTX(a) (0x900400ull | (uint64_t)(a) << 3)
#define SSO_AF_BFPN_DIGESTX_W1S(a) (0x900500ull | (uint64_t)(a) << 3)
#define SSO_AF_GRPDIS_DIGESTX(a) (0x900600ull | (uint64_t)(a) << 3)
#define SSO_AF_GRPDIS_DIGESTX_W1S(a) (0x900700ull | (uint64_t)(a) << 3)
#define SSO_AF_AWEMPTY_DIGESTX(a) (0x900800ull | (uint64_t)(a) << 3)
#define SSO_AF_AWEMPTY_DIGESTX_W1S(a) (0x900900ull | (uint64_t)(a) << 3)
#define SSO_AF_WQP0_DIGESTX(a) (0x900a00ull | (uint64_t)(a) << 3)
#define SSO_AF_WQP0_DIGESTX_W1S(a) (0x900b00ull | (uint64_t)(a) << 3)
#define SSO_AF_AW_DROPPED_DIGESTX(a) (0x900c00ull | (uint64_t)(a) << 3)
#define SSO_AF_AW_DROPPED_DIGESTX_W1S(a) (0x900d00ull | (uint64_t)(a) << 3)
#define SSO_AF_QCTLDIS_DIGESTX(a) (0x900e00ull | (uint64_t)(a) << 3)
#define SSO_AF_QCTLDIS_DIGESTX_W1S(a) (0x900f00ull | (uint64_t)(a) << 3)
#define SSO_AF_XAQDIS_DIGESTX(a) (0x901000ull | (uint64_t)(a) << 3)
#define SSO_AF_XAQDIS_DIGESTX_W1S(a) (0x901100ull | (uint64_t)(a) << 3)
#define SSO_AF_FLR_AQ_DIGESTX(a) (0x901200ull | (uint64_t)(a) << 3)
#define SSO_AF_FLR_AQ_DIGESTX_W1S(a) (0x901300ull | (uint64_t)(a) << 3)
#define SSO_AF_WS_GMULTI_DIGESTX(a) (0x902000ull | (uint64_t)(a) << 3)
#define SSO_AF_WS_GMULTI_DIGESTX_W1S(a) (0x902100ull | (uint64_t)(a) << 3)
#define SSO_AF_WS_GUNMAP_DIGESTX(a) (0x902200ull | (uint64_t)(a) << 3)
#define SSO_AF_WS_GUNMAP_DIGESTX_W1S(a) (0x902300ull | (uint64_t)(a) << 3)
#define SSO_AF_WS_AWE_DIGESTX(a) (0x902400ull | (uint64_t)(a) << 3)
#define SSO_AF_WS_AWE_DIGESTX_W1S(a) (0x902500ull | (uint64_t)(a) << 3)
#define SSO_AF_WS_GWI_DIGESTX(a) (0x902600ull | (uint64_t)(a) << 3)
#define SSO_AF_WS_GWI_DIGESTX_W1S(a) (0x902700ull | (uint64_t)(a) << 3)
#define SSO_AF_WS_NE_DIGESTX(a) (0x902800ull | (uint64_t)(a) << 3)
#define SSO_AF_WS_NE_DIGESTX_W1S(a) (0x902900ull | (uint64_t)(a) << 3)
#define SSO_AF_IENTX_TAG(a) (0xa00000ull | (uint64_t)(a) << 3)
#define SSO_AF_IENTX_GRP(a) (0xa20000ull | (uint64_t)(a) << 3)
#define SSO_AF_IENTX_PENDTAG(a) (0xa40000ull | (uint64_t)(a) << 3)
#define SSO_AF_IENTX_LINKS(a) (0xa60000ull | (uint64_t)(a) << 3)
#define SSO_AF_IENTX_QLINKS(a) (0xa80000ull | (uint64_t)(a) << 3)
#define SSO_AF_IENTX_WQP(a) (0xaa0000ull | (uint64_t)(a) << 3)
#define SSO_AF_TAQX_LINK(a) (0xc00000ull | (uint64_t)(a) << 3)
#define SSO_AF_TAQX_WAEX_TAG(a, b) \
(0xe00000ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
#define SSO_AF_TAQX_WAEX_WQP(a, b) \
(0xe00008ull | (uint64_t)(a) << 8 | (uint64_t)(b) << 4)
#define SSO_LF_GGRP_OP_ADD_WORK0 (0x0ull)
#define SSO_LF_GGRP_OP_ADD_WORK1 (0x8ull)
#define SSO_LF_GGRP_QCTL (0x20ull)
#define SSO_LF_GGRP_EXE_DIS (0x80ull)
#define SSO_LF_GGRP_INT (0x100ull)
#define SSO_LF_GGRP_INT_W1S (0x108ull)
#define SSO_LF_GGRP_INT_ENA_W1S (0x110ull)
#define SSO_LF_GGRP_INT_ENA_W1C (0x118ull)
#define SSO_LF_GGRP_INT_THR (0x140ull)
#define SSO_LF_GGRP_INT_CNT (0x180ull)
#define SSO_LF_GGRP_XAQ_CNT (0x1b0ull)
#define SSO_LF_GGRP_AQ_CNT (0x1c0ull)
#define SSO_LF_GGRP_AQ_THR (0x1e0ull)
#define SSO_LF_GGRP_MISC_CNT (0x200ull)
#define SSO_AF_IAQ_FREE_CNT_MASK 0x3FFFull
#define SSO_AF_IAQ_RSVD_FREE_MASK 0x3FFFull
#define SSO_AF_IAQ_RSVD_FREE_SHIFT 16
#define SSO_AF_IAQ_FREE_CNT_MAX SSO_AF_IAQ_FREE_CNT_MASK
#define SSO_AF_AW_ADD_RSVD_FREE_MASK 0x3FFFull
#define SSO_AF_AW_ADD_RSVD_FREE_SHIFT 16
#define SSO_HWGRP_IAQ_MAX_THR_MASK 0x3FFFull
#define SSO_HWGRP_IAQ_RSVD_THR_MASK 0x3FFFull
#define SSO_HWGRP_IAQ_MAX_THR_SHIFT 32
#define SSO_HWGRP_IAQ_RSVD_THR 0x2
#define SSO_AF_TAQ_FREE_CNT_MASK 0x7FFull
#define SSO_AF_TAQ_RSVD_FREE_MASK 0x7FFull
#define SSO_AF_TAQ_RSVD_FREE_SHIFT 16
#define SSO_AF_TAQ_FREE_CNT_MAX SSO_AF_TAQ_FREE_CNT_MASK
#define SSO_AF_TAQ_ADD_RSVD_FREE_MASK 0x1FFFull
#define SSO_AF_TAQ_ADD_RSVD_FREE_SHIFT 16
#define SSO_HWGRP_TAQ_MAX_THR_MASK 0x7FFull
#define SSO_HWGRP_TAQ_RSVD_THR_MASK 0x7FFull
#define SSO_HWGRP_TAQ_MAX_THR_SHIFT 32
#define SSO_HWGRP_TAQ_RSVD_THR 0x3
#define SSO_HWGRP_PRI_AFF_MASK 0xFull
#define SSO_HWGRP_PRI_AFF_SHIFT 8
#define SSO_HWGRP_PRI_WGT_MASK 0x3Full
#define SSO_HWGRP_PRI_WGT_SHIFT 16
#define SSO_HWGRP_PRI_WGT_LEFT_MASK 0x3Full
#define SSO_HWGRP_PRI_WGT_LEFT_SHIFT 24
#define SSO_HWGRP_AW_CFG_RWEN BIT_ULL(0)
#define SSO_HWGRP_AW_CFG_LDWB BIT_ULL(1)
#define SSO_HWGRP_AW_CFG_LDT BIT_ULL(2)
#define SSO_HWGRP_AW_CFG_STT BIT_ULL(3)
#define SSO_HWGRP_AW_CFG_XAQ_BYP_DIS BIT_ULL(4)
#define SSO_HWGRP_AW_STS_TPTR_VLD BIT_ULL(8)
#define SSO_HWGRP_AW_STS_NPA_FETCH BIT_ULL(9)
#define SSO_HWGRP_AW_STS_XAQ_BUFSC_MASK 0x7ull
#define SSO_HWGRP_AW_STS_INIT_STS 0x18ull
/* Enum offsets */
#define SSO_LF_INT_VEC_GRP (0x0ull)
#define SSO_AF_INT_VEC_ERR0 (0x0ull)
#define SSO_AF_INT_VEC_ERR2 (0x1ull)
#define SSO_AF_INT_VEC_RAS (0x2ull)
#define SSO_WA_IOBN (0x0ull)
#define SSO_WA_NIXRX (0x1ull)
#define SSO_WA_CPT (0x2ull)
#define SSO_WA_ADDWQ (0x3ull)
#define SSO_WA_DPI (0x4ull)
#define SSO_WA_NIXTX (0x5ull)
#define SSO_WA_TIM (0x6ull)
#define SSO_WA_ZIP (0x7ull)
#define SSO_TT_ORDERED (0x0ull)
#define SSO_TT_ATOMIC (0x1ull)
#define SSO_TT_UNTAGGED (0x2ull)
#define SSO_TT_EMPTY (0x3ull)
/* Structures definitions */
View File

@ -1,56 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef __OTX2_SSOW_HW_H__
#define __OTX2_SSOW_HW_H__
/* Register offsets */
#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x10ull)
#define SSOW_AF_LF_HWS_RST (0x30ull)
#define SSOW_PRIV_LFX_HWS_CFG(a) (0x1000ull | (uint64_t)(a) << 3)
#define SSOW_PRIV_LFX_HWS_INT_CFG(a) (0x2000ull | (uint64_t)(a) << 3)
#define SSOW_AF_SCRATCH_WS (0x100000ull)
#define SSOW_AF_SCRATCH_GW (0x200000ull)
#define SSOW_AF_SCRATCH_AW (0x300000ull)
#define SSOW_LF_GWS_LINKS (0x10ull)
#define SSOW_LF_GWS_PENDWQP (0x40ull)
#define SSOW_LF_GWS_PENDSTATE (0x50ull)
#define SSOW_LF_GWS_NW_TIM (0x70ull)
#define SSOW_LF_GWS_GRPMSK_CHG (0x80ull)
#define SSOW_LF_GWS_INT (0x100ull)
#define SSOW_LF_GWS_INT_W1S (0x108ull)
#define SSOW_LF_GWS_INT_ENA_W1S (0x110ull)
#define SSOW_LF_GWS_INT_ENA_W1C (0x118ull)
#define SSOW_LF_GWS_TAG (0x200ull)
#define SSOW_LF_GWS_WQP (0x210ull)
#define SSOW_LF_GWS_SWTP (0x220ull)
#define SSOW_LF_GWS_PENDTAG (0x230ull)
#define SSOW_LF_GWS_OP_ALLOC_WE (0x400ull)
#define SSOW_LF_GWS_OP_GET_WORK (0x600ull)
#define SSOW_LF_GWS_OP_SWTAG_FLUSH (0x800ull)
#define SSOW_LF_GWS_OP_SWTAG_UNTAG (0x810ull)
#define SSOW_LF_GWS_OP_SWTP_CLR (0x820ull)
#define SSOW_LF_GWS_OP_UPD_WQP_GRP0 (0x830ull)
#define SSOW_LF_GWS_OP_UPD_WQP_GRP1 (0x838ull)
#define SSOW_LF_GWS_OP_DESCHED (0x880ull)
#define SSOW_LF_GWS_OP_DESCHED_NOSCH (0x8c0ull)
#define SSOW_LF_GWS_OP_SWTAG_DESCHED (0x980ull)
#define SSOW_LF_GWS_OP_SWTAG_NOSCHED (0x9c0ull)
#define SSOW_LF_GWS_OP_CLR_NSCHED0 (0xa00ull)
#define SSOW_LF_GWS_OP_CLR_NSCHED1 (0xa08ull)
#define SSOW_LF_GWS_OP_SWTP_SET (0xc00ull)
#define SSOW_LF_GWS_OP_SWTAG_NORM (0xc10ull)
#define SSOW_LF_GWS_OP_SWTAG_FULL0 (0xc20ull)
#define SSOW_LF_GWS_OP_SWTAG_FULL1 (0xc28ull)
#define SSOW_LF_GWS_OP_GWC_INVAL (0xe00ull)
/* Enum offsets */
#define SSOW_LF_INT_VEC_IOP (0x0ull)
#endif /* __OTX2_SSOW_HW_H__ */


@ -1,34 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef __OTX2_TIM_HW_H__
#define __OTX2_TIM_HW_H__
/* TIM */
#define TIM_AF_CONST (0x90)
#define TIM_PRIV_LFX_CFG(a) (0x20000 | (a) << 3)
#define TIM_PRIV_LFX_INT_CFG(a) (0x24000 | (a) << 3)
#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000)
#define TIM_AF_BLK_RST (0x10)
#define TIM_AF_LF_RST (0x20)
#define TIM_AF_BLK_RST (0x10)
#define TIM_AF_RINGX_GMCTL(a) (0x2000 | (a) << 3)
#define TIM_AF_RINGX_CTL0(a) (0x4000 | (a) << 3)
#define TIM_AF_RINGX_CTL1(a) (0x6000 | (a) << 3)
#define TIM_AF_RINGX_CTL2(a) (0x8000 | (a) << 3)
#define TIM_AF_FLAGS_REG (0x80)
#define TIM_AF_FLAGS_REG_ENA_TIM BIT_ULL(0)
#define TIM_AF_RINGX_CTL1_ENA BIT_ULL(47)
#define TIM_AF_RINGX_CTL1_RCF_BUSY BIT_ULL(50)
#define TIM_AF_RINGX_CLT1_CLK_10NS (0)
#define TIM_AF_RINGX_CLT1_CLK_GPIO (1)
#define TIM_AF_RINGX_CLT1_CLK_GTI (2)
#define TIM_AF_RINGX_CLT1_CLK_PTP (3)
/* ENUMS */
#define TIM_LF_INT_VEC_NRSPERR_INT (0x0ull)
#define TIM_LF_INT_VEC_RAS_INT (0x1ull)
#endif /* __OTX2_TIM_HW_H__ */


@ -1,24 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(C) 2019 Marvell International Ltd.
#
if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
build = false
reason = 'only supported on 64-bit Linux'
subdir_done()
endif
sources= files(
'otx2_common.c',
'otx2_dev.c',
'otx2_irq.c',
'otx2_mbox.c',
'otx2_sec_idev.c',
)
deps = ['eal', 'pci', 'ethdev', 'kvargs']
includes += include_directories(
'../../common/octeontx2',
'../../mempool/octeontx2',
'../../bus/pci',
)


@ -1,216 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include "otx2_common.h"
#include "otx2_dev.h"
#include "otx2_mbox.h"
/**
* @internal
* Set default NPA configuration.
*/
void
otx2_npa_set_defaults(struct otx2_idev_cfg *idev)
{
idev->npa_pf_func = 0;
rte_atomic16_set(&idev->npa_refcnt, 0);
}
/**
* @internal
* Get intra device config structure.
*/
struct otx2_idev_cfg *
otx2_intra_dev_get_cfg(void)
{
const char name[] = "octeontx2_intra_device_conf";
const struct rte_memzone *mz;
struct otx2_idev_cfg *idev;
mz = rte_memzone_lookup(name);
if (mz != NULL)
return mz->addr;
/* Request for the first time */
mz = rte_memzone_reserve_aligned(name, sizeof(struct otx2_idev_cfg),
SOCKET_ID_ANY, 0, OTX2_ALIGN);
if (mz != NULL) {
idev = mz->addr;
idev->sso_pf_func = 0;
idev->npa_lf = NULL;
otx2_npa_set_defaults(idev);
return idev;
}
return NULL;
}
/**
* @internal
* Get SSO PF_FUNC.
*/
uint16_t
otx2_sso_pf_func_get(void)
{
struct otx2_idev_cfg *idev;
uint16_t sso_pf_func;
sso_pf_func = 0;
idev = otx2_intra_dev_get_cfg();
if (idev != NULL)
sso_pf_func = idev->sso_pf_func;
return sso_pf_func;
}
/**
* @internal
* Set SSO PF_FUNC.
*/
void
otx2_sso_pf_func_set(uint16_t sso_pf_func)
{
struct otx2_idev_cfg *idev;
idev = otx2_intra_dev_get_cfg();
if (idev != NULL) {
idev->sso_pf_func = sso_pf_func;
rte_smp_wmb();
}
}
/**
* @internal
* Get NPA PF_FUNC.
*/
uint16_t
otx2_npa_pf_func_get(void)
{
struct otx2_idev_cfg *idev;
uint16_t npa_pf_func;
npa_pf_func = 0;
idev = otx2_intra_dev_get_cfg();
if (idev != NULL)
npa_pf_func = idev->npa_pf_func;
return npa_pf_func;
}
/**
* @internal
* Get NPA lf object.
*/
struct otx2_npa_lf *
otx2_npa_lf_obj_get(void)
{
struct otx2_idev_cfg *idev;
idev = otx2_intra_dev_get_cfg();
if (idev != NULL && rte_atomic16_read(&idev->npa_refcnt))
return idev->npa_lf;
return NULL;
}
/**
* @internal
 * Is NPA lf active for the given device?
*/
int
otx2_npa_lf_active(void *otx2_dev)
{
struct otx2_dev *dev = otx2_dev;
struct otx2_idev_cfg *idev;
/* Check if npalf is actively used on this dev */
idev = otx2_intra_dev_get_cfg();
if (!idev || !idev->npa_lf || idev->npa_lf->mbox != dev->mbox)
return 0;
return rte_atomic16_read(&idev->npa_refcnt);
}
/*
* @internal
* Gets reference only to existing NPA LF object.
*/
int otx2_npa_lf_obj_ref(void)
{
struct otx2_idev_cfg *idev;
uint16_t cnt;
int rc;
idev = otx2_intra_dev_get_cfg();
/* Check if ref not possible */
if (idev == NULL)
return -EINVAL;
/* Get ref only if > 0 */
cnt = rte_atomic16_read(&idev->npa_refcnt);
while (cnt != 0) {
rc = rte_atomic16_cmpset(&idev->npa_refcnt_u16, cnt, cnt + 1);
if (rc)
break;
cnt = rte_atomic16_read(&idev->npa_refcnt);
}
return cnt ? 0 : -EINVAL;
}
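For readers skimming the removed code: otx2_npa_lf_obj_ref() only bumps the reference count while it is already non-zero, so a released NPA LF can never be resurrected by a late caller. A generic C11 sketch of the same acquire-if-nonzero pattern (the refcnt parameter is hypothetical, not the driver's own type):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool
ref_get_if_nonzero(_Atomic uint16_t *refcnt)
{
	uint16_t cnt = atomic_load(refcnt);

	while (cnt != 0) {
		/* On failure the CAS reloads cnt, so the loop simply retries */
		if (atomic_compare_exchange_weak(refcnt, &cnt, (uint16_t)(cnt + 1)))
			return true;  /* reference taken */
	}
	return false;  /* object already released; do not resurrect it */
}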
static int
parse_npa_lock_mask(const char *key, const char *value, void *extra_args)
{
RTE_SET_USED(key);
uint64_t val;
val = strtoull(value, NULL, 16);
*(uint64_t *)extra_args = val;
return 0;
}
/*
* @internal
* Parse common device arguments
*/
void otx2_parse_common_devargs(struct rte_kvargs *kvlist)
{
struct otx2_idev_cfg *idev;
uint64_t npa_lock_mask = 0;
idev = otx2_intra_dev_get_cfg();
if (idev == NULL)
return;
rte_kvargs_process(kvlist, OTX2_NPA_LOCK_MASK,
&parse_npa_lock_mask, &npa_lock_mask);
idev->npa_lock_mask = npa_lock_mask;
}
RTE_LOG_REGISTER(otx2_logtype_base, pmd.octeontx2.base, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_mbox, pmd.octeontx2.mbox, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_npa, pmd.mempool.octeontx2, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_nix, pmd.net.octeontx2, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_npc, pmd.net.octeontx2.flow, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_tm, pmd.net.octeontx2.tm, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_sso, pmd.event.octeontx2, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_tim, pmd.event.octeontx2.timer, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_dpi, pmd.raw.octeontx2.dpi, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_ep, pmd.raw.octeontx2.ep, NOTICE);
RTE_LOG_REGISTER(otx2_logtype_ree, pmd.regex.octeontx2, NOTICE);


@ -1,179 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef _OTX2_COMMON_H_
#define _OTX2_COMMON_H_
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_io.h>
#include "hw/otx2_rvu.h"
#include "hw/otx2_nix.h"
#include "hw/otx2_npc.h"
#include "hw/otx2_npa.h"
#include "hw/otx2_sdp.h"
#include "hw/otx2_sso.h"
#include "hw/otx2_ssow.h"
#include "hw/otx2_tim.h"
#include "hw/otx2_ree.h"
/* Alignment */
#define OTX2_ALIGN 128
/* Bits manipulation */
#ifndef BIT_ULL
#define BIT_ULL(nr) (1ULL << (nr))
#endif
#ifndef BIT
#define BIT(nr) (1UL << (nr))
#endif
#ifndef BITS_PER_LONG
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#endif
#ifndef BITS_PER_LONG_LONG
#define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)
#endif
#ifndef GENMASK
#define GENMASK(h, l) \
(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#endif
#ifndef GENMASK_ULL
#define GENMASK_ULL(h, l) \
(((~0ULL) - (1ULL << (l)) + 1) & \
(~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
#endif
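For reference, these Linux-style helpers build single-bit and contiguous-bit masks; a small hand-checked sketch (the values are illustrative, not taken from the sources):

#include <assert.h>

static void
bitmask_helpers_sketch(void)
{
	assert(BIT_ULL(3) == 0x8ULL);             /* single bit 3           */
	assert(GENMASK(7, 4) == 0xF0UL);          /* bits 4..7, inclusive   */
	assert(GENMASK_ULL(15, 8) == 0xFF00ULL);  /* bits 8..15, inclusive  */
}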
#define OTX2_NPA_LOCK_MASK "npa_lock_mask"
/* Intra device related functions */
struct otx2_npa_lf;
struct otx2_idev_cfg {
uint16_t sso_pf_func;
uint16_t npa_pf_func;
struct otx2_npa_lf *npa_lf;
RTE_STD_C11
union {
rte_atomic16_t npa_refcnt;
uint16_t npa_refcnt_u16;
};
uint64_t npa_lock_mask;
};
__rte_internal
struct otx2_idev_cfg *otx2_intra_dev_get_cfg(void);
__rte_internal
void otx2_sso_pf_func_set(uint16_t sso_pf_func);
__rte_internal
uint16_t otx2_sso_pf_func_get(void);
__rte_internal
uint16_t otx2_npa_pf_func_get(void);
__rte_internal
struct otx2_npa_lf *otx2_npa_lf_obj_get(void);
__rte_internal
void otx2_npa_set_defaults(struct otx2_idev_cfg *idev);
__rte_internal
int otx2_npa_lf_active(void *dev);
__rte_internal
int otx2_npa_lf_obj_ref(void);
__rte_internal
void otx2_parse_common_devargs(struct rte_kvargs *kvlist);
/* Log */
extern int otx2_logtype_base;
extern int otx2_logtype_mbox;
extern int otx2_logtype_npa;
extern int otx2_logtype_nix;
extern int otx2_logtype_sso;
extern int otx2_logtype_npc;
extern int otx2_logtype_tm;
extern int otx2_logtype_tim;
extern int otx2_logtype_dpi;
extern int otx2_logtype_ep;
extern int otx2_logtype_ree;
#define otx2_err(fmt, args...) \
RTE_LOG(ERR, PMD, "%s():%u " fmt "\n", \
__func__, __LINE__, ## args)
#define otx2_info(fmt, args...) \
RTE_LOG(INFO, PMD, fmt"\n", ## args)
#define otx2_dbg(subsystem, fmt, args...) \
rte_log(RTE_LOG_DEBUG, otx2_logtype_ ## subsystem, \
"[%s] %s():%u " fmt "\n", \
#subsystem, __func__, __LINE__, ##args)
#define otx2_base_dbg(fmt, ...) otx2_dbg(base, fmt, ##__VA_ARGS__)
#define otx2_mbox_dbg(fmt, ...) otx2_dbg(mbox, fmt, ##__VA_ARGS__)
#define otx2_npa_dbg(fmt, ...) otx2_dbg(npa, fmt, ##__VA_ARGS__)
#define otx2_nix_dbg(fmt, ...) otx2_dbg(nix, fmt, ##__VA_ARGS__)
#define otx2_sso_dbg(fmt, ...) otx2_dbg(sso, fmt, ##__VA_ARGS__)
#define otx2_npc_dbg(fmt, ...) otx2_dbg(npc, fmt, ##__VA_ARGS__)
#define otx2_tm_dbg(fmt, ...) otx2_dbg(tm, fmt, ##__VA_ARGS__)
#define otx2_tim_dbg(fmt, ...) otx2_dbg(tim, fmt, ##__VA_ARGS__)
#define otx2_dpi_dbg(fmt, ...) otx2_dbg(dpi, fmt, ##__VA_ARGS__)
#define otx2_sdp_dbg(fmt, ...) otx2_dbg(ep, fmt, ##__VA_ARGS__)
#define otx2_ree_dbg(fmt, ...) otx2_dbg(ree, fmt, ##__VA_ARGS__)
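A minimal usage sketch for the removed log macros: otx2_err() is always compiled in, while the per-subsystem otx2_*_dbg() variants are gated by their registered log type. The function, rc and nb_rxq names below are made up for illustration.

static void
log_macro_usage_sketch(int rc, unsigned int nb_rxq)
{
	otx2_err("Failed to configure device, rc=%d", rc);  /* always-on error log */
	otx2_nix_dbg("Setting up %u Rx queues", nb_rxq);    /* emitted only when the
							      * pmd.net.octeontx2 log
							      * level is DEBUG */
}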
/* PCI IDs */
#define PCI_VENDOR_ID_CAVIUM 0x177D
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
#define PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_PF 0xA0F9
#define PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_VF 0xA0FA
#define PCI_DEVID_OCTEONTX2_RVU_NPA_PF 0xA0FB
#define PCI_DEVID_OCTEONTX2_RVU_NPA_VF 0xA0FC
#define PCI_DEVID_OCTEONTX2_RVU_CPT_PF 0xA0FD
#define PCI_DEVID_OCTEONTX2_RVU_CPT_VF 0xA0FE
#define PCI_DEVID_OCTEONTX2_RVU_AF_VF 0xA0f8
#define PCI_DEVID_OCTEONTX2_DPI_VF 0xA081
#define PCI_DEVID_OCTEONTX2_EP_NET_VF 0xB203 /* OCTEON TX2 EP mode */
/* OCTEON TX2 98xx EP mode */
#define PCI_DEVID_CN98XX_EP_NET_VF 0xB103
#define PCI_DEVID_OCTEONTX2_EP_RAW_VF 0xB204 /* OCTEON TX2 EP mode */
#define PCI_DEVID_OCTEONTX2_RVU_SDP_PF 0xA0f6
#define PCI_DEVID_OCTEONTX2_RVU_SDP_VF 0xA0f7
#define PCI_DEVID_OCTEONTX2_RVU_REE_PF 0xA0f4
#define PCI_DEVID_OCTEONTX2_RVU_REE_VF 0xA0f5
/*
* REVID for RVU PCIe devices.
* Bits 0..1: minor pass
* Bits 3..2: major pass
* Bits 7..4: midr id, 0:96, 1:95, 2:loki, f:unknown
*/
#define RVU_PCI_REV_MIDR_ID(rev_id) (rev_id >> 4)
#define RVU_PCI_REV_MAJOR(rev_id) ((rev_id >> 2) & 0x3)
#define RVU_PCI_REV_MINOR(rev_id) (rev_id & 0x3)
#define RVU_PCI_CN96XX_MIDR_ID 0x0
#define RVU_PCI_CNF95XX_MIDR_ID 0x1
/* PCI Config offsets */
#define RVU_PCI_REVISION_ID 0x08
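The layout comment above can be read as follows; a worked example with a made-up revision byte of 0x14 (CNF95xx, major pass 1, minor pass 0), illustration only:

static void
decode_rev_id_sketch(void)
{
	uint8_t rev_id = 0x14;  /* hypothetical value, not from the sources */

	(void)RVU_PCI_REV_MIDR_ID(rev_id); /* bits 7..4 = 0x1 -> CNF95xx   */
	(void)RVU_PCI_REV_MAJOR(rev_id);   /* bits 3..2 = 0x1 -> major pass 1 */
	(void)RVU_PCI_REV_MINOR(rev_id);   /* bits 1..0 = 0x0 -> minor pass 0 */
}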
/* IO Access */
#define otx2_read64(addr) rte_read64_relaxed((void *)(addr))
#define otx2_write64(val, addr) rte_write64_relaxed((val), (void *)(addr))
#if defined(RTE_ARCH_ARM64)
#include "otx2_io_arm64.h"
#else
#include "otx2_io_generic.h"
#endif
/* Fastpath lookup */
#define OTX2_NIX_FASTPATH_LOOKUP_MEM "otx2_nix_fastpath_lookup_mem"
#define OTX2_NIX_SA_TBL_START (4096*4 + 69632*2)
#endif /* _OTX2_COMMON_H_ */

File diff suppressed because it is too large.


@ -1,161 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef _OTX2_DEV_H
#define _OTX2_DEV_H
#include <rte_bus_pci.h>
#include "otx2_common.h"
#include "otx2_irq.h"
#include "otx2_mbox.h"
#include "otx2_mempool.h"
/* Common HWCAP flags. Use from LSB bits */
#define OTX2_HWCAP_F_VF BIT_ULL(8) /* VF device */
#define otx2_dev_is_vf(dev) (dev->hwcap & OTX2_HWCAP_F_VF)
#define otx2_dev_is_pf(dev) (!(dev->hwcap & OTX2_HWCAP_F_VF))
#define otx2_dev_is_lbk(dev) ((dev->hwcap & OTX2_HWCAP_F_VF) && \
(dev->tx_chan_base < 0x700))
#define otx2_dev_revid(dev) (dev->hwcap & 0xFF)
#define otx2_dev_is_sdp(dev) (dev->sdp_link)
#define otx2_dev_is_vf_or_sdp(dev) \
(otx2_dev_is_vf(dev) || otx2_dev_is_sdp(dev))
#define otx2_dev_is_A0(dev) \
((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
(RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0))
#define otx2_dev_is_Ax(dev) \
((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0))
#define otx2_dev_is_95xx_A0(dev) \
((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
(RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0) && \
(RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x1))
#define otx2_dev_is_95xx_Ax(dev) \
((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
(RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x1))
#define otx2_dev_is_96xx_A0(dev) \
((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
(RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0) && \
(RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))
#define otx2_dev_is_96xx_Ax(dev) \
((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
(RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))
#define otx2_dev_is_96xx_Cx(dev) \
((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x2) && \
(RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))
#define otx2_dev_is_96xx_C0(dev) \
((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x2) && \
(RVU_PCI_REV_MINOR(otx2_dev_revid(dev)) == 0x0) && \
(RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x0))
#define otx2_dev_is_98xx(dev) \
(RVU_PCI_REV_MIDR_ID(otx2_dev_revid(dev)) == 0x3)
struct otx2_dev;
/* Link status update callback */
typedef void (*otx2_link_status_update_t)(struct otx2_dev *dev,
struct cgx_link_user_info *link);
/* PTP info callback */
typedef int (*otx2_ptp_info_t)(struct otx2_dev *dev, bool ptp_en);
/* Link status get callback */
typedef void (*otx2_link_status_get_t)(struct otx2_dev *dev,
struct cgx_link_user_info *link);
struct otx2_dev_ops {
otx2_link_status_update_t link_status_update;
otx2_ptp_info_t ptp_info_update;
otx2_link_status_get_t link_status_get;
};
#define OTX2_DEV \
int node __rte_cache_aligned; \
uint16_t pf; \
int16_t vf; \
uint16_t pf_func; \
uint8_t mbox_active; \
bool drv_inited; \
uint64_t active_vfs[MAX_VFPF_DWORD_BITS]; \
uintptr_t bar2; \
uintptr_t bar4; \
struct otx2_mbox mbox_local; \
struct otx2_mbox mbox_up; \
struct otx2_mbox mbox_vfpf; \
struct otx2_mbox mbox_vfpf_up; \
otx2_intr_t intr; \
int timer_set; /* ~0 : no alarm handling */ \
uint64_t hwcap; \
struct otx2_npa_lf npalf; \
struct otx2_mbox *mbox; \
uint16_t maxvf; \
const struct otx2_dev_ops *ops
struct otx2_dev {
OTX2_DEV;
};
__rte_internal
int otx2_dev_priv_init(struct rte_pci_device *pci_dev, void *otx2_dev);
/* Common dev init and fini routines */
static __rte_always_inline int
otx2_dev_init(struct rte_pci_device *pci_dev, void *otx2_dev)
{
struct otx2_dev *dev = otx2_dev;
uint8_t rev_id;
int rc;
rc = rte_pci_read_config(pci_dev, &rev_id,
1, RVU_PCI_REVISION_ID);
if (rc != 1) {
otx2_err("Failed to read pci revision id, rc=%d", rc);
return rc;
}
dev->hwcap = rev_id;
return otx2_dev_priv_init(pci_dev, otx2_dev);
}
__rte_internal
void otx2_dev_fini(struct rte_pci_device *pci_dev, void *otx2_dev);
__rte_internal
int otx2_dev_active_vfs(void *otx2_dev);
#define RVU_PFVF_PF_SHIFT 10
#define RVU_PFVF_PF_MASK 0x3F
#define RVU_PFVF_FUNC_SHIFT 0
#define RVU_PFVF_FUNC_MASK 0x3FF
static inline int
otx2_get_vf(uint16_t pf_func)
{
return (((pf_func >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK) - 1);
}
static inline int
otx2_get_pf(uint16_t pf_func)
{
return (pf_func >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
static inline int
otx2_pfvf_func(int pf, int vf)
{
return (pf << RVU_PFVF_PF_SHIFT) | ((vf << RVU_PFVF_FUNC_SHIFT) + 1);
}
static inline int
otx2_is_afvf(uint16_t pf_func)
{
return !(pf_func & ~RVU_PFVF_FUNC_MASK);
}
#endif /* _OTX2_DEV_H */
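For reference, the RVU_PFVF_* constants above pack a 6-bit PF index (at bit 10) and a 10-bit function index (the VF number plus one, at bit 0) into a single pf_func word; a short sketch with made-up indices:

static void
pf_func_encoding_sketch(void)
{
	uint16_t pf_func = otx2_pfvf_func(2, 0); /* PF 2, VF 0 -> 0x0801 */

	(void)otx2_get_pf(pf_func);  /* = 2 */
	(void)otx2_get_vf(pf_func);  /* = 0 */
	/* otx2_is_afvf() is true only when the PF bits are zero (an AF VF) */
}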


@ -1,114 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef _OTX2_IO_ARM64_H_
#define _OTX2_IO_ARM64_H_
#define otx2_load_pair(val0, val1, addr) ({ \
asm volatile( \
"ldp %x[x0], %x[x1], [%x[p1]]" \
:[x0]"=r"(val0), [x1]"=r"(val1) \
:[p1]"r"(addr) \
); })
#define otx2_store_pair(val0, val1, addr) ({ \
asm volatile( \
"stp %x[x0], %x[x1], [%x[p1],#0]!" \
::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
); })
#define otx2_prefetch_store_keep(ptr) ({\
asm volatile("prfm pstl1keep, [%x0]\n" : : "r" (ptr)); })
#if defined(__ARM_FEATURE_SVE)
#define __LSE_PREAMBLE " .cpu generic+lse+sve\n"
#else
#define __LSE_PREAMBLE " .cpu generic+lse\n"
#endif
static __rte_always_inline uint64_t
otx2_atomic64_add_nosync(int64_t incr, int64_t *ptr)
{
uint64_t result;
/* Atomic add with no ordering */
asm volatile (
__LSE_PREAMBLE
"ldadd %x[i], %x[r], [%[b]]"
: [r] "=r" (result), "+m" (*ptr)
: [i] "r" (incr), [b] "r" (ptr)
: "memory");
return result;
}
static __rte_always_inline uint64_t
otx2_atomic64_add_sync(int64_t incr, int64_t *ptr)
{
uint64_t result;
/* Atomic add with ordering */
asm volatile (
__LSE_PREAMBLE
"ldadda %x[i], %x[r], [%[b]]"
: [r] "=r" (result), "+m" (*ptr)
: [i] "r" (incr), [b] "r" (ptr)
: "memory");
return result;
}
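These helpers map to the Arm LSE LDADD/LDADDA instructions; roughly equivalent portable sketches using the GCC __atomic builtins are shown below (an approximation for clarity, not the driver's implementation):

static inline uint64_t
atomic64_fetch_add_relaxed_sketch(int64_t incr, int64_t *ptr)
{
	/* ~ otx2_atomic64_add_nosync(): relaxed fetch-and-add (LDADD) */
	return (uint64_t)__atomic_fetch_add(ptr, incr, __ATOMIC_RELAXED);
}

static inline uint64_t
atomic64_fetch_add_acquire_sketch(int64_t incr, int64_t *ptr)
{
	/* ~ otx2_atomic64_add_sync(): acquire-ordered fetch-and-add (LDADDA) */
	return (uint64_t)__atomic_fetch_add(ptr, incr, __ATOMIC_ACQUIRE);
}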
static __rte_always_inline uint64_t
otx2_lmt_submit(rte_iova_t io_address)
{
uint64_t result;
asm volatile (
__LSE_PREAMBLE
"ldeor xzr,%x[rf],[%[rs]]" :
[rf] "=r"(result): [rs] "r"(io_address));
return result;
}
static __rte_always_inline uint64_t
otx2_lmt_submit_release(rte_iova_t io_address)
{
uint64_t result;
asm volatile (
__LSE_PREAMBLE
"ldeorl xzr,%x[rf],[%[rs]]" :
[rf] "=r"(result) : [rs] "r"(io_address));
return result;
}
static __rte_always_inline void
otx2_lmt_mov(void *out, const void *in, const uint32_t lmtext)
{
volatile const __uint128_t *src128 = (const __uint128_t *)in;
volatile __uint128_t *dst128 = (__uint128_t *)out;
dst128[0] = src128[0];
dst128[1] = src128[1];
/* lmtext receives the following value:
* 1: NIX_SUBDC_EXT needed i.e. tx vlan case
* 2: NIX_SUBDC_EXT + NIX_SUBDC_MEM i.e. tstamp case
*/
if (lmtext) {
dst128[2] = src128[2];
if (lmtext > 1)
dst128[3] = src128[3];
}
}
static __rte_always_inline void
otx2_lmt_mov_seg(void *out, const void *in, const uint16_t segdw)
{
volatile const __uint128_t *src128 = (const __uint128_t *)in;
volatile __uint128_t *dst128 = (__uint128_t *)out;
uint8_t i;
for (i = 0; i < segdw; i++)
dst128[i] = src128[i];
}
#undef __LSE_PREAMBLE
#endif /* _OTX2_IO_ARM64_H_ */


@ -1,75 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef _OTX2_IO_GENERIC_H_
#define _OTX2_IO_GENERIC_H_
#include <string.h>
#define otx2_load_pair(val0, val1, addr) \
do { \
val0 = rte_read64_relaxed((void *)(addr)); \
val1 = rte_read64_relaxed((uint8_t *)(addr) + 8); \
} while (0)
#define otx2_store_pair(val0, val1, addr) \
do { \
rte_write64_relaxed(val0, (void *)(addr)); \
rte_write64_relaxed(val1, (((uint8_t *)(addr)) + 8)); \
} while (0)
#define otx2_prefetch_store_keep(ptr) do {} while (0)
static inline uint64_t
otx2_atomic64_add_nosync(int64_t incr, int64_t *ptr)
{
RTE_SET_USED(ptr);
RTE_SET_USED(incr);
return 0;
}
static inline uint64_t
otx2_atomic64_add_sync(int64_t incr, int64_t *ptr)
{
RTE_SET_USED(ptr);
RTE_SET_USED(incr);
return 0;
}
static inline int64_t
otx2_lmt_submit(uint64_t io_address)
{
RTE_SET_USED(io_address);
return 0;
}
static inline int64_t
otx2_lmt_submit_release(uint64_t io_address)
{
RTE_SET_USED(io_address);
return 0;
}
static __rte_always_inline void
otx2_lmt_mov(void *out, const void *in, const uint32_t lmtext)
{
/* Copy four words if lmtext = 0
* six words if lmtext = 1
 * eight words if lmtext = 2
*/
memcpy(out, in, (4 + (2 * lmtext)) * sizeof(uint64_t));
}
static __rte_always_inline void
otx2_lmt_mov_seg(void *out, const void *in, const uint16_t segdw)
{
RTE_SET_USED(out);
RTE_SET_USED(in);
RTE_SET_USED(segdw);
}
#endif /* _OTX2_IO_GENERIC_H_ */


@ -1,288 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#include <rte_alarm.h>
#include <rte_common.h>
#include <rte_eal.h>
#include <rte_interrupts.h>
#include "otx2_common.h"
#include "otx2_irq.h"
#ifdef RTE_EAL_VFIO
#include <inttypes.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#define MAX_INTR_VEC_ID RTE_MAX_RXTX_INTR_VEC_ID
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
sizeof(int) * (MAX_INTR_VEC_ID))
static int
irq_get_info(struct rte_intr_handle *intr_handle)
{
struct vfio_irq_info irq = { .argsz = sizeof(irq) };
int rc, vfio_dev_fd;
irq.index = VFIO_PCI_MSIX_IRQ_INDEX;
vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
rc = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
if (rc < 0) {
otx2_err("Failed to get IRQ info rc=%d errno=%d", rc, errno);
return rc;
}
otx2_base_dbg("Flags=0x%x index=0x%x count=0x%x max_intr_vec_id=0x%x",
irq.flags, irq.index, irq.count, MAX_INTR_VEC_ID);
if (irq.count > MAX_INTR_VEC_ID) {
otx2_err("HW max=%d > MAX_INTR_VEC_ID: %d",
rte_intr_max_intr_get(intr_handle),
MAX_INTR_VEC_ID);
if (rte_intr_max_intr_set(intr_handle, MAX_INTR_VEC_ID))
return -1;
} else {
if (rte_intr_max_intr_set(intr_handle, irq.count))
return -1;
}
return 0;
}
static int
irq_config(struct rte_intr_handle *intr_handle, unsigned int vec)
{
char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
struct vfio_irq_set *irq_set;
int len, rc, vfio_dev_fd;
int32_t *fd_ptr;
if (vec > (uint32_t)rte_intr_max_intr_get(intr_handle)) {
otx2_err("vector=%d greater than max_intr=%d", vec,
rte_intr_max_intr_get(intr_handle));
return -EINVAL;
}
len = sizeof(struct vfio_irq_set) + sizeof(int32_t);
irq_set = (struct vfio_irq_set *)irq_set_buf;
irq_set->argsz = len;
irq_set->start = vec;
irq_set->count = 1;
irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
VFIO_IRQ_SET_ACTION_TRIGGER;
irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
/* Use vec fd to set interrupt vectors */
fd_ptr = (int32_t *)&irq_set->data[0];
fd_ptr[0] = rte_intr_efds_index_get(intr_handle, vec);
vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (rc)
otx2_err("Failed to set_irqs vector=0x%x rc=%d", vec, rc);
return rc;
}
static int
irq_init(struct rte_intr_handle *intr_handle)
{
char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
struct vfio_irq_set *irq_set;
int len, rc, vfio_dev_fd;
int32_t *fd_ptr;
uint32_t i;
if (rte_intr_max_intr_get(intr_handle) > MAX_INTR_VEC_ID) {
otx2_err("Max_intr=%d greater than MAX_INTR_VEC_ID=%d",
rte_intr_max_intr_get(intr_handle),
MAX_INTR_VEC_ID);
return -ERANGE;
}
len = sizeof(struct vfio_irq_set) +
sizeof(int32_t) * rte_intr_max_intr_get(intr_handle);
irq_set = (struct vfio_irq_set *)irq_set_buf;
irq_set->argsz = len;
irq_set->start = 0;
irq_set->count = rte_intr_max_intr_get(intr_handle);
irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
VFIO_IRQ_SET_ACTION_TRIGGER;
irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
fd_ptr = (int32_t *)&irq_set->data[0];
for (i = 0; i < irq_set->count; i++)
fd_ptr[i] = -1;
vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (rc)
otx2_err("Failed to set irqs vector rc=%d", rc);
return rc;
}
/**
* @internal
* Disable IRQ
*/
int
otx2_disable_irqs(struct rte_intr_handle *intr_handle)
{
/* Clear max_intr to indicate re-init next time */
if (rte_intr_max_intr_set(intr_handle, 0))
return -1;
return rte_intr_disable(intr_handle);
}
/**
* @internal
* Register IRQ
*/
int
otx2_register_irq(struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb, void *data, unsigned int vec)
{
struct rte_intr_handle *tmp_handle;
uint32_t nb_efd, tmp_nb_efd;
int rc, fd;
/* If max_intr is not yet known, read it from VFIO */
if (rte_intr_max_intr_get(intr_handle) == 0) {
irq_get_info(intr_handle);
irq_init(intr_handle);
}
if (vec > (uint32_t)rte_intr_max_intr_get(intr_handle)) {
otx2_err("Vector=%d greater than max_intr=%d", vec,
rte_intr_max_intr_get(intr_handle));
return -EINVAL;
}
tmp_handle = intr_handle;
/* Create new eventfd for interrupt vector */
fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
if (fd == -1)
return -ENODEV;
if (rte_intr_fd_set(tmp_handle, fd))
return errno;
/* Register vector interrupt callback */
rc = rte_intr_callback_register(tmp_handle, cb, data);
if (rc) {
otx2_err("Failed to register vector:0x%x irq callback.", vec);
return rc;
}
rte_intr_efds_index_set(intr_handle, vec, fd);
nb_efd = (vec > (uint32_t)rte_intr_nb_efd_get(intr_handle)) ?
vec : (uint32_t)rte_intr_nb_efd_get(intr_handle);
rte_intr_nb_efd_set(intr_handle, nb_efd);
tmp_nb_efd = rte_intr_nb_efd_get(intr_handle) + 1;
if (tmp_nb_efd > (uint32_t)rte_intr_max_intr_get(intr_handle))
rte_intr_max_intr_set(intr_handle, tmp_nb_efd);
otx2_base_dbg("Enable vector:0x%x for vfio (efds: %d, max:%d)", vec,
rte_intr_nb_efd_get(intr_handle),
rte_intr_max_intr_get(intr_handle));
/* Enable MSIX vectors to VFIO */
return irq_config(intr_handle, vec);
}
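A hedged sketch of how a PMD would hook an interrupt through these helpers; the handler name and vector number below are illustrative only:

static void
queue_err_irq_handler(void *param)
{
	struct otx2_dev *dev = param;

	/* read and clear the device's error status registers here */
	(void)dev;
}

static int
setup_queue_err_irq_sketch(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	/* vector 8 is only an example; real vector numbers come from the HW */
	return otx2_register_irq(pci_dev->intr_handle,
				 queue_err_irq_handler, dev, 8);
}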
/**
* @internal
* Unregister IRQ
*/
void
otx2_unregister_irq(struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb, void *data, unsigned int vec)
{
struct rte_intr_handle *tmp_handle;
uint8_t retries = 5; /* 5 ms */
int rc, fd;
if (vec > (uint32_t)rte_intr_max_intr_get(intr_handle)) {
otx2_err("Error unregistering MSI-X interrupts vec:%d > %d",
vec, rte_intr_max_intr_get(intr_handle));
return;
}
tmp_handle = intr_handle;
fd = rte_intr_efds_index_get(intr_handle, vec);
if (fd == -1)
return;
if (rte_intr_fd_set(tmp_handle, fd))
return;
do {
/* Un-register callback func from platform lib */
rc = rte_intr_callback_unregister(tmp_handle, cb, data);
/* Retry only if -EAGAIN */
if (rc != -EAGAIN)
break;
rte_delay_ms(1);
retries--;
} while (retries);
if (rc < 0) {
otx2_err("Error unregistering MSI-X vec %d cb, rc=%d", vec, rc);
return;
}
otx2_base_dbg("Disable vector:0x%x for vfio (efds: %d, max:%d)", vec,
rte_intr_nb_efd_get(intr_handle),
rte_intr_max_intr_get(intr_handle));
if (rte_intr_efds_index_get(intr_handle, vec) != -1)
close(rte_intr_efds_index_get(intr_handle, vec));
/* Disable MSIX vectors from VFIO */
rte_intr_efds_index_set(intr_handle, vec, -1);
irq_config(intr_handle, vec);
}
#else
/**
* @internal
* Register IRQ
*/
int otx2_register_irq(__rte_unused struct rte_intr_handle *intr_handle,
__rte_unused rte_intr_callback_fn cb,
__rte_unused void *data, __rte_unused unsigned int vec)
{
return -ENOTSUP;
}
/**
* @internal
* Unregister IRQ
*/
void otx2_unregister_irq(__rte_unused struct rte_intr_handle *intr_handle,
__rte_unused rte_intr_callback_fn cb,
__rte_unused void *data, __rte_unused unsigned int vec)
{
}
/**
* @internal
* Disable IRQ
*/
int otx2_disable_irqs(__rte_unused struct rte_intr_handle *intr_handle)
{
return -ENOTSUP;
}
#endif /* RTE_EAL_VFIO */


@ -1,28 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef _OTX2_IRQ_H_
#define _OTX2_IRQ_H_
#include <rte_pci.h>
#include <rte_interrupts.h>
#include "otx2_common.h"
typedef struct {
/* 128 devices translate to two 64 bits dwords */
#define MAX_VFPF_DWORD_BITS 2
uint64_t bits[MAX_VFPF_DWORD_BITS];
} otx2_intr_t;
__rte_internal
int otx2_register_irq(struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb, void *data, unsigned int vec);
__rte_internal
void otx2_unregister_irq(struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb, void *data, unsigned int vec);
__rte_internal
int otx2_disable_irqs(struct rte_intr_handle *intr_handle);
#endif /* _OTX2_IRQ_H_ */


@ -1,465 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include "otx2_mbox.h"
#include "otx2_dev.h"
#define RVU_AF_AFPF_MBOX0 (0x02000)
#define RVU_AF_AFPF_MBOX1 (0x02008)
#define RVU_PF_PFAF_MBOX0 (0xC00)
#define RVU_PF_PFAF_MBOX1 (0xC08)
#define RVU_PF_VFX_PFVF_MBOX0 (0x0000)
#define RVU_PF_VFX_PFVF_MBOX1 (0x0008)
#define RVU_VF_VFPF_MBOX0 (0x0000)
#define RVU_VF_VFPF_MBOX1 (0x0008)
static inline uint16_t
msgs_offset(void)
{
return RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
}
void
otx2_mbox_fini(struct otx2_mbox *mbox)
{
mbox->reg_base = 0;
mbox->hwbase = 0;
rte_free(mbox->dev);
mbox->dev = NULL;
}
void
otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr =
(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
struct mbox_hdr *rx_hdr =
(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
rte_spinlock_lock(&mdev->mbox_lock);
mdev->msg_size = 0;
mdev->rsp_size = 0;
tx_hdr->msg_size = 0;
tx_hdr->num_msgs = 0;
rx_hdr->msg_size = 0;
rx_hdr->num_msgs = 0;
rte_spinlock_unlock(&mdev->mbox_lock);
}
int
otx2_mbox_init(struct otx2_mbox *mbox, uintptr_t hwbase, uintptr_t reg_base,
int direction, int ndevs, uint64_t intr_offset)
{
struct otx2_mbox_dev *mdev;
int devid;
mbox->intr_offset = intr_offset;
mbox->reg_base = reg_base;
mbox->hwbase = hwbase;
switch (direction) {
case MBOX_DIR_AFPF:
case MBOX_DIR_PFVF:
mbox->tx_start = MBOX_DOWN_TX_START;
mbox->rx_start = MBOX_DOWN_RX_START;
mbox->tx_size = MBOX_DOWN_TX_SIZE;
mbox->rx_size = MBOX_DOWN_RX_SIZE;
break;
case MBOX_DIR_PFAF:
case MBOX_DIR_VFPF:
mbox->tx_start = MBOX_DOWN_RX_START;
mbox->rx_start = MBOX_DOWN_TX_START;
mbox->tx_size = MBOX_DOWN_RX_SIZE;
mbox->rx_size = MBOX_DOWN_TX_SIZE;
break;
case MBOX_DIR_AFPF_UP:
case MBOX_DIR_PFVF_UP:
mbox->tx_start = MBOX_UP_TX_START;
mbox->rx_start = MBOX_UP_RX_START;
mbox->tx_size = MBOX_UP_TX_SIZE;
mbox->rx_size = MBOX_UP_RX_SIZE;
break;
case MBOX_DIR_PFAF_UP:
case MBOX_DIR_VFPF_UP:
mbox->tx_start = MBOX_UP_RX_START;
mbox->rx_start = MBOX_UP_TX_START;
mbox->tx_size = MBOX_UP_RX_SIZE;
mbox->rx_size = MBOX_UP_TX_SIZE;
break;
default:
return -ENODEV;
}
switch (direction) {
case MBOX_DIR_AFPF:
case MBOX_DIR_AFPF_UP:
mbox->trigger = RVU_AF_AFPF_MBOX0;
mbox->tr_shift = 4;
break;
case MBOX_DIR_PFAF:
case MBOX_DIR_PFAF_UP:
mbox->trigger = RVU_PF_PFAF_MBOX1;
mbox->tr_shift = 0;
break;
case MBOX_DIR_PFVF:
case MBOX_DIR_PFVF_UP:
mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
mbox->tr_shift = 12;
break;
case MBOX_DIR_VFPF:
case MBOX_DIR_VFPF_UP:
mbox->trigger = RVU_VF_VFPF_MBOX1;
mbox->tr_shift = 0;
break;
default:
return -ENODEV;
}
mbox->dev = rte_zmalloc("mbox dev",
ndevs * sizeof(struct otx2_mbox_dev),
OTX2_ALIGN);
if (!mbox->dev) {
otx2_mbox_fini(mbox);
return -ENOMEM;
}
mbox->ndevs = ndevs;
for (devid = 0; devid < ndevs; devid++) {
mdev = &mbox->dev[devid];
mdev->mbase = (void *)(mbox->hwbase + (devid * MBOX_SIZE));
rte_spinlock_init(&mdev->mbox_lock);
/* Init header to reset value */
otx2_mbox_reset(mbox, devid);
}
return 0;
}
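A hedged sketch of a PF-side caller initializing its mailbox towards the AF; hwbase, reg_base and intr_off below are placeholders for the BAR4 mailbox memory, the BAR2 register base and the mailbox interrupt register offset:

static int
pf_af_mbox_init_sketch(struct otx2_mbox *mbox, uintptr_t hwbase,
		       uintptr_t reg_base, uint64_t intr_off)
{
	/* ndevs = 1: a PF talks to exactly one AF over this mailbox */
	return otx2_mbox_init(mbox, hwbase, reg_base, MBOX_DIR_PFAF, 1, intr_off);
}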
/**
* @internal
* Allocate a message response
*/
struct mbox_msghdr *
otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, int size,
int size_rsp)
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_msghdr *msghdr = NULL;
rte_spinlock_lock(&mdev->mbox_lock);
size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
size_rsp = RTE_ALIGN(size_rsp, MBOX_MSG_ALIGN);
/* Check if there is space in mailbox */
if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset())
goto exit;
if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset())
goto exit;
if (mdev->msg_size == 0)
mdev->num_msgs = 0;
mdev->num_msgs++;
msghdr = (struct mbox_msghdr *)(((uintptr_t)mdev->mbase +
mbox->tx_start + msgs_offset() + mdev->msg_size));
/* Clear the whole msg region */
otx2_mbox_memset(msghdr, 0, sizeof(*msghdr) + size);
/* Init message header with reset values */
msghdr->ver = OTX2_MBOX_VERSION;
mdev->msg_size += size;
mdev->rsp_size += size_rsp;
msghdr->next_msgoff = mdev->msg_size + msgs_offset();
exit:
rte_spinlock_unlock(&mdev->mbox_lock);
return msghdr;
}
/**
* @internal
* Send a mailbox message
*/
void
otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr =
(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
struct mbox_hdr *rx_hdr =
(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
/* Reset header for next messages */
tx_hdr->msg_size = mdev->msg_size;
mdev->msg_size = 0;
mdev->rsp_size = 0;
mdev->msgs_acked = 0;
/* num_msgs != 0 signals to the peer that the buffer has a number of
* messages. So this should be written after copying txmem
*/
tx_hdr->num_msgs = mdev->num_msgs;
rx_hdr->num_msgs = 0;
/* Sync mbox data into memory */
rte_wmb();
/* The interrupt should be fired after num_msgs is written
* to the shared memory
*/
rte_write64(1, (volatile void *)(mbox->reg_base +
(mbox->trigger | (devid << mbox->tr_shift))));
}
/**
* @internal
* Wait and get mailbox response
*/
int
otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, void **msg)
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_msghdr *msghdr;
uint64_t offset;
int rc;
rc = otx2_mbox_wait_for_rsp(mbox, devid);
if (rc != 1)
return -EIO;
rte_rmb();
offset = mbox->rx_start +
RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
if (msg != NULL)
*msg = msghdr;
return msghdr->rc;
}
/**
* Polling for given wait time to get mailbox response
*/
static int
mbox_poll(struct otx2_mbox *mbox, uint32_t wait)
{
uint32_t timeout = 0, sleep = 1;
uint32_t wait_us = wait * 1000;
uint64_t rsp_reg = 0;
uintptr_t reg_addr;
reg_addr = mbox->reg_base + mbox->intr_offset;
do {
rsp_reg = otx2_read64(reg_addr);
if (timeout >= wait_us)
return -ETIMEDOUT;
rte_delay_us(sleep);
timeout += sleep;
} while (!rsp_reg);
rte_smp_rmb();
/* Clear interrupt */
otx2_write64(rsp_reg, reg_addr);
/* Reset mbox */
otx2_mbox_reset(mbox, 0);
return 0;
}
/**
* @internal
* Wait and get mailbox response with timeout
*/
int
otx2_mbox_get_rsp_tmo(struct otx2_mbox *mbox, int devid, void **msg,
uint32_t tmo)
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_msghdr *msghdr;
uint64_t offset;
int rc;
rc = otx2_mbox_wait_for_rsp_tmo(mbox, devid, tmo);
if (rc != 1)
return -EIO;
rte_rmb();
offset = mbox->rx_start +
RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
if (msg != NULL)
*msg = msghdr;
return msghdr->rc;
}
static int
mbox_wait(struct otx2_mbox *mbox, int devid, uint32_t rst_timo)
{
volatile struct otx2_mbox_dev *mdev = &mbox->dev[devid];
uint32_t timeout = 0, sleep = 1;
rst_timo = rst_timo * 1000; /* Milliseconds to microseconds */
while (mdev->num_msgs > mdev->msgs_acked) {
rte_delay_us(sleep);
timeout += sleep;
if (timeout >= rst_timo) {
struct mbox_hdr *tx_hdr =
(struct mbox_hdr *)((uintptr_t)mdev->mbase +
mbox->tx_start);
struct mbox_hdr *rx_hdr =
(struct mbox_hdr *)((uintptr_t)mdev->mbase +
mbox->rx_start);
otx2_err("MBOX[devid: %d] message wait timeout %d, "
"num_msgs: %d, msgs_acked: %d "
"(tx/rx num_msgs: %d/%d), msg_size: %d, "
"rsp_size: %d",
devid, timeout, mdev->num_msgs,
mdev->msgs_acked, tx_hdr->num_msgs,
rx_hdr->num_msgs, mdev->msg_size,
mdev->rsp_size);
return -EIO;
}
rte_rmb();
}
return 0;
}
int
otx2_mbox_wait_for_rsp_tmo(struct otx2_mbox *mbox, int devid, uint32_t tmo)
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
int rc = 0;
/* Sync with mbox region */
rte_rmb();
if (mbox->trigger == RVU_PF_VFX_PFVF_MBOX1 ||
mbox->trigger == RVU_PF_VFX_PFVF_MBOX0) {
/* In case of VF, wait a bit more to account for round trip delay */
tmo = tmo * 2;
}
/* Wait message */
if (rte_thread_is_intr())
rc = mbox_poll(mbox, tmo);
else
rc = mbox_wait(mbox, devid, tmo);
if (!rc)
rc = mdev->num_msgs;
return rc;
}
/**
* @internal
* Wait for the mailbox response
*/
int
otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
return otx2_mbox_wait_for_rsp_tmo(mbox, devid, MBOX_RSP_TIMEOUT);
}
int
otx2_mbox_get_availmem(struct otx2_mbox *mbox, int devid)
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
int avail;
rte_spinlock_lock(&mdev->mbox_lock);
avail = mbox->tx_size - mdev->msg_size - msgs_offset();
rte_spinlock_unlock(&mdev->mbox_lock);
return avail;
}
int
otx2_send_ready_msg(struct otx2_mbox *mbox, uint16_t *pcifunc)
{
struct ready_msg_rsp *rsp;
int rc;
otx2_mbox_alloc_msg_ready(mbox);
otx2_mbox_msg_send(mbox, 0);
rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
if (rc)
return rc;
if (rsp->hdr.ver != OTX2_MBOX_VERSION) {
otx2_err("Incompatible MBox versions(AF: 0x%04x DPDK: 0x%04x)",
rsp->hdr.ver, OTX2_MBOX_VERSION);
return -EPIPE;
}
if (pcifunc)
*pcifunc = rsp->hdr.pcifunc;
return 0;
}
int
otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, uint16_t pcifunc,
uint16_t id)
{
struct msg_rsp *rsp;
rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
if (!rsp)
return -ENOMEM;
rsp->hdr.id = id;
rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
rsp->hdr.rc = MBOX_MSG_INVALID;
rsp->hdr.pcifunc = pcifunc;
return 0;
}
/**
* @internal
* Convert mail box ID to name
*/
const char *otx2_mbox_id2name(uint16_t id)
{
switch (id) {
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
#undef M
default :
return "INVALID ID";
}
}
int otx2_mbox_id2size(uint16_t id)
{
switch (id) {
#define M(_1, _id, _2, _req_type, _3) case _id: return sizeof(struct _req_type);
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
#undef M
default :
return 0;
}
}

File diff suppressed because it is too large.


@ -1,183 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2020 Marvell International Ltd.
*/
#include <rte_atomic.h>
#include <rte_bus_pci.h>
#include <ethdev_driver.h>
#include <rte_spinlock.h>
#include "otx2_common.h"
#include "otx2_sec_idev.h"
static struct otx2_sec_idev_cfg sec_cfg[OTX2_MAX_INLINE_PORTS];
/**
* @internal
* Check if rte_eth_dev is security offload capable otx2_eth_dev
*/
uint8_t
otx2_eth_dev_is_sec_capable(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_PF ||
pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_VF ||
pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_AF_VF)
return 1;
return 0;
}
int
otx2_sec_idev_cfg_init(int port_id)
{
struct otx2_sec_idev_cfg *cfg;
int i;
cfg = &sec_cfg[port_id];
cfg->tx_cpt_idx = 0;
rte_spinlock_init(&cfg->tx_cpt_lock);
for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
cfg->tx_cpt[i].qp = NULL;
rte_atomic16_set(&cfg->tx_cpt[i].ref_cnt, 0);
}
return 0;
}
int
otx2_sec_idev_tx_cpt_qp_add(uint16_t port_id, struct otx2_cpt_qp *qp)
{
struct otx2_sec_idev_cfg *cfg;
int i, ret;
if (qp == NULL || port_id >= OTX2_MAX_INLINE_PORTS)
return -EINVAL;
cfg = &sec_cfg[port_id];
/* Find a free slot to save CPT LF */
rte_spinlock_lock(&cfg->tx_cpt_lock);
for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
if (cfg->tx_cpt[i].qp == NULL) {
cfg->tx_cpt[i].qp = qp;
ret = 0;
goto unlock;
}
}
ret = -EINVAL;
unlock:
rte_spinlock_unlock(&cfg->tx_cpt_lock);
return ret;
}
int
otx2_sec_idev_tx_cpt_qp_remove(struct otx2_cpt_qp *qp)
{
struct otx2_sec_idev_cfg *cfg;
uint16_t port_id;
int i, ret;
if (qp == NULL)
return -EINVAL;
for (port_id = 0; port_id < OTX2_MAX_INLINE_PORTS; port_id++) {
cfg = &sec_cfg[port_id];
rte_spinlock_lock(&cfg->tx_cpt_lock);
for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
if (cfg->tx_cpt[i].qp != qp)
continue;
/* Don't free if the QP is in use by any sec session */
if (rte_atomic16_read(&cfg->tx_cpt[i].ref_cnt)) {
ret = -EBUSY;
} else {
cfg->tx_cpt[i].qp = NULL;
ret = 0;
}
goto unlock;
}
rte_spinlock_unlock(&cfg->tx_cpt_lock);
}
return -ENOENT;
unlock:
rte_spinlock_unlock(&cfg->tx_cpt_lock);
return ret;
}
int
otx2_sec_idev_tx_cpt_qp_get(uint16_t port_id, struct otx2_cpt_qp **qp)
{
struct otx2_sec_idev_cfg *cfg;
uint16_t index;
int i, ret;
if (port_id >= OTX2_MAX_INLINE_PORTS || qp == NULL)
return -EINVAL;
cfg = &sec_cfg[port_id];
rte_spinlock_lock(&cfg->tx_cpt_lock);
index = cfg->tx_cpt_idx;
/* Get the next index with valid data */
for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
if (cfg->tx_cpt[index].qp != NULL)
break;
index = (index + 1) % OTX2_MAX_CPT_QP_PER_PORT;
}
if (i >= OTX2_MAX_CPT_QP_PER_PORT) {
ret = -EINVAL;
goto unlock;
}
*qp = cfg->tx_cpt[index].qp;
rte_atomic16_inc(&cfg->tx_cpt[index].ref_cnt);
cfg->tx_cpt_idx = (index + 1) % OTX2_MAX_CPT_QP_PER_PORT;
ret = 0;
unlock:
rte_spinlock_unlock(&cfg->tx_cpt_lock);
return ret;
}
int
otx2_sec_idev_tx_cpt_qp_put(struct otx2_cpt_qp *qp)
{
struct otx2_sec_idev_cfg *cfg;
uint16_t port_id;
int i;
if (qp == NULL)
return -EINVAL;
for (port_id = 0; port_id < OTX2_MAX_INLINE_PORTS; port_id++) {
cfg = &sec_cfg[port_id];
for (i = 0; i < OTX2_MAX_CPT_QP_PER_PORT; i++) {
if (cfg->tx_cpt[i].qp == qp) {
rte_atomic16_dec(&cfg->tx_cpt[i].ref_cnt);
return 0;
}
}
}
return -EINVAL;
}
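The get/put pair above hands out CPT queue pairs round-robin per port and reference-counts them; a hypothetical caller (e.g. an inline IPsec Tx setup path) might look like this, illustration only:

static int
borrow_tx_cpt_qp_sketch(uint16_t port_id)
{
	struct otx2_cpt_qp *qp;
	int rc;

	rc = otx2_sec_idev_tx_cpt_qp_get(port_id, &qp);
	if (rc)
		return rc;

	/* ... submit outbound crypto work through qp ... */

	return otx2_sec_idev_tx_cpt_qp_put(qp); /* drop the reference when done */
}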


@ -1,43 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2020 Marvell International Ltd.
*/
#ifndef _OTX2_SEC_IDEV_H_
#define _OTX2_SEC_IDEV_H_
#include <rte_ethdev.h>
#define OTX2_MAX_CPT_QP_PER_PORT 64
#define OTX2_MAX_INLINE_PORTS 64
struct otx2_cpt_qp;
struct otx2_sec_idev_cfg {
struct {
struct otx2_cpt_qp *qp;
rte_atomic16_t ref_cnt;
} tx_cpt[OTX2_MAX_CPT_QP_PER_PORT];
uint16_t tx_cpt_idx;
rte_spinlock_t tx_cpt_lock;
};
__rte_internal
uint8_t otx2_eth_dev_is_sec_capable(struct rte_eth_dev *eth_dev);
__rte_internal
int otx2_sec_idev_cfg_init(int port_id);
__rte_internal
int otx2_sec_idev_tx_cpt_qp_add(uint16_t port_id, struct otx2_cpt_qp *qp);
__rte_internal
int otx2_sec_idev_tx_cpt_qp_remove(struct otx2_cpt_qp *qp);
__rte_internal
int otx2_sec_idev_tx_cpt_qp_put(struct otx2_cpt_qp *qp);
__rte_internal
int otx2_sec_idev_tx_cpt_qp_get(uint16_t port_id, struct otx2_cpt_qp **qp);
#endif /* _OTX2_SEC_IDEV_H_ */


@ -1,44 +0,0 @@
INTERNAL {
global:
otx2_dev_active_vfs;
otx2_dev_fini;
otx2_dev_priv_init;
otx2_disable_irqs;
otx2_eth_dev_is_sec_capable;
otx2_intra_dev_get_cfg;
otx2_logtype_base;
otx2_logtype_dpi;
otx2_logtype_ep;
otx2_logtype_mbox;
otx2_logtype_nix;
otx2_logtype_npa;
otx2_logtype_npc;
otx2_logtype_ree;
otx2_logtype_sso;
otx2_logtype_tim;
otx2_logtype_tm;
otx2_mbox_alloc_msg_rsp;
otx2_mbox_get_rsp;
otx2_mbox_get_rsp_tmo;
otx2_mbox_id2name;
otx2_mbox_msg_send;
otx2_mbox_wait_for_rsp;
otx2_npa_lf_active;
otx2_npa_lf_obj_get;
otx2_npa_lf_obj_ref;
otx2_npa_pf_func_get;
otx2_npa_set_defaults;
otx2_parse_common_devargs;
otx2_register_irq;
otx2_sec_idev_cfg_init;
otx2_sec_idev_tx_cpt_qp_add;
otx2_sec_idev_tx_cpt_qp_get;
otx2_sec_idev_tx_cpt_qp_put;
otx2_sec_idev_tx_cpt_qp_remove;
otx2_sso_pf_func_get;
otx2_sso_pf_func_set;
otx2_unregister_irq;
local: *;
};


@ -16,7 +16,6 @@ drivers = [
'nitrox',
'null',
'octeontx',
'octeontx2',
'openssl',
'scheduler',
'virtio',


@ -1,30 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2019 Marvell International Ltd.
if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
build = false
reason = 'only supported on 64-bit Linux'
subdir_done()
endif
deps += ['bus_pci']
deps += ['common_cpt']
deps += ['common_octeontx2']
deps += ['ethdev']
deps += ['eventdev']
deps += ['security']
sources = files(
'otx2_cryptodev.c',
'otx2_cryptodev_capabilities.c',
'otx2_cryptodev_hw_access.c',
'otx2_cryptodev_mbox.c',
'otx2_cryptodev_ops.c',
'otx2_cryptodev_sec.c',
)
includes += include_directories('../../common/cpt')
includes += include_directories('../../common/octeontx2')
includes += include_directories('../../crypto/octeontx2')
includes += include_directories('../../mempool/octeontx2')
includes += include_directories('../../net/octeontx2')


@ -1,188 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2019 Marvell International Ltd.
*/
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_pci.h>
#include "otx2_common.h"
#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_cryptodev_ops.h"
#include "otx2_cryptodev_sec.h"
#include "otx2_dev.h"
/* CPT common headers */
#include "cpt_common.h"
#include "cpt_pmd_logs.h"
uint8_t otx2_cryptodev_driver_id;
static struct rte_pci_id pci_id_cpt_table[] = {
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OCTEONTX2_RVU_CPT_VF)
},
/* sentinel */
{
.device_id = 0
},
};
uint64_t
otx2_cpt_default_ff_get(void)
{
return RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_IN_PLACE_SGL |
RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT |
RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
RTE_CRYPTODEV_FF_SECURITY |
RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
}
static int
otx2_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
struct rte_cryptodev_pmd_init_params init_params = {
.name = "",
.socket_id = rte_socket_id(),
.private_data_size = sizeof(struct otx2_cpt_vf)
};
char name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct rte_cryptodev *dev;
struct otx2_dev *otx2_dev;
struct otx2_cpt_vf *vf;
uint16_t nb_queues;
int ret;
rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
dev = rte_cryptodev_pmd_create(name, &pci_dev->device, &init_params);
if (dev == NULL) {
ret = -ENODEV;
goto exit;
}
dev->dev_ops = &otx2_cpt_ops;
dev->driver_id = otx2_cryptodev_driver_id;
/* Get private data space allocated */
vf = dev->data->dev_private;
otx2_dev = &vf->otx2_dev;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
/* Initialize the base otx2_dev object */
ret = otx2_dev_init(pci_dev, otx2_dev);
if (ret) {
CPT_LOG_ERR("Could not initialize otx2_dev");
goto pmd_destroy;
}
/* Get number of queues available on the device */
ret = otx2_cpt_available_queues_get(dev, &nb_queues);
if (ret) {
CPT_LOG_ERR("Could not determine the number of queues available");
goto otx2_dev_fini;
}
/* Don't exceed the limits set per VF */
nb_queues = RTE_MIN(nb_queues, OTX2_CPT_MAX_QUEUES_PER_VF);
if (nb_queues == 0) {
CPT_LOG_ERR("No free queues available on the device");
goto otx2_dev_fini;
}
vf->max_queues = nb_queues;
CPT_LOG_INFO("Max queues supported by device: %d",
vf->max_queues);
ret = otx2_cpt_hardware_caps_get(dev, vf->hw_caps);
if (ret) {
CPT_LOG_ERR("Could not determine hardware capabilities");
goto otx2_dev_fini;
}
}
otx2_crypto_capabilities_init(vf->hw_caps);
otx2_crypto_sec_capabilities_init(vf->hw_caps);
/* Create security ctx */
ret = otx2_crypto_sec_ctx_create(dev);
if (ret)
goto otx2_dev_fini;
dev->feature_flags = otx2_cpt_default_ff_get();
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
otx2_cpt_set_enqdeq_fns(dev);
rte_cryptodev_pmd_probing_finish(dev);
return 0;
otx2_dev_fini:
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
otx2_dev_fini(pci_dev, otx2_dev);
pmd_destroy:
rte_cryptodev_pmd_destroy(dev);
exit:
CPT_LOG_ERR("Could not create device (vendor_id: 0x%x device_id: 0x%x)",
pci_dev->id.vendor_id, pci_dev->id.device_id);
return ret;
}
static int
otx2_cpt_pci_remove(struct rte_pci_device *pci_dev)
{
char name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct rte_cryptodev *dev;
if (pci_dev == NULL)
return -EINVAL;
rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
dev = rte_cryptodev_pmd_get_named_dev(name);
if (dev == NULL)
return -ENODEV;
/* Destroy security ctx */
otx2_crypto_sec_ctx_destroy(dev);
return rte_cryptodev_pmd_destroy(dev);
}
static struct rte_pci_driver otx2_cryptodev_pmd = {
.id_table = pci_id_cpt_table,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
.probe = otx2_cpt_pci_probe,
.remove = otx2_cpt_pci_remove,
};
static struct cryptodev_driver otx2_cryptodev_drv;
RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_OCTEONTX2_PMD, otx2_cryptodev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_OCTEONTX2_PMD, pci_id_cpt_table);
RTE_PMD_REGISTER_KMOD_DEP(CRYPTODEV_NAME_OCTEONTX2_PMD, "vfio-pci");
RTE_PMD_REGISTER_CRYPTO_DRIVER(otx2_cryptodev_drv, otx2_cryptodev_pmd.driver,
otx2_cryptodev_driver_id);
RTE_LOG_REGISTER_DEFAULT(otx2_cpt_logtype, NOTICE);


@ -1,63 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2019 Marvell International Ltd.
*/
#ifndef _OTX2_CRYPTODEV_H_
#define _OTX2_CRYPTODEV_H_
#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "otx2_dev.h"
/* Marvell OCTEON TX2 Crypto PMD device name */
#define CRYPTODEV_NAME_OCTEONTX2_PMD crypto_octeontx2
#define OTX2_CPT_MAX_LFS 128
#define OTX2_CPT_MAX_QUEUES_PER_VF 64
#define OTX2_CPT_MAX_BLKS 2
#define OTX2_CPT_PMD_VERSION 3
#define OTX2_CPT_REVISION_ID_3 3
/**
* Device private data
*/
struct otx2_cpt_vf {
struct otx2_dev otx2_dev;
/**< Base class */
uint16_t max_queues;
/**< Max queues supported */
uint8_t nb_queues;
/**< Number of crypto queues attached */
uint16_t lf_msixoff[OTX2_CPT_MAX_LFS];
/**< MSI-X offsets */
uint8_t lf_blkaddr[OTX2_CPT_MAX_LFS];
/**< CPT0/1 BLKADDR of LFs */
uint8_t cpt_revision;
/**< CPT revision */
uint8_t err_intr_registered:1;
/**< Are error interrupts registered? */
union cpt_eng_caps hw_caps[CPT_MAX_ENG_TYPES];
/**< CPT device capabilities */
};
struct cpt_meta_info {
uint64_t deq_op_info[5];
uint64_t comp_code_sz;
union cpt_res_s cpt_res __rte_aligned(16);
struct cpt_request_info cpt_req;
};
#define CPT_LOGTYPE otx2_cpt_logtype
extern int otx2_cpt_logtype;
/*
* Crypto device driver ID
*/
extern uint8_t otx2_cryptodev_driver_id;
uint64_t otx2_cpt_default_ff_get(void);
void otx2_cpt_set_enqdeq_fns(struct rte_cryptodev *dev);
#endif /* _OTX2_CRYPTODEV_H_ */


@ -1,924 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2019 Marvell International Ltd.
*/
#include <rte_cryptodev.h>
#include <rte_security.h>
#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
#include "otx2_mbox.h"
#define CPT_EGRP_GET(hw_caps, name, egrp) do { \
if ((hw_caps[CPT_ENG_TYPE_SE].name) && \
(hw_caps[CPT_ENG_TYPE_IE].name)) \
*egrp = OTX2_CPT_EGRP_SE_IE; \
else if (hw_caps[CPT_ENG_TYPE_SE].name) \
*egrp = OTX2_CPT_EGRP_SE; \
else if (hw_caps[CPT_ENG_TYPE_AE].name) \
*egrp = OTX2_CPT_EGRP_AE; \
else \
*egrp = OTX2_CPT_EGRP_MAX; \
} while (0)
#define CPT_CAPS_ADD(hw_caps, name) do { \
enum otx2_cpt_egrp egrp; \
CPT_EGRP_GET(hw_caps, name, &egrp); \
if (egrp < OTX2_CPT_EGRP_MAX) \
cpt_caps_add(caps_##name, RTE_DIM(caps_##name)); \
} while (0)
#define SEC_CAPS_ADD(hw_caps, name) do { \
enum otx2_cpt_egrp egrp; \
CPT_EGRP_GET(hw_caps, name, &egrp); \
if (egrp < OTX2_CPT_EGRP_MAX) \
sec_caps_add(sec_caps_##name, RTE_DIM(sec_caps_##name));\
} while (0)
#define OTX2_CPT_MAX_CAPS 34
#define OTX2_SEC_MAX_CAPS 4
static struct rte_cryptodev_capabilities otx2_cpt_caps[OTX2_CPT_MAX_CAPS];
static struct rte_cryptodev_capabilities otx2_cpt_sec_caps[OTX2_SEC_MAX_CAPS];
static const struct rte_cryptodev_capabilities caps_mul[] = {
{ /* RSA */
.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
{.asym = {
.xform_capa = {
.xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,
.op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
(1 << RTE_CRYPTO_ASYM_OP_VERIFY) |
(1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |
(1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),
{.modlen = {
.min = 17,
.max = 1024,
.increment = 1
}, }
}
}, }
},
{ /* MOD_EXP */
.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
{.asym = {
.xform_capa = {
.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
.op_types = 0,
{.modlen = {
.min = 17,
.max = 1024,
.increment = 1
}, }
}
}, }
},
{ /* ECDSA */
.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
{.asym = {
.xform_capa = {
.xform_type = RTE_CRYPTO_ASYM_XFORM_ECDSA,
.op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
(1 << RTE_CRYPTO_ASYM_OP_VERIFY)),
}
},
}
},
{ /* ECPM */
.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
{.asym = {
.xform_capa = {
.xform_type = RTE_CRYPTO_ASYM_XFORM_ECPM,
.op_types = 0
}
},
}
},
};
static const struct rte_cryptodev_capabilities caps_sha1_sha2[] = {
{ /* SHA1 */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_SHA1,
.block_size = 64,
.key_size = {
.min = 0,
.max = 0,
.increment = 0
},
.digest_size = {
.min = 20,
.max = 20,
.increment = 0
},
}, }
}, }
},
{ /* SHA1 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
.min = 1,
.max = 1024,
.increment = 1
},
.digest_size = {
.min = 12,
.max = 20,
.increment = 8
},
}, }
}, }
},
{ /* SHA224 */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_SHA224,
.block_size = 64,
.key_size = {
.min = 0,
.max = 0,
.increment = 0
},
.digest_size = {
.min = 28,
.max = 28,
.increment = 0
},
}, }
}, }
},
{ /* SHA224 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
.block_size = 64,
.key_size = {
.min = 1,
.max = 1024,
.increment = 1
},
.digest_size = {
.min = 28,
.max = 28,
.increment = 0
},
}, }
}, }
},
{ /* SHA256 */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_SHA256,
.block_size = 64,
.key_size = {
.min = 0,
.max = 0,
.increment = 0
},
.digest_size = {
.min = 32,
.max = 32,
.increment = 0
},
}, }
}, }
},
{ /* SHA256 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
.min = 1,
.max = 1024,
.increment = 1
},
.digest_size = {
.min = 16,
.max = 32,
.increment = 16
},
}, }
}, }
},
{ /* SHA384 */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_SHA384,
.block_size = 64,
.key_size = {
.min = 0,
.max = 0,
.increment = 0
},
.digest_size = {
.min = 48,
.max = 48,
.increment = 0
},
}, }
}, }
},
{ /* SHA384 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
.block_size = 64,
.key_size = {
.min = 1,
.max = 1024,
.increment = 1
},
.digest_size = {
.min = 24,
.max = 48,
.increment = 24
},
}, }
}, }
},
{ /* SHA512 */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_SHA512,
.block_size = 128,
.key_size = {
.min = 0,
.max = 0,
.increment = 0
},
.digest_size = {
.min = 64,
.max = 64,
.increment = 0
},
}, }
}, }
},
{ /* SHA512 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
.block_size = 128,
.key_size = {
.min = 1,
.max = 1024,
.increment = 1
},
.digest_size = {
.min = 32,
.max = 64,
.increment = 32
},
}, }
}, }
},
{ /* MD5 */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_MD5,
.block_size = 64,
.key_size = {
.min = 0,
.max = 0,
.increment = 0
},
.digest_size = {
.min = 16,
.max = 16,
.increment = 0
},
}, }
}, }
},
{ /* MD5 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
.block_size = 64,
.key_size = {
.min = 8,
.max = 64,
.increment = 8
},
.digest_size = {
.min = 12,
.max = 16,
.increment = 4
},
}, }
}, }
},
};
static const struct rte_cryptodev_capabilities caps_chacha20[] = {
{ /* Chacha20-Poly1305 */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
{.aead = {
.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
.block_size = 64,
.key_size = {
.min = 32,
.max = 32,
.increment = 0
},
.digest_size = {
.min = 16,
.max = 16,
.increment = 0
},
.aad_size = {
.min = 0,
.max = 1024,
.increment = 1
},
.iv_size = {
.min = 12,
.max = 12,
.increment = 0
},
}, }
}, }
}
};
static const struct rte_cryptodev_capabilities caps_zuc_snow3g[] = {
{ /* SNOW 3G (UEA2) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
{.cipher = {
.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
.block_size = 16,
.key_size = {
.min = 16,
.max = 16,
.increment = 0
},
.iv_size = {
.min = 16,
.max = 16,
.increment = 0
}
}, }
}, }
},
{ /* ZUC (EEA3) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
{.cipher = {
.algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
.block_size = 16,
.key_size = {
.min = 16,
.max = 16,
.increment = 0
},
.iv_size = {
.min = 16,
.max = 16,
.increment = 0
}
}, }
}, }
},
{ /* SNOW 3G (UIA2) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
.block_size = 16,
.key_size = {
.min = 16,
.max = 16,
.increment = 0
},
.digest_size = {
.min = 4,
.max = 4,
.increment = 0
},
.iv_size = {
.min = 16,
.max = 16,
.increment = 0
}
}, }
}, }
},
{ /* ZUC (EIA3) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
.block_size = 16,
.key_size = {
.min = 16,
.max = 16,
.increment = 0
},
.digest_size = {
.min = 4,
.max = 4,
.increment = 0
},
.iv_size = {
.min = 16,
.max = 16,
.increment = 0
}
}, }
}, }
},
};
static const struct rte_cryptodev_capabilities caps_aes[] = {
{ /* AES GMAC (AUTH) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_AES_GMAC,
.block_size = 16,
.key_size = {
.min = 16,
.max = 32,
.increment = 8
},
.digest_size = {
.min = 8,
.max = 16,
.increment = 4
},
.iv_size = {
.min = 12,
.max = 12,
.increment = 0
}
}, }
}, }
},
{ /* AES CBC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
{.cipher = {
.algo = RTE_CRYPTO_CIPHER_AES_CBC,
.block_size = 16,
.key_size = {
.min = 16,
.max = 32,
.increment = 8
},
.iv_size = {
.min = 16,
.max = 16,
.increment = 0
}
}, }
}, }
},
{ /* AES CTR */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
{.cipher = {
.algo = RTE_CRYPTO_CIPHER_AES_CTR,
.block_size = 16,
.key_size = {
.min = 16,
.max = 32,
.increment = 8
},
.iv_size = {
.min = 12,
.max = 16,
.increment = 4
}
}, }
}, }
},
{ /* AES XTS */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
{.cipher = {
.algo = RTE_CRYPTO_CIPHER_AES_XTS,
.block_size = 16,
.key_size = {
.min = 32,
.max = 64,
.increment = 0
},
.iv_size = {
.min = 16,
.max = 16,
.increment = 0
}
}, }
}, }
},
{ /* AES GCM */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
{.aead = {
.algo = RTE_CRYPTO_AEAD_AES_GCM,
.block_size = 16,
.key_size = {
.min = 16,
.max = 32,
.increment = 8
},
.digest_size = {
.min = 4,
.max = 16,
.increment = 1
},
.aad_size = {
.min = 0,
.max = 1024,
.increment = 1
},
.iv_size = {
.min = 12,
.max = 12,
.increment = 0
}
}, }
}, }
},
};
static const struct rte_cryptodev_capabilities caps_kasumi[] = {
{ /* KASUMI (F8) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
{.cipher = {
.algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
.block_size = 8,
.key_size = {
.min = 16,
.max = 16,
.increment = 0
},
.iv_size = {
.min = 8,
.max = 8,
.increment = 0
}
}, }
}, }
},
{ /* KASUMI (F9) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_KASUMI_F9,
.block_size = 8,
.key_size = {
.min = 16,
.max = 16,
.increment = 0
},
.digest_size = {
.min = 4,
.max = 4,
.increment = 0
},
}, }
}, }
},
};
static const struct rte_cryptodev_capabilities caps_des[] = {
{ /* 3DES CBC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
{.cipher = {
.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
.block_size = 8,
.key_size = {
.min = 24,
.max = 24,
.increment = 0
},
.iv_size = {
.min = 8,
.max = 16,
.increment = 8
}
}, }
}, }
},
{ /* 3DES ECB */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
{.cipher = {
.algo = RTE_CRYPTO_CIPHER_3DES_ECB,
.block_size = 8,
.key_size = {
.min = 24,
.max = 24,
.increment = 0
},
.iv_size = {
.min = 0,
.max = 0,
.increment = 0
}
}, }
}, }
},
{ /* DES CBC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
{.cipher = {
.algo = RTE_CRYPTO_CIPHER_DES_CBC,
.block_size = 8,
.key_size = {
.min = 8,
.max = 8,
.increment = 0
},
.iv_size = {
.min = 8,
.max = 8,
.increment = 0
}
}, }
}, }
},
};
static const struct rte_cryptodev_capabilities caps_null[] = {
{ /* NULL (AUTH) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_NULL,
.block_size = 1,
.key_size = {
.min = 0,
.max = 0,
.increment = 0
},
.digest_size = {
.min = 0,
.max = 0,
.increment = 0
},
}, },
}, },
},
{ /* NULL (CIPHER) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
{.cipher = {
.algo = RTE_CRYPTO_CIPHER_NULL,
.block_size = 1,
.key_size = {
.min = 0,
.max = 0,
.increment = 0
},
.iv_size = {
.min = 0,
.max = 0,
.increment = 0
}
}, },
}, }
},
};
static const struct rte_cryptodev_capabilities caps_end[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
static const struct rte_cryptodev_capabilities sec_caps_aes[] = {
{ /* AES GCM */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
{.aead = {
.algo = RTE_CRYPTO_AEAD_AES_GCM,
.block_size = 16,
.key_size = {
.min = 16,
.max = 32,
.increment = 8
},
.digest_size = {
.min = 16,
.max = 16,
.increment = 0
},
.aad_size = {
.min = 8,
.max = 12,
.increment = 4
},
.iv_size = {
.min = 12,
.max = 12,
.increment = 0
}
}, }
}, }
},
{ /* AES CBC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
{.cipher = {
.algo = RTE_CRYPTO_CIPHER_AES_CBC,
.block_size = 16,
.key_size = {
.min = 16,
.max = 32,
.increment = 8
},
.iv_size = {
.min = 16,
.max = 16,
.increment = 0
}
}, }
}, }
},
};
static const struct rte_cryptodev_capabilities sec_caps_sha1_sha2[] = {
{ /* SHA1 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
.min = 1,
.max = 1024,
.increment = 1
},
.digest_size = {
.min = 12,
.max = 20,
.increment = 8
},
}, }
}, }
},
{ /* SHA256 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
.min = 1,
.max = 1024,
.increment = 1
},
.digest_size = {
.min = 16,
.max = 32,
.increment = 16
},
}, }
}, }
},
};
static const struct rte_security_capability
otx2_crypto_sec_capabilities[] = {
{ /* IPsec Lookaside Protocol ESP Tunnel Ingress */
.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
.ipsec = {
.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
.options = { 0 }
},
.crypto_capabilities = otx2_cpt_sec_caps,
.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
},
{ /* IPsec Lookaside Protocol ESP Tunnel Egress */
.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
.ipsec = {
.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
.options = { 0 }
},
.crypto_capabilities = otx2_cpt_sec_caps,
.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
},
{
.action = RTE_SECURITY_ACTION_TYPE_NONE
}
};
static void
cpt_caps_add(const struct rte_cryptodev_capabilities *caps, int nb_caps)
{
static int cur_pos;
if (cur_pos + nb_caps > OTX2_CPT_MAX_CAPS)
return;
memcpy(&otx2_cpt_caps[cur_pos], caps, nb_caps * sizeof(caps[0]));
cur_pos += nb_caps;
}
void
otx2_crypto_capabilities_init(union cpt_eng_caps *hw_caps)
{
CPT_CAPS_ADD(hw_caps, mul);
CPT_CAPS_ADD(hw_caps, sha1_sha2);
CPT_CAPS_ADD(hw_caps, chacha20);
CPT_CAPS_ADD(hw_caps, zuc_snow3g);
CPT_CAPS_ADD(hw_caps, aes);
CPT_CAPS_ADD(hw_caps, kasumi);
CPT_CAPS_ADD(hw_caps, des);
cpt_caps_add(caps_null, RTE_DIM(caps_null));
cpt_caps_add(caps_end, RTE_DIM(caps_end));
}
const struct rte_cryptodev_capabilities *
otx2_cpt_capabilities_get(void)
{
return otx2_cpt_caps;
}
static void
sec_caps_add(const struct rte_cryptodev_capabilities *caps, int nb_caps)
{
static int cur_pos;
if (cur_pos + nb_caps > OTX2_SEC_MAX_CAPS)
return;
memcpy(&otx2_cpt_sec_caps[cur_pos], caps, nb_caps * sizeof(caps[0]));
cur_pos += nb_caps;
}
void
otx2_crypto_sec_capabilities_init(union cpt_eng_caps *hw_caps)
{
SEC_CAPS_ADD(hw_caps, aes);
SEC_CAPS_ADD(hw_caps, sha1_sha2);
sec_caps_add(caps_end, RTE_DIM(caps_end));
}
const struct rte_security_capability *
otx2_crypto_sec_capabilities_get(void *device __rte_unused)
{
return otx2_crypto_sec_capabilities;
}
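For reference, the capability tables above are stitched together at init time: otx2_crypto_capabilities_init() and otx2_crypto_sec_capabilities_init() append each per-algorithm table to a flat static array, but only when the corresponding hardware engine capability is advertised. The CPT_CAPS_ADD()/SEC_CAPS_ADD() helpers are presumably defined earlier in this file and are not visible in this excerpt, so the sketch below only illustrates the pattern with made-up names (cap_example, CAPS_ADD_EXAMPLE), not the driver's actual macro.

/*
 * Minimal standalone sketch of the "append a table if the hardware
 * advertises it" pattern used by cpt_caps_add()/sec_caps_add() above.
 * Simplified stand-in types; not DPDK API.
 */
#include <stddef.h>
#include <string.h>

struct cap_example { int algo; };

#define MAX_CAPS_EXAMPLE 8

static struct cap_example all_caps[MAX_CAPS_EXAMPLE];

static void caps_add_example(const struct cap_example *src, size_t nb)
{
	static size_t cur_pos;

	if (cur_pos + nb > MAX_CAPS_EXAMPLE)
		return;		/* silently drop on overflow, as above */
	memcpy(&all_caps[cur_pos], src, nb * sizeof(src[0]));
	cur_pos += nb;
}

/* Hypothetical gating macro: add a table only if the HW capability is set. */
#define CAPS_ADD_EXAMPLE(hw_has, table) do {				\
	if (hw_has)							\
		caps_add_example(table, sizeof(table) / sizeof(table[0])); \
} while (0)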


@ -1,45 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2019 Marvell International Ltd.
*/
#ifndef _OTX2_CRYPTODEV_CAPABILITIES_H_
#define _OTX2_CRYPTODEV_CAPABILITIES_H_
#include <rte_cryptodev.h>
#include "otx2_mbox.h"
enum otx2_cpt_egrp {
OTX2_CPT_EGRP_SE = 0,
OTX2_CPT_EGRP_SE_IE = 1,
OTX2_CPT_EGRP_AE = 2,
OTX2_CPT_EGRP_MAX,
};
/*
* Initialize crypto capabilities for the device
*/
void otx2_crypto_capabilities_init(union cpt_eng_caps *hw_caps);
/*
* Get capabilities list for the device
*/
const struct rte_cryptodev_capabilities *
otx2_cpt_capabilities_get(void);
/*
* Initialize security capabilities for the device
*/
void otx2_crypto_sec_capabilities_init(union cpt_eng_caps *hw_caps);
/*
* Get security capabilities list for the device
*/
const struct rte_security_capability *
otx2_crypto_sec_capabilities_get(void *device __rte_unused);
#endif /* _OTX2_CRYPTODEV_CAPABILITIES_H_ */


@ -1,225 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2019 Marvell International Ltd.
*/
#include <rte_cryptodev.h>
#include "otx2_common.h"
#include "otx2_cryptodev.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_cryptodev_ops.h"
#include "otx2_dev.h"
#include "cpt_pmd_logs.h"
static void
otx2_cpt_lf_err_intr_handler(void *param)
{
uintptr_t base = (uintptr_t)param;
uint8_t lf_id;
uint64_t intr;
lf_id = (base >> 12) & 0xFF;
intr = otx2_read64(base + OTX2_CPT_LF_MISC_INT);
if (intr == 0)
return;
CPT_LOG_ERR("LF %d MISC_INT: 0x%" PRIx64 "", lf_id, intr);
/* Clear interrupt */
otx2_write64(intr, base + OTX2_CPT_LF_MISC_INT);
}
static void
otx2_cpt_lf_err_intr_unregister(const struct rte_cryptodev *dev,
uint16_t msix_off, uintptr_t base)
{
struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
struct rte_intr_handle *handle = pci_dev->intr_handle;
/* Disable error interrupts */
otx2_write64(~0ull, base + OTX2_CPT_LF_MISC_INT_ENA_W1C);
otx2_unregister_irq(handle, otx2_cpt_lf_err_intr_handler, (void *)base,
msix_off);
}
void
otx2_cpt_err_intr_unregister(const struct rte_cryptodev *dev)
{
struct otx2_cpt_vf *vf = dev->data->dev_private;
uintptr_t base;
uint32_t i;
for (i = 0; i < vf->nb_queues; i++) {
base = OTX2_CPT_LF_BAR2(vf, vf->lf_blkaddr[i], i);
otx2_cpt_lf_err_intr_unregister(dev, vf->lf_msixoff[i], base);
}
vf->err_intr_registered = 0;
}
static int
otx2_cpt_lf_err_intr_register(const struct rte_cryptodev *dev,
uint16_t msix_off, uintptr_t base)
{
struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
struct rte_intr_handle *handle = pci_dev->intr_handle;
int ret;
/* Disable error interrupts */
otx2_write64(~0ull, base + OTX2_CPT_LF_MISC_INT_ENA_W1C);
/* Register error interrupt handler */
ret = otx2_register_irq(handle, otx2_cpt_lf_err_intr_handler,
(void *)base, msix_off);
if (ret)
return ret;
/* Enable error interrupts */
otx2_write64(~0ull, base + OTX2_CPT_LF_MISC_INT_ENA_W1S);
return 0;
}
int
otx2_cpt_err_intr_register(const struct rte_cryptodev *dev)
{
struct otx2_cpt_vf *vf = dev->data->dev_private;
uint32_t i, j, ret;
uintptr_t base;
for (i = 0; i < vf->nb_queues; i++) {
if (vf->lf_msixoff[i] == MSIX_VECTOR_INVALID) {
CPT_LOG_ERR("Invalid CPT LF MSI-X offset: 0x%x",
vf->lf_msixoff[i]);
return -EINVAL;
}
}
for (i = 0; i < vf->nb_queues; i++) {
base = OTX2_CPT_LF_BAR2(vf, vf->lf_blkaddr[i], i);
ret = otx2_cpt_lf_err_intr_register(dev, vf->lf_msixoff[i],
base);
if (ret)
goto intr_unregister;
}
vf->err_intr_registered = 1;
return 0;
intr_unregister:
/* Unregister the ones already registered */
for (j = 0; j < i; j++) {
base = OTX2_CPT_LF_BAR2(vf, vf->lf_blkaddr[j], j);
otx2_cpt_lf_err_intr_unregister(dev, vf->lf_msixoff[j], base);
}
/*
* Failed to register the error interrupt. Do not return an error here, as
* that would prevent the application from bringing up a larger number of
* devices.
*
* This failure is a known limitation: otx2_dev_init() sets up interrupts
* (including the mbox interrupts) based on static values from ATF, while
* the number of interrupts actually needed depends on the attached LFs and
* is known only after otx2_dev_init() has run.
*/
return 0;
}
int
otx2_cpt_iq_enable(const struct rte_cryptodev *dev,
const struct otx2_cpt_qp *qp, uint8_t grp_mask, uint8_t pri,
uint32_t size_div40)
{
union otx2_cpt_af_lf_ctl af_lf_ctl;
union otx2_cpt_lf_inprog inprog;
union otx2_cpt_lf_q_base base;
union otx2_cpt_lf_q_size size;
union otx2_cpt_lf_ctl lf_ctl;
int ret;
/* Set engine group mask and priority */
ret = otx2_cpt_af_reg_read(dev, OTX2_CPT_AF_LF_CTL(qp->id),
qp->blkaddr, &af_lf_ctl.u);
if (ret)
return ret;
af_lf_ctl.s.grp = grp_mask;
af_lf_ctl.s.pri = pri ? 1 : 0;
ret = otx2_cpt_af_reg_write(dev, OTX2_CPT_AF_LF_CTL(qp->id),
qp->blkaddr, af_lf_ctl.u);
if (ret)
return ret;
/* Set instruction queue base address */
base.u = otx2_read64(qp->base + OTX2_CPT_LF_Q_BASE);
base.s.fault = 0;
base.s.stopped = 0;
base.s.addr = qp->iq_dma_addr >> 7;
otx2_write64(base.u, qp->base + OTX2_CPT_LF_Q_BASE);
/* Set instruction queue size */
size.u = otx2_read64(qp->base + OTX2_CPT_LF_Q_SIZE);
size.s.size_div40 = size_div40;
otx2_write64(size.u, qp->base + OTX2_CPT_LF_Q_SIZE);
/* Enable instruction queue */
lf_ctl.u = otx2_read64(qp->base + OTX2_CPT_LF_CTL);
lf_ctl.s.ena = 1;
otx2_write64(lf_ctl.u, qp->base + OTX2_CPT_LF_CTL);
/* Start instruction execution */
inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
inprog.s.eena = 1;
otx2_write64(inprog.u, qp->base + OTX2_CPT_LF_INPROG);
return 0;
}
void
otx2_cpt_iq_disable(struct otx2_cpt_qp *qp)
{
union otx2_cpt_lf_q_grp_ptr grp_ptr;
union otx2_cpt_lf_inprog inprog;
union otx2_cpt_lf_ctl ctl;
int cnt;
/* Stop instruction execution */
inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
inprog.s.eena = 0x0;
otx2_write64(inprog.u, qp->base + OTX2_CPT_LF_INPROG);
/* Disable instructions enqueuing */
ctl.u = otx2_read64(qp->base + OTX2_CPT_LF_CTL);
ctl.s.ena = 0;
otx2_write64(ctl.u, qp->base + OTX2_CPT_LF_CTL);
/* Wait for instruction queue to become empty */
cnt = 0;
do {
inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
if (inprog.s.grb_partial)
cnt = 0;
else
cnt++;
grp_ptr.u = otx2_read64(qp->base + OTX2_CPT_LF_Q_GRP_PTR);
} while ((cnt < 10) && (grp_ptr.s.nq_ptr != grp_ptr.s.dq_ptr));
cnt = 0;
do {
inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
if ((inprog.s.inflight == 0) &&
(inprog.s.gwb_cnt < 40) &&
((inprog.s.grb_cnt == 0) || (inprog.s.grb_cnt == 40)))
cnt++;
else
cnt = 0;
} while (cnt < 10);
}
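The enable/disable sequences above all follow the same idiom: read a 64-bit CSR into a union that overlays the raw value with a bitfield view, update one field, and write the whole word back. Below is a minimal standalone sketch of that idiom, assuming a simplified register layout and plain pointer dereferences in place of the otx2_read64()/otx2_write64() MMIO helpers; it is an illustration, not the driver's CSR definition.

#include <stdint.h>

union lf_ctl_example {
	uint64_t u;
	struct {
		uint64_t ena : 1;
		uint64_t reserved_1_63 : 63;
	} s;
};

static void lf_enable_example(volatile uint64_t *reg)
{
	union lf_ctl_example ctl;

	ctl.u = *reg;	/* read the whole 64-bit register */
	ctl.s.ena = 1;	/* change only the enable field */
	*reg = ctl.u;	/* write the full word back */
}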


@ -1,161 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2019 Marvell International Ltd.
*/
#ifndef _OTX2_CRYPTODEV_HW_ACCESS_H_
#define _OTX2_CRYPTODEV_HW_ACCESS_H_
#include <stdint.h>
#include <rte_cryptodev.h>
#include <rte_memory.h>
#include "cpt_common.h"
#include "cpt_hw_types.h"
#include "cpt_mcode_defines.h"
#include "otx2_dev.h"
#include "otx2_cryptodev_qp.h"
/* CPT instruction queue length.
* A power-of-2 queue size simplifies pending-queue calculations.
*/
#define OTX2_CPT_DEFAULT_CMD_QLEN 8192
/* Mask which selects all engine groups */
#define OTX2_CPT_ENG_GRPS_MASK 0xFF
/* Register offsets */
/* LMT LF registers */
#define OTX2_LMT_LF_LMTLINE(a) (0x0ull | (uint64_t)(a) << 3)
/* CPT LF registers */
#define OTX2_CPT_LF_CTL 0x10ull
#define OTX2_CPT_LF_INPROG 0x40ull
#define OTX2_CPT_LF_MISC_INT 0xb0ull
#define OTX2_CPT_LF_MISC_INT_ENA_W1S 0xd0ull
#define OTX2_CPT_LF_MISC_INT_ENA_W1C 0xe0ull
#define OTX2_CPT_LF_Q_BASE 0xf0ull
#define OTX2_CPT_LF_Q_SIZE 0x100ull
#define OTX2_CPT_LF_Q_GRP_PTR 0x120ull
#define OTX2_CPT_LF_NQ(a) (0x400ull | (uint64_t)(a) << 3)
#define OTX2_CPT_AF_LF_CTL(a) (0x27000ull | (uint64_t)(a) << 3)
#define OTX2_CPT_AF_LF_CTL2(a) (0x29000ull | (uint64_t)(a) << 3)
#define OTX2_CPT_LF_BAR2(vf, blk_addr, q_id) \
((vf)->otx2_dev.bar2 + \
((blk_addr << 20) | ((q_id) << 12)))
#define OTX2_CPT_QUEUE_HI_PRIO 0x1
union otx2_cpt_lf_ctl {
uint64_t u;
struct {
uint64_t ena : 1;
uint64_t fc_ena : 1;
uint64_t fc_up_crossing : 1;
uint64_t reserved_3_3 : 1;
uint64_t fc_hyst_bits : 4;
uint64_t reserved_8_63 : 56;
} s;
};
union otx2_cpt_lf_inprog {
uint64_t u;
struct {
uint64_t inflight : 9;
uint64_t reserved_9_15 : 7;
uint64_t eena : 1;
uint64_t grp_drp : 1;
uint64_t reserved_18_30 : 13;
uint64_t grb_partial : 1;
uint64_t grb_cnt : 8;
uint64_t gwb_cnt : 8;
uint64_t reserved_48_63 : 16;
} s;
};
union otx2_cpt_lf_q_base {
uint64_t u;
struct {
uint64_t fault : 1;
uint64_t stopped : 1;
uint64_t reserved_2_6 : 5;
uint64_t addr : 46;
uint64_t reserved_53_63 : 11;
} s;
};
union otx2_cpt_lf_q_size {
uint64_t u;
struct {
uint64_t size_div40 : 15;
uint64_t reserved_15_63 : 49;
} s;
};
union otx2_cpt_af_lf_ctl {
uint64_t u;
struct {
uint64_t pri : 1;
uint64_t reserved_1_8 : 8;
uint64_t pf_func_inst : 1;
uint64_t cont_err : 1;
uint64_t reserved_11_15 : 5;
uint64_t nixtx_en : 1;
uint64_t reserved_17_47 : 31;
uint64_t grp : 8;
uint64_t reserved_56_63 : 8;
} s;
};
union otx2_cpt_af_lf_ctl2 {
uint64_t u;
struct {
uint64_t exe_no_swap : 1;
uint64_t exe_ldwb : 1;
uint64_t reserved_2_31 : 30;
uint64_t sso_pf_func : 16;
uint64_t nix_pf_func : 16;
} s;
};
union otx2_cpt_lf_q_grp_ptr {
uint64_t u;
struct {
uint64_t dq_ptr : 15;
uint64_t reserved_31_15 : 17;
uint64_t nq_ptr : 15;
uint64_t reserved_47_62 : 16;
uint64_t xq_xor : 1;
} s;
};
/*
* Enumeration cpt_9x_comp_e
*
* CPT 9X Completion Enumeration
* Enumerates the values of CPT_RES_S[COMPCODE].
*/
enum cpt_9x_comp_e {
CPT_9X_COMP_E_NOTDONE = 0x00,
CPT_9X_COMP_E_GOOD = 0x01,
CPT_9X_COMP_E_FAULT = 0x02,
CPT_9X_COMP_E_HWERR = 0x04,
CPT_9X_COMP_E_INSTERR = 0x05,
CPT_9X_COMP_E_LAST_ENTRY = 0x06
};
void otx2_cpt_err_intr_unregister(const struct rte_cryptodev *dev);
int otx2_cpt_err_intr_register(const struct rte_cryptodev *dev);
int otx2_cpt_iq_enable(const struct rte_cryptodev *dev,
const struct otx2_cpt_qp *qp, uint8_t grp_mask,
uint8_t pri, uint32_t size_div40);
void otx2_cpt_iq_disable(struct otx2_cpt_qp *qp);
#endif /* _OTX2_CRYPTODEV_HW_ACCESS_H_ */
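The OTX2_CPT_LF_BAR2() macro above encodes the per-LF register window layout: each RVU block gets its own window (block address shifted left by 20 bits) and each LF within it a 4 KB slot (queue id shifted left by 12 bits), which is also why the error-interrupt handler earlier recovers the LF id with (base >> 12) & 0xFF. A small standalone example of the arithmetic follows; the BAR2 base and block address are illustrative values only, not real mappings.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bar2 = 0x840200000000ull;	/* example BAR2 base (assumption) */
	uint64_t blk_addr = 0xa;		/* example RVU block address (assumption) */
	uint64_t q_id = 3;			/* LF / queue pair index */

	uint64_t lf_base  = bar2 + (blk_addr << 20) + (q_id << 12);
	uint64_t misc_int = lf_base + 0xb0ull;	/* OTX2_CPT_LF_MISC_INT offset */

	printf("LF%" PRIu64 " base %#" PRIx64 ", MISC_INT at %#" PRIx64 "\n",
	       q_id, lf_base, misc_int);
	return 0;
}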


@ -1,285 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2019 Marvell International Ltd.
*/
#include <cryptodev_pmd.h>
#include <rte_ethdev.h>
#include "otx2_cryptodev.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_dev.h"
#include "otx2_ethdev.h"
#include "otx2_sec_idev.h"
#include "otx2_mbox.h"
#include "cpt_pmd_logs.h"
int
otx2_cpt_hardware_caps_get(const struct rte_cryptodev *dev,
union cpt_eng_caps *hw_caps)
{
struct otx2_cpt_vf *vf = dev->data->dev_private;
struct otx2_dev *otx2_dev = &vf->otx2_dev;
struct cpt_caps_rsp_msg *rsp;
int ret;
otx2_mbox_alloc_msg_cpt_caps_get(otx2_dev->mbox);
ret = otx2_mbox_process_msg(otx2_dev->mbox, (void *)&rsp);
if (ret)
return -EIO;
if (rsp->cpt_pf_drv_version != OTX2_CPT_PMD_VERSION) {
otx2_err("Incompatible CPT PMD version"
"(Kernel: 0x%04x DPDK: 0x%04x)",
rsp->cpt_pf_drv_version, OTX2_CPT_PMD_VERSION);
return -EPIPE;
}
vf->cpt_revision = rsp->cpt_revision;
otx2_mbox_memcpy(hw_caps, rsp->eng_caps,
sizeof(union cpt_eng_caps) * CPT_MAX_ENG_TYPES);
return 0;
}
int
otx2_cpt_available_queues_get(const struct rte_cryptodev *dev,
uint16_t *nb_queues)
{
struct otx2_cpt_vf *vf = dev->data->dev_private;
struct otx2_dev *otx2_dev = &vf->otx2_dev;
struct free_rsrcs_rsp *rsp;
int ret;
otx2_mbox_alloc_msg_free_rsrc_cnt(otx2_dev->mbox);
ret = otx2_mbox_process_msg(otx2_dev->mbox, (void *)&rsp);
if (ret)
return -EIO;
*nb_queues = rsp->cpt + rsp->cpt1;
return 0;
}
int
otx2_cpt_queues_attach(const struct rte_cryptodev *dev, uint8_t nb_queues)
{
struct otx2_cpt_vf *vf = dev->data->dev_private;
struct otx2_mbox *mbox = vf->otx2_dev.mbox;
int blkaddr[OTX2_CPT_MAX_BLKS];
struct rsrc_attach_req *req;
int blknum = 0;
int i, ret;
blkaddr[0] = RVU_BLOCK_ADDR_CPT0;
blkaddr[1] = RVU_BLOCK_ADDR_CPT1;
/* Ask AF to attach required LFs */
req = otx2_mbox_alloc_msg_attach_resources(mbox);
if ((vf->cpt_revision == OTX2_CPT_REVISION_ID_3) &&
(vf->otx2_dev.pf_func & 0x1))
blknum = (blknum + 1) % OTX2_CPT_MAX_BLKS;
/* 1 LF = 1 queue */
req->cptlfs = nb_queues;
req->cpt_blkaddr = blkaddr[blknum];
ret = otx2_mbox_process(mbox);
if (ret == -ENOSPC) {
if (vf->cpt_revision == OTX2_CPT_REVISION_ID_3) {
blknum = (blknum + 1) % OTX2_CPT_MAX_BLKS;
req->cpt_blkaddr = blkaddr[blknum];
if (otx2_mbox_process(mbox) < 0)
return -EIO;
} else {
return -EIO;
}
} else if (ret < 0) {
return -EIO;
}
/* Update number of attached queues */
vf->nb_queues = nb_queues;
for (i = 0; i < nb_queues; i++)
vf->lf_blkaddr[i] = req->cpt_blkaddr;
return 0;
}
int
otx2_cpt_queues_detach(const struct rte_cryptodev *dev)
{
struct otx2_cpt_vf *vf = dev->data->dev_private;
struct otx2_mbox *mbox = vf->otx2_dev.mbox;
struct rsrc_detach_req *req;
req = otx2_mbox_alloc_msg_detach_resources(mbox);
req->cptlfs = true;
req->partial = true;
if (otx2_mbox_process(mbox) < 0)
return -EIO;
/* Queues have been detached */
vf->nb_queues = 0;
return 0;
}
int
otx2_cpt_msix_offsets_get(const struct rte_cryptodev *dev)
{
struct otx2_cpt_vf *vf = dev->data->dev_private;
struct otx2_mbox *mbox = vf->otx2_dev.mbox;
struct msix_offset_rsp *rsp;
uint32_t i, ret;
/* Get CPT MSI-X vector offsets */
otx2_mbox_alloc_msg_msix_offset(mbox);
ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
if (ret)
return ret;
for (i = 0; i < vf->nb_queues; i++)
vf->lf_msixoff[i] = (vf->lf_blkaddr[i] == RVU_BLOCK_ADDR_CPT1) ?
rsp->cpt1_lf_msixoff[i] : rsp->cptlf_msixoff[i];
return 0;
}
static int
otx2_cpt_send_mbox_msg(struct otx2_cpt_vf *vf)
{
struct otx2_mbox *mbox = vf->otx2_dev.mbox;
int ret;
otx2_mbox_msg_send(mbox, 0);
ret = otx2_mbox_wait_for_rsp(mbox, 0);
if (ret < 0) {
CPT_LOG_ERR("Could not get mailbox response");
return ret;
}
return 0;
}
int
otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
uint8_t blkaddr, uint64_t *val)
{
struct otx2_cpt_vf *vf = dev->data->dev_private;
struct otx2_mbox *mbox = vf->otx2_dev.mbox;
struct otx2_mbox_dev *mdev = &mbox->dev[0];
struct cpt_rd_wr_reg_msg *msg;
int ret, off;
msg = (struct cpt_rd_wr_reg_msg *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*msg),
sizeof(*msg));
if (msg == NULL) {
CPT_LOG_ERR("Could not allocate mailbox message");
return -EFAULT;
}
msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
msg->hdr.sig = OTX2_MBOX_REQ_SIG;
msg->hdr.pcifunc = vf->otx2_dev.pf_func;
msg->is_write = 0;
msg->reg_offset = reg;
msg->ret_val = val;
msg->blkaddr = blkaddr;
ret = otx2_cpt_send_mbox_msg(vf);
if (ret < 0)
return ret;
off = mbox->rx_start +
RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
msg = (struct cpt_rd_wr_reg_msg *) ((uintptr_t)mdev->mbase + off);
*val = msg->val;
return 0;
}
int
otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
uint8_t blkaddr, uint64_t val)
{
struct otx2_cpt_vf *vf = dev->data->dev_private;
struct otx2_mbox *mbox = vf->otx2_dev.mbox;
struct cpt_rd_wr_reg_msg *msg;
msg = (struct cpt_rd_wr_reg_msg *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*msg),
sizeof(*msg));
if (msg == NULL) {
CPT_LOG_ERR("Could not allocate mailbox message");
return -EFAULT;
}
msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
msg->hdr.sig = OTX2_MBOX_REQ_SIG;
msg->hdr.pcifunc = vf->otx2_dev.pf_func;
msg->is_write = 1;
msg->reg_offset = reg;
msg->val = val;
msg->blkaddr = blkaddr;
return otx2_cpt_send_mbox_msg(vf);
}
int
otx2_cpt_inline_init(const struct rte_cryptodev *dev)
{
struct otx2_cpt_vf *vf = dev->data->dev_private;
struct otx2_mbox *mbox = vf->otx2_dev.mbox;
struct cpt_rx_inline_lf_cfg_msg *msg;
int ret;
msg = otx2_mbox_alloc_msg_cpt_rx_inline_lf_cfg(mbox);
msg->sso_pf_func = otx2_sso_pf_func_get();
otx2_mbox_msg_send(mbox, 0);
ret = otx2_mbox_process(mbox);
if (ret < 0)
return -EIO;
return 0;
}
int
otx2_cpt_qp_ethdev_bind(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp,
uint16_t port_id)
{
struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
struct otx2_cpt_vf *vf = dev->data->dev_private;
struct otx2_mbox *mbox = vf->otx2_dev.mbox;
struct cpt_inline_ipsec_cfg_msg *msg;
struct otx2_eth_dev *otx2_eth_dev;
int ret;
if (!otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id]))
return -EINVAL;
otx2_eth_dev = otx2_eth_pmd_priv(eth_dev);
msg = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(mbox);
msg->dir = CPT_INLINE_OUTBOUND;
msg->enable = 1;
msg->slot = qp->id;
msg->nix_pf_func = otx2_eth_dev->pf_func;
otx2_mbox_msg_send(mbox, 0);
ret = otx2_mbox_process(mbox);
if (ret < 0)
return -EIO;
return 0;
}


@ -1,37 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2019 Marvell International Ltd.
*/
#ifndef _OTX2_CRYPTODEV_MBOX_H_
#define _OTX2_CRYPTODEV_MBOX_H_
#include <rte_cryptodev.h>
#include "otx2_cryptodev_hw_access.h"
int otx2_cpt_hardware_caps_get(const struct rte_cryptodev *dev,
union cpt_eng_caps *hw_caps);
int otx2_cpt_available_queues_get(const struct rte_cryptodev *dev,
uint16_t *nb_queues);
int otx2_cpt_queues_attach(const struct rte_cryptodev *dev, uint8_t nb_queues);
int otx2_cpt_queues_detach(const struct rte_cryptodev *dev);
int otx2_cpt_msix_offsets_get(const struct rte_cryptodev *dev);
__rte_internal
int otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
uint8_t blkaddr, uint64_t *val);
__rte_internal
int otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
uint8_t blkaddr, uint64_t val);
int otx2_cpt_qp_ethdev_bind(const struct rte_cryptodev *dev,
struct otx2_cpt_qp *qp, uint16_t port_id);
int otx2_cpt_inline_init(const struct rte_cryptodev *dev);
#endif /* _OTX2_CRYPTODEV_MBOX_H_ */

File diff suppressed because it is too large


@ -1,15 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2019 Marvell International Ltd.
*/
#ifndef _OTX2_CRYPTODEV_OPS_H_
#define _OTX2_CRYPTODEV_OPS_H_
#include <cryptodev_pmd.h>
#define OTX2_CPT_MIN_HEADROOM_REQ 48
#define OTX2_CPT_MIN_TAILROOM_REQ 208
extern struct rte_cryptodev_ops otx2_cpt_ops;
#endif /* _OTX2_CRYPTODEV_OPS_H_ */


@ -1,82 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef _OTX2_CRYPTODEV_OPS_HELPER_H_
#define _OTX2_CRYPTODEV_OPS_HELPER_H_
#include "cpt_pmd_logs.h"
static void
sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
{
void *priv = get_sym_session_private_data(sess, driver_id);
struct cpt_sess_misc *misc;
struct rte_mempool *pool;
struct cpt_ctx *ctx;
if (priv == NULL)
return;
misc = priv;
ctx = SESS_PRIV(misc);
if (ctx->auth_key != NULL)
rte_free(ctx->auth_key);
memset(priv, 0, cpt_get_session_size());
pool = rte_mempool_from_obj(priv);
set_sym_session_private_data(sess, driver_id, NULL);
rte_mempool_put(pool, priv);
}
static __rte_always_inline uint8_t
otx2_cpt_compcode_get(struct cpt_request_info *req)
{
volatile struct cpt_res_s_9s *res;
uint8_t ret;
res = (volatile struct cpt_res_s_9s *)req->completion_addr;
if (unlikely(res->compcode == CPT_9X_COMP_E_NOTDONE)) {
if (rte_get_timer_cycles() < req->time_out)
return ERR_REQ_PENDING;
CPT_LOG_DP_ERR("Request timed out");
return ERR_REQ_TIMEOUT;
}
if (likely(res->compcode == CPT_9X_COMP_E_GOOD)) {
ret = NO_ERR;
if (unlikely(res->uc_compcode)) {
ret = res->uc_compcode;
CPT_LOG_DP_DEBUG("Request failed with microcode error");
CPT_LOG_DP_DEBUG("MC completion code 0x%x",
res->uc_compcode);
}
} else {
CPT_LOG_DP_DEBUG("HW completion code 0x%x", res->compcode);
ret = res->compcode;
switch (res->compcode) {
case CPT_9X_COMP_E_INSTERR:
CPT_LOG_DP_ERR("Request failed with instruction error");
break;
case CPT_9X_COMP_E_FAULT:
CPT_LOG_DP_ERR("Request failed with DMA fault");
break;
case CPT_9X_COMP_E_HWERR:
CPT_LOG_DP_ERR("Request failed with hardware error");
break;
default:
CPT_LOG_DP_ERR("Request failed with unknown completion code");
}
}
return ret;
}
#endif /* _OTX2_CRYPTODEV_OPS_HELPER_H_ */


@ -1,46 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020-2021 Marvell.
*/
#ifndef _OTX2_CRYPTODEV_QP_H_
#define _OTX2_CRYPTODEV_QP_H_
#include <rte_common.h>
#include <rte_eventdev.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include "cpt_common.h"
struct otx2_cpt_qp {
uint32_t id;
/**< Queue pair id */
uint8_t blkaddr;
/**< CPT0/1 BLKADDR of LF */
uintptr_t base;
/**< Base address where BAR is mapped */
void *lmtline;
/**< Address of LMTLINE */
rte_iova_t lf_nq_reg;
/**< LF enqueue register address */
struct pending_queue pend_q;
/**< Pending queue */
struct rte_mempool *sess_mp;
/**< Session mempool */
struct rte_mempool *sess_mp_priv;
/**< Session private data mempool */
struct cpt_qp_meta_info meta_info;
/**< Metabuf info required to support operations on the queue pair */
rte_iova_t iq_dma_addr;
/**< Instruction queue address */
struct rte_event ev;
/**< Event information required for binding cryptodev queue to
* eventdev queue. Used by crypto adapter.
*/
uint8_t ca_enable;
/**< Set when queue pair is added to crypto adapter */
uint8_t qp_ev_bind;
/**< Set when queue pair is bound to event queue */
};
#endif /* _OTX2_CRYPTODEV_QP_H_ */


@ -1,655 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#include <rte_cryptodev.h>
#include <rte_esp.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_udp.h>
#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_ops.h"
#include "otx2_cryptodev_sec.h"
#include "otx2_security.h"
static int
ipsec_lp_len_precalc(struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *xform,
struct otx2_sec_session_ipsec_lp *lp)
{
struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
lp->partial_len = 0;
if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
lp->partial_len = sizeof(struct rte_ipv4_hdr);
else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
lp->partial_len = sizeof(struct rte_ipv6_hdr);
else
return -EINVAL;
}
if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
lp->partial_len += sizeof(struct rte_esp_hdr);
lp->roundup_len = sizeof(struct rte_esp_tail);
} else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
lp->partial_len += OTX2_SEC_AH_HDR_LEN;
} else {
return -EINVAL;
}
if (ipsec->options.udp_encap)
lp->partial_len += sizeof(struct rte_udp_hdr);
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
lp->partial_len += OTX2_SEC_AES_GCM_IV_LEN;
lp->partial_len += OTX2_SEC_AES_GCM_MAC_LEN;
lp->roundup_byte = OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN;
return 0;
} else {
return -EINVAL;
}
}
if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
cipher_xform = xform;
auth_xform = xform->next;
} else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
auth_xform = xform;
cipher_xform = xform->next;
} else {
return -EINVAL;
}
if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
lp->partial_len += OTX2_SEC_AES_CBC_IV_LEN;
lp->roundup_byte = OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN;
} else {
return -EINVAL;
}
if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
lp->partial_len += OTX2_SEC_SHA1_HMAC_LEN;
else if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA256_HMAC)
lp->partial_len += OTX2_SEC_SHA2_HMAC_LEN;
else
return -EINVAL;
return 0;
}
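ipsec_lp_len_precalc() above does not build packets itself; it only precomputes the fixed per-packet overhead (partial_len: outer IP, ESP header, IV, ICV and optional UDP encapsulation), the trailer added before padding (roundup_len, i.e. the ESP tail), and the cipher-block granularity the payload is padded to (roundup_byte). The sketch below shows how such fields can be combined into a worst-case output-length estimate; it is a hedged illustration of the intent, not the driver's actual datapath code.

#include <stdint.h>

static uint32_t esp_out_len_estimate(uint32_t payload_len, uint32_t partial_len,
				     uint32_t roundup_len, uint32_t roundup_byte)
{
	uint32_t enc_len = payload_len + roundup_len;	/* payload + ESP tail */

	if (roundup_byte)				/* pad to cipher block size */
		enc_len = ((enc_len + roundup_byte - 1) / roundup_byte) * roundup_byte;

	return partial_len + enc_len;			/* add fixed headers/IV/ICV */
}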
static int
otx2_cpt_enq_sa_write(struct otx2_sec_session_ipsec_lp *lp,
struct otx2_cpt_qp *qptr, uint8_t opcode)
{
uint64_t lmt_status, time_out;
void *lmtline = qptr->lmtline;
struct otx2_cpt_inst_s inst;
struct otx2_cpt_res *res;
uint64_t *mdata;
int ret = 0;
if (unlikely(rte_mempool_get(qptr->meta_info.pool,
(void **)&mdata) < 0))
return -ENOMEM;
res = (struct otx2_cpt_res *)RTE_PTR_ALIGN(mdata, 16);
res->compcode = CPT_9X_COMP_E_NOTDONE;
inst.opcode = opcode | (lp->ctx_len << 8);
inst.param1 = 0;
inst.param2 = 0;
inst.dlen = lp->ctx_len << 3;
inst.dptr = rte_mempool_virt2iova(lp);
inst.rptr = 0;
inst.cptr = rte_mempool_virt2iova(lp);
inst.egrp = OTX2_CPT_EGRP_SE;
inst.u64[0] = 0;
inst.u64[2] = 0;
inst.u64[3] = 0;
inst.res_addr = rte_mempool_virt2iova(res);
rte_io_wmb();
do {
/* Copy CPT command to LMTLINE */
otx2_lmt_mov(lmtline, &inst, 2);
lmt_status = otx2_lmt_submit(qptr->lf_nq_reg);
} while (lmt_status == 0);
time_out = rte_get_timer_cycles() +
DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
while (res->compcode == CPT_9X_COMP_E_NOTDONE) {
if (rte_get_timer_cycles() > time_out) {
rte_mempool_put(qptr->meta_info.pool, mdata);
otx2_err("Request timed out");
return -ETIMEDOUT;
}
rte_io_rmb();
}
if (unlikely(res->compcode != CPT_9X_COMP_E_GOOD)) {
ret = res->compcode;
switch (ret) {
case CPT_9X_COMP_E_INSTERR:
otx2_err("Request failed with instruction error");
break;
case CPT_9X_COMP_E_FAULT:
otx2_err("Request failed with DMA fault");
break;
case CPT_9X_COMP_E_HWERR:
otx2_err("Request failed with hardware error");
break;
default:
otx2_err("Request failed with unknown hardware "
"completion code : 0x%x", ret);
}
goto mempool_put;
}
if (unlikely(res->uc_compcode != OTX2_IPSEC_PO_CC_SUCCESS)) {
ret = res->uc_compcode;
switch (ret) {
case OTX2_IPSEC_PO_CC_AUTH_UNSUPPORTED:
otx2_err("Invalid auth type");
break;
case OTX2_IPSEC_PO_CC_ENCRYPT_UNSUPPORTED:
otx2_err("Invalid encrypt type");
break;
default:
otx2_err("Request failed with unknown microcode "
"completion code : 0x%x", ret);
}
}
mempool_put:
rte_mempool_put(qptr->meta_info.pool, mdata);
return ret;
}
static void
set_session_misc_attributes(struct otx2_sec_session_ipsec_lp *sess,
struct rte_crypto_sym_xform *crypto_xform,
struct rte_crypto_sym_xform *auth_xform,
struct rte_crypto_sym_xform *cipher_xform)
{
if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
sess->iv_offset = crypto_xform->aead.iv.offset;
sess->iv_length = crypto_xform->aead.iv.length;
sess->aad_length = crypto_xform->aead.aad_length;
sess->mac_len = crypto_xform->aead.digest_length;
} else {
sess->iv_offset = cipher_xform->cipher.iv.offset;
sess->iv_length = cipher_xform->cipher.iv.length;
sess->auth_iv_offset = auth_xform->auth.iv.offset;
sess->auth_iv_length = auth_xform->auth.iv.length;
sess->mac_len = auth_xform->auth.digest_length;
}
}
static int
crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *crypto_xform,
struct rte_security_session *sec_sess)
{
struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
struct otx2_ipsec_po_ip_template *template = NULL;
const uint8_t *cipher_key, *auth_key;
struct otx2_sec_session_ipsec_lp *lp;
struct otx2_ipsec_po_sa_ctl *ctl;
int cipher_key_len, auth_key_len;
struct otx2_ipsec_po_out_sa *sa;
struct otx2_sec_session *sess;
struct otx2_cpt_inst_s inst;
struct rte_ipv6_hdr *ip6;
struct rte_ipv4_hdr *ip;
int ret, ctx_len;
sess = get_sec_session_private_data(sec_sess);
sess->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
lp = &sess->ipsec.lp;
sa = &lp->out_sa;
ctl = &sa->ctl;
if (ctl->valid) {
otx2_err("SA already registered");
return -EINVAL;
}
memset(sa, 0, sizeof(struct otx2_ipsec_po_out_sa));
/* Initialize lookaside ipsec private data */
lp->ip_id = 0;
lp->seq_lo = 1;
lp->seq_hi = 0;
ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
if (ret)
return ret;
ret = ipsec_lp_len_precalc(ipsec, crypto_xform, lp);
if (ret)
return ret;
/* Start ip id from 1 */
lp->ip_id = 1;
if (ctl->enc_type == OTX2_IPSEC_PO_SA_ENC_AES_GCM) {
template = &sa->aes_gcm.template;
ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
aes_gcm.template) + sizeof(
sa->aes_gcm.template.ip4);
ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
lp->ctx_len = ctx_len >> 3;
} else if (ctl->auth_type ==
OTX2_IPSEC_PO_SA_AUTH_SHA1) {
template = &sa->sha1.template;
ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
sha1.template) + sizeof(
sa->sha1.template.ip4);
ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
lp->ctx_len = ctx_len >> 3;
} else if (ctl->auth_type ==
OTX2_IPSEC_PO_SA_AUTH_SHA2_256) {
template = &sa->sha2.template;
ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
sha2.template) + sizeof(
sa->sha2.template.ip4);
ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
lp->ctx_len = ctx_len >> 3;
} else {
return -EINVAL;
}
ip = &template->ip4.ipv4_hdr;
if (ipsec->options.udp_encap) {
ip->next_proto_id = IPPROTO_UDP;
template->ip4.udp_src = rte_be_to_cpu_16(4500);
template->ip4.udp_dst = rte_be_to_cpu_16(4500);
} else {
ip->next_proto_id = IPPROTO_ESP;
}
if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
ip->version_ihl = RTE_IPV4_VHL_DEF;
ip->time_to_live = ipsec->tunnel.ipv4.ttl;
ip->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
if (ipsec->tunnel.ipv4.df)
ip->fragment_offset = BIT(14);
memcpy(&ip->src_addr, &ipsec->tunnel.ipv4.src_ip,
sizeof(struct in_addr));
memcpy(&ip->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
sizeof(struct in_addr));
} else if (ipsec->tunnel.type ==
RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
if (ctl->enc_type == OTX2_IPSEC_PO_SA_ENC_AES_GCM) {
template = &sa->aes_gcm.template;
ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
aes_gcm.template) + sizeof(
sa->aes_gcm.template.ip6);
ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
lp->ctx_len = ctx_len >> 3;
} else if (ctl->auth_type ==
OTX2_IPSEC_PO_SA_AUTH_SHA1) {
template = &sa->sha1.template;
ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
sha1.template) + sizeof(
sa->sha1.template.ip6);
ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
lp->ctx_len = ctx_len >> 3;
} else if (ctl->auth_type ==
OTX2_IPSEC_PO_SA_AUTH_SHA2_256) {
template = &sa->sha2.template;
ctx_len = offsetof(struct otx2_ipsec_po_out_sa,
sha2.template) + sizeof(
sa->sha2.template.ip6);
ctx_len = RTE_ALIGN_CEIL(ctx_len, 8);
lp->ctx_len = ctx_len >> 3;
} else {
return -EINVAL;
}
ip6 = &template->ip6.ipv6_hdr;
if (ipsec->options.udp_encap) {
ip6->proto = IPPROTO_UDP;
template->ip6.udp_src = rte_be_to_cpu_16(4500);
template->ip6.udp_dst = rte_be_to_cpu_16(4500);
} else {
ip6->proto = (ipsec->proto ==
RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
IPPROTO_ESP : IPPROTO_AH;
}
ip6->vtc_flow = rte_cpu_to_be_32(0x60000000 |
((ipsec->tunnel.ipv6.dscp <<
RTE_IPV6_HDR_TC_SHIFT) &
RTE_IPV6_HDR_TC_MASK) |
((ipsec->tunnel.ipv6.flabel <<
RTE_IPV6_HDR_FL_SHIFT) &
RTE_IPV6_HDR_FL_MASK));
ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
sizeof(struct in6_addr));
memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
sizeof(struct in6_addr));
}
}
cipher_xform = crypto_xform;
auth_xform = crypto_xform->next;
cipher_key_len = 0;
auth_key_len = 0;
if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
cipher_key = crypto_xform->aead.key.data;
cipher_key_len = crypto_xform->aead.key.length;
} else {
cipher_key = cipher_xform->cipher.key.data;
cipher_key_len = cipher_xform->cipher.key.length;
auth_key = auth_xform->auth.key.data;
auth_key_len = auth_xform->auth.key.length;
if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
memcpy(sa->sha1.hmac_key, auth_key, auth_key_len);
else if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA256_HMAC)
memcpy(sa->sha2.hmac_key, auth_key, auth_key_len);
}
if (cipher_key_len != 0)
memcpy(sa->cipher_key, cipher_key, cipher_key_len);
else
return -EINVAL;
inst.u64[7] = 0;
inst.egrp = OTX2_CPT_EGRP_SE;
inst.cptr = rte_mempool_virt2iova(sa);
lp->cpt_inst_w7 = inst.u64[7];
lp->ucmd_opcode = (lp->ctx_len << 8) |
(OTX2_IPSEC_PO_PROCESS_IPSEC_OUTB);
/* Set per packet IV and IKEv2 bits */
lp->ucmd_param1 = BIT(11) | BIT(9);
lp->ucmd_param2 = 0;
set_session_misc_attributes(lp, crypto_xform,
auth_xform, cipher_xform);
return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
OTX2_IPSEC_PO_WRITE_IPSEC_OUTB);
}
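The ctx_len arithmetic repeated above follows one pattern: the SA context handed to the CPT microcode ends right after whichever IP template variant is in use, so its size is offsetof() of the template within the SA plus the size of the variant actually needed (IPv4 or IPv6), rounded up to 8 bytes, and then carried in 8-byte words (hence the >> 3). A standalone sketch of the idiom with a made-up layout; sa_example is not the real otx2_ipsec_po_out_sa.

#include <stddef.h>
#include <stdint.h>

struct sa_example {
	uint8_t ctl[8];
	uint8_t cipher_key[32];
	union {
		uint8_t ip4[20];	/* IPv4 template variant */
		uint8_t ip6[40];	/* IPv6 template variant */
	} tmpl;
};

static uint32_t sa_ctx_len_words(int ipv6)
{
	size_t len = offsetof(struct sa_example, tmpl) +
		     (ipv6 ? sizeof(((struct sa_example *)0)->tmpl.ip6)
			   : sizeof(((struct sa_example *)0)->tmpl.ip4));

	len = (len + 7) & ~(size_t)7;	/* RTE_ALIGN_CEIL(len, 8) */
	return (uint32_t)(len >> 3);	/* microcode expects 8-byte words */
}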
static int
crypto_sec_ipsec_inb_session_create(struct rte_cryptodev *crypto_dev,
struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *crypto_xform,
struct rte_security_session *sec_sess)
{
struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
const uint8_t *cipher_key, *auth_key;
struct otx2_sec_session_ipsec_lp *lp;
struct otx2_ipsec_po_sa_ctl *ctl;
int cipher_key_len, auth_key_len;
struct otx2_ipsec_po_in_sa *sa;
struct otx2_sec_session *sess;
struct otx2_cpt_inst_s inst;
int ret;
sess = get_sec_session_private_data(sec_sess);
sess->ipsec.dir = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
lp = &sess->ipsec.lp;
sa = &lp->in_sa;
ctl = &sa->ctl;
if (ctl->valid) {
otx2_err("SA already registered");
return -EINVAL;
}
memset(sa, 0, sizeof(struct otx2_ipsec_po_in_sa));
sa->replay_win_sz = ipsec->replay_win_sz;
ret = ipsec_po_sa_ctl_set(ipsec, crypto_xform, ctl);
if (ret)
return ret;
auth_xform = crypto_xform;
cipher_xform = crypto_xform->next;
cipher_key_len = 0;
auth_key_len = 0;
if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
memcpy(sa->iv.gcm.nonce, &ipsec->salt, 4);
cipher_key = crypto_xform->aead.key.data;
cipher_key_len = crypto_xform->aead.key.length;
lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
aes_gcm.hmac_key[0]) >> 3;
RTE_ASSERT(lp->ctx_len == OTX2_IPSEC_PO_AES_GCM_INB_CTX_LEN);
} else {
cipher_key = cipher_xform->cipher.key.data;
cipher_key_len = cipher_xform->cipher.key.length;
auth_key = auth_xform->auth.key.data;
auth_key_len = auth_xform->auth.key.length;
if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC) {
memcpy(sa->aes_gcm.hmac_key, auth_key, auth_key_len);
lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
aes_gcm.selector) >> 3;
} else if (auth_xform->auth.algo ==
RTE_CRYPTO_AUTH_SHA256_HMAC) {
memcpy(sa->sha2.hmac_key, auth_key, auth_key_len);
lp->ctx_len = offsetof(struct otx2_ipsec_po_in_sa,
sha2.selector) >> 3;
}
}
if (cipher_key_len != 0)
memcpy(sa->cipher_key, cipher_key, cipher_key_len);
else
return -EINVAL;
inst.u64[7] = 0;
inst.egrp = OTX2_CPT_EGRP_SE;
inst.cptr = rte_mempool_virt2iova(sa);
lp->cpt_inst_w7 = inst.u64[7];
lp->ucmd_opcode = (lp->ctx_len << 8) |
(OTX2_IPSEC_PO_PROCESS_IPSEC_INB);
lp->ucmd_param1 = 0;
/* Set IKEv2 bit */
lp->ucmd_param2 = BIT(12);
set_session_misc_attributes(lp, crypto_xform,
auth_xform, cipher_xform);
if (sa->replay_win_sz) {
if (sa->replay_win_sz > OTX2_IPSEC_MAX_REPLAY_WIN_SZ) {
otx2_err("Replay window size is not supported");
return -ENOTSUP;
}
sa->replay = rte_zmalloc(NULL, sizeof(struct otx2_ipsec_replay),
0);
if (sa->replay == NULL)
return -ENOMEM;
/* Set window bottom to 1, base and top to size of window */
sa->replay->winb = 1;
sa->replay->wint = sa->replay_win_sz;
sa->replay->base = sa->replay_win_sz;
sa->esn_low = 0;
sa->esn_hi = 0;
}
return otx2_cpt_enq_sa_write(lp, crypto_dev->data->queue_pairs[0],
OTX2_IPSEC_PO_WRITE_IPSEC_INB);
}
static int
crypto_sec_ipsec_session_create(struct rte_cryptodev *crypto_dev,
struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *crypto_xform,
struct rte_security_session *sess)
{
int ret;
if (crypto_dev->data->queue_pairs[0] == NULL) {
otx2_err("Setup cpt queue pair before creating sec session");
return -EPERM;
}
ret = ipsec_po_xform_verify(ipsec, crypto_xform);
if (ret)
return ret;
if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
return crypto_sec_ipsec_inb_session_create(crypto_dev, ipsec,
crypto_xform, sess);
else
return crypto_sec_ipsec_outb_session_create(crypto_dev, ipsec,
crypto_xform, sess);
}
static int
otx2_crypto_sec_session_create(void *device,
struct rte_security_session_conf *conf,
struct rte_security_session *sess,
struct rte_mempool *mempool)
{
struct otx2_sec_session *priv;
int ret;
if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
return -ENOTSUP;
if (rte_security_dynfield_register() < 0)
return -rte_errno;
if (rte_mempool_get(mempool, (void **)&priv)) {
otx2_err("Could not allocate security session private data");
return -ENOMEM;
}
set_sec_session_private_data(sess, priv);
priv->userdata = conf->userdata;
if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
ret = crypto_sec_ipsec_session_create(device, &conf->ipsec,
conf->crypto_xform,
sess);
else
ret = -ENOTSUP;
if (ret)
goto mempool_put;
return 0;
mempool_put:
rte_mempool_put(mempool, priv);
set_sec_session_private_data(sess, NULL);
return ret;
}
static int
otx2_crypto_sec_session_destroy(void *device __rte_unused,
struct rte_security_session *sess)
{
struct otx2_sec_session *priv;
struct rte_mempool *sess_mp;
priv = get_sec_session_private_data(sess);
if (priv == NULL)
return 0;
sess_mp = rte_mempool_from_obj(priv);
memset(priv, 0, sizeof(*priv));
set_sec_session_private_data(sess, NULL);
rte_mempool_put(sess_mp, priv);
return 0;
}
static unsigned int
otx2_crypto_sec_session_get_size(void *device __rte_unused)
{
return sizeof(struct otx2_sec_session);
}
static int
otx2_crypto_sec_set_pkt_mdata(void *device __rte_unused,
struct rte_security_session *session,
struct rte_mbuf *m, void *params __rte_unused)
{
/* Set security session as the pkt metadata */
*rte_security_dynfield(m) = (rte_security_dynfield_t)session;
return 0;
}
static int
otx2_crypto_sec_get_userdata(void *device __rte_unused, uint64_t md,
void **userdata)
{
/* Retrieve userdata */
*userdata = (void *)md;
return 0;
}
static struct rte_security_ops otx2_crypto_sec_ops = {
.session_create = otx2_crypto_sec_session_create,
.session_destroy = otx2_crypto_sec_session_destroy,
.session_get_size = otx2_crypto_sec_session_get_size,
.set_pkt_metadata = otx2_crypto_sec_set_pkt_mdata,
.get_userdata = otx2_crypto_sec_get_userdata,
.capabilities_get = otx2_crypto_sec_capabilities_get
};
int
otx2_crypto_sec_ctx_create(struct rte_cryptodev *cdev)
{
struct rte_security_ctx *ctx;
ctx = rte_malloc("otx2_cpt_dev_sec_ctx",
sizeof(struct rte_security_ctx), 0);
if (ctx == NULL)
return -ENOMEM;
/* Populate ctx */
ctx->device = cdev;
ctx->ops = &otx2_crypto_sec_ops;
ctx->sess_cnt = 0;
cdev->security_ctx = ctx;
return 0;
}
void
otx2_crypto_sec_ctx_destroy(struct rte_cryptodev *cdev)
{
rte_free(cdev->security_ctx);
}


@ -1,64 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __OTX2_CRYPTODEV_SEC_H__
#define __OTX2_CRYPTODEV_SEC_H__
#include <rte_cryptodev.h>
#include "otx2_ipsec_po.h"
struct otx2_sec_session_ipsec_lp {
RTE_STD_C11
union {
/* Inbound SA */
struct otx2_ipsec_po_in_sa in_sa;
/* Outbound SA */
struct otx2_ipsec_po_out_sa out_sa;
};
uint64_t cpt_inst_w7;
union {
uint64_t ucmd_w0;
struct {
uint16_t ucmd_dlen;
uint16_t ucmd_param2;
uint16_t ucmd_param1;
uint16_t ucmd_opcode;
};
};
uint8_t partial_len;
uint8_t roundup_len;
uint8_t roundup_byte;
uint16_t ip_id;
union {
uint64_t esn;
struct {
uint32_t seq_lo;
uint32_t seq_hi;
};
};
/** Context length in 8-byte words */
size_t ctx_len;
/** Auth IV offset in bytes */
uint16_t auth_iv_offset;
/** IV offset in bytes */
uint16_t iv_offset;
/** AAD length */
uint16_t aad_length;
/** MAC len in bytes */
uint8_t mac_len;
/** IV length in bytes */
uint8_t iv_length;
/** Auth IV length in bytes */
uint8_t auth_iv_length;
};
int otx2_crypto_sec_ctx_create(struct rte_cryptodev *crypto_dev);
void otx2_crypto_sec_ctx_destroy(struct rte_cryptodev *crypto_dev);
#endif /* __OTX2_CRYPTODEV_SEC_H__ */


@ -1,227 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __OTX2_IPSEC_ANTI_REPLAY_H__
#define __OTX2_IPSEC_ANTI_REPLAY_H__
#include <rte_mbuf.h>
#include "otx2_ipsec_fp.h"
#define WORD_SHIFT 6
#define WORD_SIZE (1 << WORD_SHIFT)
#define WORD_MASK (WORD_SIZE - 1)
#define IPSEC_ANTI_REPLAY_FAILED (-1)
static inline int
anti_replay_check(struct otx2_ipsec_replay *replay, uint64_t seq,
uint64_t winsz)
{
uint64_t *window = &replay->window[0];
uint64_t ex_winsz = winsz + WORD_SIZE;
uint64_t winwords = ex_winsz >> WORD_SHIFT;
uint64_t base = replay->base;
uint32_t winb = replay->winb;
uint32_t wint = replay->wint;
uint64_t seqword, shiftwords;
uint64_t bit_pos;
uint64_t shift;
uint64_t *wptr;
uint64_t tmp;
if (winsz > 64)
goto slow_shift;
/* Check if the seq is the biggest one yet */
if (likely(seq > base)) {
shift = seq - base;
if (shift < winsz) { /* In window */
/*
* The window fits in a single 64-bit word here: slide it
* forward by 'shift' and mark the new base as received.
*/
wptr = window + (shift >> WORD_SHIFT);
*wptr <<= shift;
*wptr |= 1ull;
} else {
/* No special handling of window size > 64 */
wptr = window + ((winsz - 1) >> WORD_SHIFT);
/*
* Zero out the whole window (especially for
* bigger than 64b window) till the last 64b word
* as the incoming sequence number minus
* base sequence is more than the window size.
*/
while (window != wptr)
*window++ = 0ull;
/*
* Set the last bit (of the window) to 1
* as that corresponds to the base sequence number.
* Now any incoming sequence number which is
* (base - window size - 1) will pass anti-replay check
*/
*wptr = 1ull;
}
/*
* Set the base to incoming sequence number as
* that is the biggest sequence number seen yet
*/
replay->base = seq;
return 0;
}
bit_pos = base - seq;
/* If seq falls behind the window, return failure */
if (bit_pos >= winsz)
return IPSEC_ANTI_REPLAY_FAILED;
/* seq is within anti-replay window */
wptr = window + ((winsz - bit_pos - 1) >> WORD_SHIFT);
bit_pos &= WORD_MASK;
/* Check if this is a replayed packet */
if (*wptr & ((1ull) << bit_pos))
return IPSEC_ANTI_REPLAY_FAILED;
/* mark as seen */
*wptr |= ((1ull) << bit_pos);
return 0;
slow_shift:
if (likely(seq > base)) {
uint32_t i;
shift = seq - base;
if (unlikely(shift >= winsz)) {
/*
* shift is bigger than the window,
* so just zero out everything
*/
for (i = 0; i < winwords; i++)
window[i] = 0;
winupdate:
/* Find out the word */
seqword = ((seq - 1) % ex_winsz) >> WORD_SHIFT;
/* Find out the bit in the word */
bit_pos = (seq - 1) & WORD_MASK;
/*
* Set the bit corresponding to sequence number
* in window to mark it as received
*/
window[seqword] |= (1ull << (63 - bit_pos));
/* wint and winb range from 1 to ex_winsz */
replay->wint = ((wint + shift - 1) % ex_winsz) + 1;
replay->winb = ((winb + shift - 1) % ex_winsz) + 1;
replay->base = seq;
return 0;
}
/*
* New sequence number is bigger than the base but
* it's not bigger than base + window size
*/
shiftwords = ((wint + shift - 1) >> WORD_SHIFT) -
((wint - 1) >> WORD_SHIFT);
if (unlikely(shiftwords)) {
tmp = (wint + WORD_SIZE - 1) / WORD_SIZE;
for (i = 0; i < shiftwords; i++) {
tmp %= winwords;
window[tmp++] = 0;
}
}
goto winupdate;
}
/* Sequence number is before the window */
if (unlikely((seq + winsz) <= base))
return IPSEC_ANTI_REPLAY_FAILED;
/* Sequence number is within the window */
/* Find out the word */
seqword = ((seq - 1) % ex_winsz) >> WORD_SHIFT;
/* Find out the bit in the word */
bit_pos = (seq - 1) & WORD_MASK;
/* Check if this is a replayed packet */
if (window[seqword] & (1ull << (63 - bit_pos)))
return IPSEC_ANTI_REPLAY_FAILED;
/*
* Set the bit corresponding to sequence number
* in window to mark it as received
*/
window[seqword] |= (1ull << (63 - bit_pos));
return 0;
}
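anti_replay_check() above has two paths: a fast path for windows of at most 64 sequence numbers, where the whole window lives in one 64-bit word, and a slow path that keeps a circular array of 64-bit words (winsz plus 64 extra bits, tracked through wint/winb) for larger windows. The core idea of the fast path is shown in the standalone sketch below, assuming a fixed 64-entry window; it is a simplification, not the driver's exact code.

#include <stdint.h>

struct replay64_example {
	uint64_t base;		/* highest sequence number accepted so far */
	uint64_t window;	/* bit i set => (base - i) already received */
};

static int replay64_check_and_update(struct replay64_example *r, uint64_t seq)
{
	if (seq > r->base) {				/* newest packet so far */
		uint64_t shift = seq - r->base;

		r->window = (shift >= 64) ? 0 : (r->window << shift);
		r->window |= 1ull;			/* bit 0 is the new base */
		r->base = seq;
		return 0;
	}

	uint64_t off = r->base - seq;

	if (off >= 64)					/* older than the window */
		return -1;
	if (r->window & (1ull << off))			/* already seen: replay */
		return -1;
	r->window |= 1ull << off;			/* mark as received */
	return 0;
}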
static inline int
cpt_ipsec_ip_antireplay_check(struct otx2_ipsec_fp_in_sa *sa, void *l3_ptr)
{
struct otx2_ipsec_fp_res_hdr *hdr = l3_ptr;
uint64_t seq_in_sa;
uint32_t seqh = 0;
uint32_t seql;
uint64_t seq;
uint8_t esn;
int ret;
esn = sa->ctl.esn_en;
seql = rte_be_to_cpu_32(hdr->seq_no_lo);
if (!esn)
seq = (uint64_t)seql;
else {
seqh = rte_be_to_cpu_32(hdr->seq_no_hi);
seq = ((uint64_t)seqh << 32) | seql;
}
if (unlikely(seq == 0))
return IPSEC_ANTI_REPLAY_FAILED;
rte_spinlock_lock(&sa->replay->lock);
ret = anti_replay_check(sa->replay, seq, sa->replay_win_sz);
if (esn && (ret == 0)) {
seq_in_sa = ((uint64_t)rte_be_to_cpu_32(sa->esn_hi) << 32) |
rte_be_to_cpu_32(sa->esn_low);
if (seq > seq_in_sa) {
sa->esn_low = rte_cpu_to_be_32(seql);
sa->esn_hi = rte_cpu_to_be_32(seqh);
}
}
rte_spinlock_unlock(&sa->replay->lock);
return ret;
}
static inline uint32_t
anti_replay_get_seqh(uint32_t winsz, uint32_t seql,
uint32_t esn_hi, uint32_t esn_low)
{
uint32_t win_low = esn_low - winsz + 1;
if (esn_low > winsz - 1) {
/* Window is in one sequence number subspace */
if (seql > win_low)
return esn_hi;
else
return esn_hi + 1;
} else {
/* Window is split across two sequence number subspaces */
if (seql > win_low)
return esn_hi - 1;
else
return esn_hi;
}
}
#endif /* __OTX2_IPSEC_ANTI_REPLAY_H__ */
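As a worked example of anti_replay_get_seqh() above: with winsz = 128 and esn_low = 5, win_low wraps to 0xFFFFFF86, so the window spans 0xFFFFFF86..0xFFFFFFFF of the previous 32-bit sequence-number epoch plus 0..5 of the current one. An incoming seql of 0xFFFFFFF0 falls in the older half and the function returns esn_hi - 1, while seql = 3 stays in the current epoch and keeps esn_hi.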


@ -1,371 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2020 Marvell International Ltd.
*/
#ifndef __OTX2_IPSEC_FP_H__
#define __OTX2_IPSEC_FP_H__
#include <rte_crypto_sym.h>
#include <rte_security.h>
/* Macros for anti replay and ESN */
#define OTX2_IPSEC_MAX_REPLAY_WIN_SZ 1024
struct otx2_ipsec_fp_res_hdr {
uint32_t spi;
uint32_t seq_no_lo;
uint32_t seq_no_hi;
uint32_t rsvd;
};
enum {
OTX2_IPSEC_FP_SA_DIRECTION_INBOUND = 0,
OTX2_IPSEC_FP_SA_DIRECTION_OUTBOUND = 1,
};
enum {
OTX2_IPSEC_FP_SA_IP_VERSION_4 = 0,
OTX2_IPSEC_FP_SA_IP_VERSION_6 = 1,
};
enum {
OTX2_IPSEC_FP_SA_MODE_TRANSPORT = 0,
OTX2_IPSEC_FP_SA_MODE_TUNNEL = 1,
};
enum {
OTX2_IPSEC_FP_SA_PROTOCOL_AH = 0,
OTX2_IPSEC_FP_SA_PROTOCOL_ESP = 1,
};
enum {
OTX2_IPSEC_FP_SA_AES_KEY_LEN_128 = 1,
OTX2_IPSEC_FP_SA_AES_KEY_LEN_192 = 2,
OTX2_IPSEC_FP_SA_AES_KEY_LEN_256 = 3,
};
enum {
OTX2_IPSEC_FP_SA_ENC_NULL = 0,
OTX2_IPSEC_FP_SA_ENC_DES_CBC = 1,
OTX2_IPSEC_FP_SA_ENC_3DES_CBC = 2,
OTX2_IPSEC_FP_SA_ENC_AES_CBC = 3,
OTX2_IPSEC_FP_SA_ENC_AES_CTR = 4,
OTX2_IPSEC_FP_SA_ENC_AES_GCM = 5,
OTX2_IPSEC_FP_SA_ENC_AES_CCM = 6,
};
enum {
OTX2_IPSEC_FP_SA_AUTH_NULL = 0,
OTX2_IPSEC_FP_SA_AUTH_MD5 = 1,
OTX2_IPSEC_FP_SA_AUTH_SHA1 = 2,
OTX2_IPSEC_FP_SA_AUTH_SHA2_224 = 3,
OTX2_IPSEC_FP_SA_AUTH_SHA2_256 = 4,
OTX2_IPSEC_FP_SA_AUTH_SHA2_384 = 5,
OTX2_IPSEC_FP_SA_AUTH_SHA2_512 = 6,
OTX2_IPSEC_FP_SA_AUTH_AES_GMAC = 7,
OTX2_IPSEC_FP_SA_AUTH_AES_XCBC_128 = 8,
};
enum {
OTX2_IPSEC_FP_SA_FRAG_POST = 0,
OTX2_IPSEC_FP_SA_FRAG_PRE = 1,
};
enum {
OTX2_IPSEC_FP_SA_ENCAP_NONE = 0,
OTX2_IPSEC_FP_SA_ENCAP_UDP = 1,
};
struct otx2_ipsec_fp_sa_ctl {
rte_be32_t spi : 32;
uint64_t exp_proto_inter_frag : 8;
uint64_t rsvd_42_40 : 3;
uint64_t esn_en : 1;
uint64_t rsvd_45_44 : 2;
uint64_t encap_type : 2;
uint64_t enc_type : 3;
uint64_t rsvd_48 : 1;
uint64_t auth_type : 4;
uint64_t valid : 1;
uint64_t direction : 1;
uint64_t outer_ip_ver : 1;
uint64_t inner_ip_ver : 1;
uint64_t ipsec_mode : 1;
uint64_t ipsec_proto : 1;
uint64_t aes_key_len : 2;
};
struct otx2_ipsec_fp_out_sa {
/* w0 */
struct otx2_ipsec_fp_sa_ctl ctl;
/* w1 */
uint8_t nonce[4];
uint16_t udp_src;
uint16_t udp_dst;
/* w2 */
uint32_t ip_src;
uint32_t ip_dst;
/* w3-w6 */
uint8_t cipher_key[32];
/* w7-w12 */
uint8_t hmac_key[48];
};
struct otx2_ipsec_replay {
rte_spinlock_t lock;
uint32_t winb;
uint32_t wint;
uint64_t base; /**< base of the anti-replay window */
uint64_t window[17]; /**< anti-replay window */
};
struct otx2_ipsec_fp_in_sa {
/* w0 */
struct otx2_ipsec_fp_sa_ctl ctl;
/* w1 */
uint8_t nonce[4]; /* Only for AES-GCM */
uint32_t unused;
/* w2 */
uint32_t esn_hi;
uint32_t esn_low;
/* w3-w6 */
uint8_t cipher_key[32];
/* w7-w12 */
uint8_t hmac_key[48];
RTE_STD_C11
union {
void *userdata;
uint64_t udata64;
};
union {
struct otx2_ipsec_replay *replay;
uint64_t replay64;
};
uint32_t replay_win_sz;
uint32_t reserved1;
};
static inline int
ipsec_fp_xform_cipher_verify(struct rte_crypto_sym_xform *xform)
{
if (xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
switch (xform->cipher.key.length) {
case 16:
case 24:
case 32:
break;
default:
return -ENOTSUP;
}
return 0;
}
return -ENOTSUP;
}
static inline int
ipsec_fp_xform_auth_verify(struct rte_crypto_sym_xform *xform)
{
uint16_t keylen = xform->auth.key.length;
if (xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC) {
if (keylen >= 20 && keylen <= 64)
return 0;
}
return -ENOTSUP;
}
static inline int
ipsec_fp_xform_aead_verify(struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *xform)
{
if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
xform->aead.op != RTE_CRYPTO_AEAD_OP_ENCRYPT)
return -EINVAL;
if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
xform->aead.op != RTE_CRYPTO_AEAD_OP_DECRYPT)
return -EINVAL;
if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
switch (xform->aead.key.length) {
case 16:
case 24:
case 32:
break;
default:
return -EINVAL;
}
return 0;
}
return -ENOTSUP;
}
static inline int
ipsec_fp_xform_verify(struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *xform)
{
struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
int ret;
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
return ipsec_fp_xform_aead_verify(ipsec, xform);
if (xform->next == NULL)
return -EINVAL;
if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
/* Ingress */
if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
xform->next->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
return -EINVAL;
auth_xform = xform;
cipher_xform = xform->next;
} else {
/* Egress */
if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
xform->next->type != RTE_CRYPTO_SYM_XFORM_AUTH)
return -EINVAL;
cipher_xform = xform;
auth_xform = xform->next;
}
ret = ipsec_fp_xform_cipher_verify(cipher_xform);
if (ret)
return ret;
ret = ipsec_fp_xform_auth_verify(auth_xform);
if (ret)
return ret;
return 0;
}
static inline int
ipsec_fp_sa_ctl_set(struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *xform,
struct otx2_ipsec_fp_sa_ctl *ctl)
{
struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
int aes_key_len;
if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
ctl->direction = OTX2_IPSEC_FP_SA_DIRECTION_OUTBOUND;
cipher_xform = xform;
auth_xform = xform->next;
} else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
ctl->direction = OTX2_IPSEC_FP_SA_DIRECTION_INBOUND;
auth_xform = xform;
cipher_xform = xform->next;
} else {
return -EINVAL;
}
if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
ctl->outer_ip_ver = OTX2_IPSEC_FP_SA_IP_VERSION_4;
else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
ctl->outer_ip_ver = OTX2_IPSEC_FP_SA_IP_VERSION_6;
else
return -EINVAL;
}
ctl->inner_ip_ver = OTX2_IPSEC_FP_SA_IP_VERSION_4;
if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT)
ctl->ipsec_mode = OTX2_IPSEC_FP_SA_MODE_TRANSPORT;
else if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
ctl->ipsec_mode = OTX2_IPSEC_FP_SA_MODE_TUNNEL;
else
return -EINVAL;
if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
ctl->ipsec_proto = OTX2_IPSEC_FP_SA_PROTOCOL_AH;
else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
ctl->ipsec_proto = OTX2_IPSEC_FP_SA_PROTOCOL_ESP;
else
return -EINVAL;
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
ctl->enc_type = OTX2_IPSEC_FP_SA_ENC_AES_GCM;
aes_key_len = xform->aead.key.length;
} else {
return -ENOTSUP;
}
} else if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
ctl->enc_type = OTX2_IPSEC_FP_SA_ENC_AES_CBC;
aes_key_len = cipher_xform->cipher.key.length;
} else {
return -ENOTSUP;
}
switch (aes_key_len) {
case 16:
ctl->aes_key_len = OTX2_IPSEC_FP_SA_AES_KEY_LEN_128;
break;
case 24:
ctl->aes_key_len = OTX2_IPSEC_FP_SA_AES_KEY_LEN_192;
break;
case 32:
ctl->aes_key_len = OTX2_IPSEC_FP_SA_AES_KEY_LEN_256;
break;
default:
return -EINVAL;
}
if (xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
switch (auth_xform->auth.algo) {
case RTE_CRYPTO_AUTH_NULL:
ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_NULL;
break;
case RTE_CRYPTO_AUTH_MD5_HMAC:
ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_MD5;
break;
case RTE_CRYPTO_AUTH_SHA1_HMAC:
ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_SHA1;
break;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_SHA2_224;
break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_SHA2_256;
break;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_SHA2_384;
break;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_SHA2_512;
break;
case RTE_CRYPTO_AUTH_AES_GMAC:
ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_AES_GMAC;
break;
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_AES_XCBC_128;
break;
default:
return -ENOTSUP;
}
}
if (ipsec->options.esn == 1)
ctl->esn_en = 1;
ctl->spi = rte_cpu_to_be_32(ipsec->spi);
return 0;
}
#endif /* __OTX2_IPSEC_FP_H__ */


@ -1,447 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2020 Marvell International Ltd.
*/
#ifndef __OTX2_IPSEC_PO_H__
#define __OTX2_IPSEC_PO_H__
#include <rte_crypto_sym.h>
#include <rte_ip.h>
#include <rte_security.h>
#define OTX2_IPSEC_PO_AES_GCM_INB_CTX_LEN 0x09
#define OTX2_IPSEC_PO_WRITE_IPSEC_OUTB 0x20
#define OTX2_IPSEC_PO_WRITE_IPSEC_INB 0x21
#define OTX2_IPSEC_PO_PROCESS_IPSEC_OUTB 0x23
#define OTX2_IPSEC_PO_PROCESS_IPSEC_INB 0x24
#define OTX2_IPSEC_PO_INB_RPTR_HDR 0x8
enum otx2_ipsec_po_comp_e {
OTX2_IPSEC_PO_CC_SUCCESS = 0x00,
OTX2_IPSEC_PO_CC_AUTH_UNSUPPORTED = 0xB0,
OTX2_IPSEC_PO_CC_ENCRYPT_UNSUPPORTED = 0xB1,
};
enum {
OTX2_IPSEC_PO_SA_DIRECTION_INBOUND = 0,
OTX2_IPSEC_PO_SA_DIRECTION_OUTBOUND = 1,
};
enum {
OTX2_IPSEC_PO_SA_IP_VERSION_4 = 0,
OTX2_IPSEC_PO_SA_IP_VERSION_6 = 1,
};
enum {
OTX2_IPSEC_PO_SA_MODE_TRANSPORT = 0,
OTX2_IPSEC_PO_SA_MODE_TUNNEL = 1,
};
enum {
OTX2_IPSEC_PO_SA_PROTOCOL_AH = 0,
OTX2_IPSEC_PO_SA_PROTOCOL_ESP = 1,
};
enum {
OTX2_IPSEC_PO_SA_AES_KEY_LEN_128 = 1,
OTX2_IPSEC_PO_SA_AES_KEY_LEN_192 = 2,
OTX2_IPSEC_PO_SA_AES_KEY_LEN_256 = 3,
};
enum {
OTX2_IPSEC_PO_SA_ENC_NULL = 0,
OTX2_IPSEC_PO_SA_ENC_DES_CBC = 1,
OTX2_IPSEC_PO_SA_ENC_3DES_CBC = 2,
OTX2_IPSEC_PO_SA_ENC_AES_CBC = 3,
OTX2_IPSEC_PO_SA_ENC_AES_CTR = 4,
OTX2_IPSEC_PO_SA_ENC_AES_GCM = 5,
OTX2_IPSEC_PO_SA_ENC_AES_CCM = 6,
};
enum {
OTX2_IPSEC_PO_SA_AUTH_NULL = 0,
OTX2_IPSEC_PO_SA_AUTH_MD5 = 1,
OTX2_IPSEC_PO_SA_AUTH_SHA1 = 2,
OTX2_IPSEC_PO_SA_AUTH_SHA2_224 = 3,
OTX2_IPSEC_PO_SA_AUTH_SHA2_256 = 4,
OTX2_IPSEC_PO_SA_AUTH_SHA2_384 = 5,
OTX2_IPSEC_PO_SA_AUTH_SHA2_512 = 6,
OTX2_IPSEC_PO_SA_AUTH_AES_GMAC = 7,
OTX2_IPSEC_PO_SA_AUTH_AES_XCBC_128 = 8,
};
enum {
OTX2_IPSEC_PO_SA_FRAG_POST = 0,
OTX2_IPSEC_PO_SA_FRAG_PRE = 1,
};
enum {
OTX2_IPSEC_PO_SA_ENCAP_NONE = 0,
OTX2_IPSEC_PO_SA_ENCAP_UDP = 1,
};
struct otx2_ipsec_po_out_hdr {
uint32_t ip_id;
uint32_t seq;
uint8_t iv[16];
};
union otx2_ipsec_po_bit_perfect_iv {
uint8_t aes_iv[16];
uint8_t des_iv[8];
struct {
uint8_t nonce[4];
uint8_t iv[8];
uint8_t counter[4];
} gcm;
};
struct otx2_ipsec_po_traffic_selector {
rte_be16_t src_port[2];
rte_be16_t dst_port[2];
RTE_STD_C11
union {
struct {
rte_be32_t src_addr[2];
rte_be32_t dst_addr[2];
} ipv4;
struct {
uint8_t src_addr[32];
uint8_t dst_addr[32];
} ipv6;
};
};
struct otx2_ipsec_po_sa_ctl {
rte_be32_t spi : 32;
uint64_t exp_proto_inter_frag : 8;
uint64_t rsvd_42_40 : 3;
uint64_t esn_en : 1;
uint64_t rsvd_45_44 : 2;
uint64_t encap_type : 2;
uint64_t enc_type : 3;
uint64_t rsvd_48 : 1;
uint64_t auth_type : 4;
uint64_t valid : 1;
uint64_t direction : 1;
uint64_t outer_ip_ver : 1;
uint64_t inner_ip_ver : 1;
uint64_t ipsec_mode : 1;
uint64_t ipsec_proto : 1;
uint64_t aes_key_len : 2;
};
struct otx2_ipsec_po_in_sa {
/* w0 */
struct otx2_ipsec_po_sa_ctl ctl;
/* w1-w4 */
uint8_t cipher_key[32];
/* w5-w6 */
union otx2_ipsec_po_bit_perfect_iv iv;
/* w7 */
uint32_t esn_hi;
uint32_t esn_low;
/* w8 */
uint8_t udp_encap[8];
/* w9-w33 */
union {
struct {
uint8_t hmac_key[48];
struct otx2_ipsec_po_traffic_selector selector;
} aes_gcm;
struct {
uint8_t hmac_key[64];
uint8_t hmac_iv[64];
struct otx2_ipsec_po_traffic_selector selector;
} sha2;
};
union {
struct otx2_ipsec_replay *replay;
uint64_t replay64;
};
uint32_t replay_win_sz;
};
struct otx2_ipsec_po_ip_template {
RTE_STD_C11
union {
struct {
struct rte_ipv4_hdr ipv4_hdr;
uint16_t udp_src;
uint16_t udp_dst;
} ip4;
struct {
struct rte_ipv6_hdr ipv6_hdr;
uint16_t udp_src;
uint16_t udp_dst;
} ip6;
};
};
struct otx2_ipsec_po_out_sa {
/* w0 */
struct otx2_ipsec_po_sa_ctl ctl;
/* w1-w4 */
uint8_t cipher_key[32];
/* w5-w6 */
union otx2_ipsec_po_bit_perfect_iv iv;
/* w7 */
uint32_t esn_hi;
uint32_t esn_low;
/* w8-w55 */
union {
struct {
struct otx2_ipsec_po_ip_template template;
} aes_gcm;
struct {
uint8_t hmac_key[24];
uint8_t unused[24];
struct otx2_ipsec_po_ip_template template;
} sha1;
struct {
uint8_t hmac_key[64];
uint8_t hmac_iv[64];
struct otx2_ipsec_po_ip_template template;
} sha2;
};
};
static inline int
ipsec_po_xform_cipher_verify(struct rte_crypto_sym_xform *xform)
{
if (xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
switch (xform->cipher.key.length) {
case 16:
case 24:
case 32:
break;
default:
return -ENOTSUP;
}
return 0;
}
return -ENOTSUP;
}
static inline int
ipsec_po_xform_auth_verify(struct rte_crypto_sym_xform *xform)
{
uint16_t keylen = xform->auth.key.length;
if (xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC) {
if (keylen >= 20 && keylen <= 64)
return 0;
} else if (xform->auth.algo == RTE_CRYPTO_AUTH_SHA256_HMAC) {
if (keylen >= 32 && keylen <= 64)
return 0;
}
return -ENOTSUP;
}
static inline int
ipsec_po_xform_aead_verify(struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *xform)
{
if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
xform->aead.op != RTE_CRYPTO_AEAD_OP_ENCRYPT)
return -EINVAL;
if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
xform->aead.op != RTE_CRYPTO_AEAD_OP_DECRYPT)
return -EINVAL;
if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
switch (xform->aead.key.length) {
case 16:
case 24:
case 32:
break;
default:
return -EINVAL;
}
return 0;
}
return -ENOTSUP;
}
static inline int
ipsec_po_xform_verify(struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *xform)
{
struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
int ret;
if (ipsec->life.bytes_hard_limit != 0 ||
ipsec->life.bytes_soft_limit != 0 ||
ipsec->life.packets_hard_limit != 0 ||
ipsec->life.packets_soft_limit != 0)
return -ENOTSUP;
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
return ipsec_po_xform_aead_verify(ipsec, xform);
if (xform->next == NULL)
return -EINVAL;
if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
/* Ingress */
if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
xform->next->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
return -EINVAL;
auth_xform = xform;
cipher_xform = xform->next;
} else {
/* Egress */
if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
xform->next->type != RTE_CRYPTO_SYM_XFORM_AUTH)
return -EINVAL;
cipher_xform = xform;
auth_xform = xform->next;
}
ret = ipsec_po_xform_cipher_verify(cipher_xform);
if (ret)
return ret;
ret = ipsec_po_xform_auth_verify(auth_xform);
if (ret)
return ret;
return 0;
}
static inline int
ipsec_po_sa_ctl_set(struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *xform,
struct otx2_ipsec_po_sa_ctl *ctl)
{
struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
int aes_key_len;
if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
ctl->direction = OTX2_IPSEC_PO_SA_DIRECTION_OUTBOUND;
cipher_xform = xform;
auth_xform = xform->next;
} else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
ctl->direction = OTX2_IPSEC_PO_SA_DIRECTION_INBOUND;
auth_xform = xform;
cipher_xform = xform->next;
} else {
return -EINVAL;
}
if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
ctl->outer_ip_ver = OTX2_IPSEC_PO_SA_IP_VERSION_4;
else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
ctl->outer_ip_ver = OTX2_IPSEC_PO_SA_IP_VERSION_6;
else
return -EINVAL;
}
ctl->inner_ip_ver = ctl->outer_ip_ver;
if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT)
ctl->ipsec_mode = OTX2_IPSEC_PO_SA_MODE_TRANSPORT;
else if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
ctl->ipsec_mode = OTX2_IPSEC_PO_SA_MODE_TUNNEL;
else
return -EINVAL;
if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
ctl->ipsec_proto = OTX2_IPSEC_PO_SA_PROTOCOL_AH;
else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
ctl->ipsec_proto = OTX2_IPSEC_PO_SA_PROTOCOL_ESP;
else
return -EINVAL;
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
ctl->enc_type = OTX2_IPSEC_PO_SA_ENC_AES_GCM;
aes_key_len = xform->aead.key.length;
} else {
return -ENOTSUP;
}
} else if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
ctl->enc_type = OTX2_IPSEC_PO_SA_ENC_AES_CBC;
aes_key_len = cipher_xform->cipher.key.length;
} else {
return -ENOTSUP;
}
switch (aes_key_len) {
case 16:
ctl->aes_key_len = OTX2_IPSEC_PO_SA_AES_KEY_LEN_128;
break;
case 24:
ctl->aes_key_len = OTX2_IPSEC_PO_SA_AES_KEY_LEN_192;
break;
case 32:
ctl->aes_key_len = OTX2_IPSEC_PO_SA_AES_KEY_LEN_256;
break;
default:
return -EINVAL;
}
if (xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
switch (auth_xform->auth.algo) {
case RTE_CRYPTO_AUTH_NULL:
ctl->auth_type = OTX2_IPSEC_PO_SA_AUTH_NULL;
break;
case RTE_CRYPTO_AUTH_MD5_HMAC:
ctl->auth_type = OTX2_IPSEC_PO_SA_AUTH_MD5;
break;
case RTE_CRYPTO_AUTH_SHA1_HMAC:
ctl->auth_type = OTX2_IPSEC_PO_SA_AUTH_SHA1;
break;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
ctl->auth_type = OTX2_IPSEC_PO_SA_AUTH_SHA2_224;
break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
ctl->auth_type = OTX2_IPSEC_PO_SA_AUTH_SHA2_256;
break;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
ctl->auth_type = OTX2_IPSEC_PO_SA_AUTH_SHA2_384;
break;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
ctl->auth_type = OTX2_IPSEC_PO_SA_AUTH_SHA2_512;
break;
case RTE_CRYPTO_AUTH_AES_GMAC:
ctl->auth_type = OTX2_IPSEC_PO_SA_AUTH_AES_GMAC;
break;
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
ctl->auth_type = OTX2_IPSEC_PO_SA_AUTH_AES_XCBC_128;
break;
default:
return -ENOTSUP;
}
}
if (ipsec->options.esn)
ctl->esn_en = 1;
if (ipsec->options.udp_encap == 1)
ctl->encap_type = OTX2_IPSEC_PO_SA_ENCAP_UDP;
ctl->spi = rte_cpu_to_be_32(ipsec->spi);
ctl->valid = 1;
return 0;
}
#endif /* __OTX2_IPSEC_PO_H__ */


@ -1,167 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef __OTX2_IPSEC_PO_OPS_H__
#define __OTX2_IPSEC_PO_OPS_H__
#include <rte_crypto_sym.h>
#include <rte_security.h>
#include "otx2_cryptodev.h"
#include "otx2_security.h"
static __rte_always_inline int32_t
otx2_ipsec_po_out_rlen_get(struct otx2_sec_session_ipsec_lp *sess,
uint32_t plen)
{
uint32_t enc_payload_len;
enc_payload_len = RTE_ALIGN_CEIL(plen + sess->roundup_len,
sess->roundup_byte);
return sess->partial_len + enc_payload_len;
}
static __rte_always_inline struct cpt_request_info *
alloc_request_struct(char *maddr, void *cop, int mdata_len)
{
struct cpt_request_info *req;
struct cpt_meta_info *meta;
uint8_t *resp_addr;
uintptr_t *op;
meta = (void *)RTE_PTR_ALIGN((uint8_t *)maddr, 16);
op = (uintptr_t *)meta->deq_op_info;
req = &meta->cpt_req;
resp_addr = (uint8_t *)&meta->cpt_res;
req->completion_addr = (uint64_t *)((uint8_t *)resp_addr);
*req->completion_addr = COMPLETION_CODE_INIT;
req->comp_baddr = rte_mem_virt2iova(resp_addr);
req->op = op;
op[0] = (uintptr_t)((uint64_t)meta | 1ull);
op[1] = (uintptr_t)cop;
op[2] = (uintptr_t)req;
op[3] = mdata_len;
return req;
}
static __rte_always_inline int
process_outb_sa(struct rte_crypto_op *cop,
struct otx2_sec_session_ipsec_lp *sess,
struct cpt_qp_meta_info *m_info, void **prep_req)
{
uint32_t dlen, rlen, extend_head, extend_tail;
struct rte_crypto_sym_op *sym_op = cop->sym;
struct rte_mbuf *m_src = sym_op->m_src;
struct cpt_request_info *req = NULL;
struct otx2_ipsec_po_out_hdr *hdr;
struct otx2_ipsec_po_out_sa *sa;
int hdr_len, mdata_len, ret = 0;
vq_cmd_word0_t word0;
char *mdata, *data;
sa = &sess->out_sa;
hdr_len = sizeof(*hdr);
dlen = rte_pktmbuf_pkt_len(m_src) + hdr_len;
rlen = otx2_ipsec_po_out_rlen_get(sess, dlen - hdr_len);
extend_head = hdr_len + RTE_ETHER_HDR_LEN;
extend_tail = rlen - dlen;
mdata_len = m_info->lb_mlen + 8;
mdata = rte_pktmbuf_append(m_src, extend_tail + mdata_len);
if (unlikely(mdata == NULL)) {
otx2_err("Not enough tail room\n");
ret = -ENOMEM;
goto exit;
}
mdata += extend_tail; /* mdata follows encrypted data */
req = alloc_request_struct(mdata, (void *)cop, mdata_len);
data = rte_pktmbuf_prepend(m_src, extend_head);
if (unlikely(data == NULL)) {
otx2_err("Not enough head room\n");
ret = -ENOMEM;
goto exit;
}
/*
* Move the Ethernet header, to insert otx2_ipsec_po_out_hdr prior
* to the IP header
*/
memcpy(data, data + hdr_len, RTE_ETHER_HDR_LEN);
hdr = (struct otx2_ipsec_po_out_hdr *)rte_pktmbuf_adj(m_src,
RTE_ETHER_HDR_LEN);
memcpy(&hdr->iv[0], rte_crypto_op_ctod_offset(cop, uint8_t *,
sess->iv_offset), sess->iv_length);
/* Prepare CPT instruction */
word0.u64 = sess->ucmd_w0;
word0.s.dlen = dlen;
req->ist.ei0 = word0.u64;
req->ist.ei1 = rte_pktmbuf_iova(m_src);
req->ist.ei2 = req->ist.ei1;
sa->esn_hi = sess->seq_hi;
hdr->seq = rte_cpu_to_be_32(sess->seq_lo);
hdr->ip_id = rte_cpu_to_be_32(sess->ip_id);
sess->ip_id++;
sess->esn++;
exit:
*prep_req = req;
return ret;
}
static __rte_always_inline int
process_inb_sa(struct rte_crypto_op *cop,
struct otx2_sec_session_ipsec_lp *sess,
struct cpt_qp_meta_info *m_info, void **prep_req)
{
struct rte_crypto_sym_op *sym_op = cop->sym;
struct rte_mbuf *m_src = sym_op->m_src;
struct cpt_request_info *req = NULL;
int mdata_len, ret = 0;
vq_cmd_word0_t word0;
uint32_t dlen;
char *mdata;
dlen = rte_pktmbuf_pkt_len(m_src);
mdata_len = m_info->lb_mlen + 8;
mdata = rte_pktmbuf_append(m_src, mdata_len);
if (unlikely(mdata == NULL)) {
otx2_err("Not enough tail room\n");
ret = -ENOMEM;
goto exit;
}
req = alloc_request_struct(mdata, (void *)cop, mdata_len);
/* Prepare CPT instruction */
word0.u64 = sess->ucmd_w0;
word0.s.dlen = dlen;
req->ist.ei0 = word0.u64;
req->ist.ei1 = rte_pktmbuf_iova(m_src);
req->ist.ei2 = req->ist.ei1;
exit:
*prep_req = req;
return ret;
}
#endif /* __OTX2_IPSEC_PO_OPS_H__ */


@ -1,37 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __OTX2_SECURITY_H__
#define __OTX2_SECURITY_H__
#include <rte_security.h>
#include "otx2_cryptodev_sec.h"
#include "otx2_ethdev_sec.h"
#define OTX2_SEC_AH_HDR_LEN 12
#define OTX2_SEC_AES_GCM_IV_LEN 8
#define OTX2_SEC_AES_GCM_MAC_LEN 16
#define OTX2_SEC_AES_CBC_IV_LEN 16
#define OTX2_SEC_SHA1_HMAC_LEN 12
#define OTX2_SEC_SHA2_HMAC_LEN 16
#define OTX2_SEC_AES_GCM_ROUNDUP_BYTE_LEN 4
#define OTX2_SEC_AES_CBC_ROUNDUP_BYTE_LEN 16
struct otx2_sec_session_ipsec {
union {
struct otx2_sec_session_ipsec_ip ip;
struct otx2_sec_session_ipsec_lp lp;
};
enum rte_security_ipsec_sa_direction dir;
};
struct otx2_sec_session {
struct otx2_sec_session_ipsec ipsec;
void *userdata;
/**< Userdata registered by the application */
} __rte_cache_aligned;
#endif /* __OTX2_SECURITY_H__ */


@ -1,13 +0,0 @@
DPDK_22 {
local: *;
};
INTERNAL {
global:
otx2_cryptodev_driver_id;
otx2_cpt_af_reg_read;
otx2_cpt_af_reg_write;
local: *;
};


@ -1127,6 +1127,16 @@ cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
}
static const struct rte_pci_id cn9k_pci_sso_map[] = {
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KB, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KC, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KD, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN9KE, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
{
.vendor_id = 0,
},


@ -11,7 +11,6 @@ drivers = [
'dpaa',
'dpaa2',
'dsw',
'octeontx2',
'opdl',
'skeleton',
'sw',


@ -1,26 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(C) 2019 Marvell International Ltd.
#
if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
build = false
reason = 'only supported on 64-bit Linux'
subdir_done()
endif
sources = files(
'otx2_worker.c',
'otx2_worker_dual.c',
'otx2_evdev.c',
'otx2_evdev_adptr.c',
'otx2_evdev_crypto_adptr.c',
'otx2_evdev_irq.c',
'otx2_evdev_selftest.c',
'otx2_tim_evdev.c',
'otx2_tim_worker.c',
)
deps += ['bus_pci', 'common_octeontx2', 'crypto_octeontx2', 'mempool_octeontx2', 'net_octeontx2']
includes += include_directories('../../crypto/octeontx2')
includes += include_directories('../../common/cpt')

File diff suppressed because it is too large


@ -1,430 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef __OTX2_EVDEV_H__
#define __OTX2_EVDEV_H__
#include <rte_eventdev.h>
#include <eventdev_pmd.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include "otx2_common.h"
#include "otx2_dev.h"
#include "otx2_ethdev.h"
#include "otx2_mempool.h"
#include "otx2_tim_evdev.h"
#define EVENTDEV_NAME_OCTEONTX2_PMD event_octeontx2
#define sso_func_trace otx2_sso_dbg
#define OTX2_SSO_MAX_VHGRP RTE_EVENT_MAX_QUEUES_PER_DEV
#define OTX2_SSO_MAX_VHWS (UINT8_MAX)
#define OTX2_SSO_FC_NAME "otx2_evdev_xaq_fc"
#define OTX2_SSO_SQB_LIMIT (0x180)
#define OTX2_SSO_XAQ_SLACK (8)
#define OTX2_SSO_XAQ_CACHE_CNT (0x7)
#define OTX2_SSO_WQE_SG_PTR (9)
/* SSO LF register offsets (BAR2) */
#define SSO_LF_GGRP_OP_ADD_WORK0 (0x0ull)
#define SSO_LF_GGRP_OP_ADD_WORK1 (0x8ull)
#define SSO_LF_GGRP_QCTL (0x20ull)
#define SSO_LF_GGRP_EXE_DIS (0x80ull)
#define SSO_LF_GGRP_INT (0x100ull)
#define SSO_LF_GGRP_INT_W1S (0x108ull)
#define SSO_LF_GGRP_INT_ENA_W1S (0x110ull)
#define SSO_LF_GGRP_INT_ENA_W1C (0x118ull)
#define SSO_LF_GGRP_INT_THR (0x140ull)
#define SSO_LF_GGRP_INT_CNT (0x180ull)
#define SSO_LF_GGRP_XAQ_CNT (0x1b0ull)
#define SSO_LF_GGRP_AQ_CNT (0x1c0ull)
#define SSO_LF_GGRP_AQ_THR (0x1e0ull)
#define SSO_LF_GGRP_MISC_CNT (0x200ull)
/* SSOW LF register offsets (BAR2) */
#define SSOW_LF_GWS_LINKS (0x10ull)
#define SSOW_LF_GWS_PENDWQP (0x40ull)
#define SSOW_LF_GWS_PENDSTATE (0x50ull)
#define SSOW_LF_GWS_NW_TIM (0x70ull)
#define SSOW_LF_GWS_GRPMSK_CHG (0x80ull)
#define SSOW_LF_GWS_INT (0x100ull)
#define SSOW_LF_GWS_INT_W1S (0x108ull)
#define SSOW_LF_GWS_INT_ENA_W1S (0x110ull)
#define SSOW_LF_GWS_INT_ENA_W1C (0x118ull)
#define SSOW_LF_GWS_TAG (0x200ull)
#define SSOW_LF_GWS_WQP (0x210ull)
#define SSOW_LF_GWS_SWTP (0x220ull)
#define SSOW_LF_GWS_PENDTAG (0x230ull)
#define SSOW_LF_GWS_OP_ALLOC_WE (0x400ull)
#define SSOW_LF_GWS_OP_GET_WORK (0x600ull)
#define SSOW_LF_GWS_OP_SWTAG_FLUSH (0x800ull)
#define SSOW_LF_GWS_OP_SWTAG_UNTAG (0x810ull)
#define SSOW_LF_GWS_OP_SWTP_CLR (0x820ull)
#define SSOW_LF_GWS_OP_UPD_WQP_GRP0 (0x830ull)
#define SSOW_LF_GWS_OP_UPD_WQP_GRP1 (0x838ull)
#define SSOW_LF_GWS_OP_DESCHED (0x880ull)
#define SSOW_LF_GWS_OP_DESCHED_NOSCH (0x8c0ull)
#define SSOW_LF_GWS_OP_SWTAG_DESCHED (0x980ull)
#define SSOW_LF_GWS_OP_SWTAG_NOSCHED (0x9c0ull)
#define SSOW_LF_GWS_OP_CLR_NSCHED0 (0xa00ull)
#define SSOW_LF_GWS_OP_CLR_NSCHED1 (0xa08ull)
#define SSOW_LF_GWS_OP_SWTP_SET (0xc00ull)
#define SSOW_LF_GWS_OP_SWTAG_NORM (0xc10ull)
#define SSOW_LF_GWS_OP_SWTAG_FULL0 (0xc20ull)
#define SSOW_LF_GWS_OP_SWTAG_FULL1 (0xc28ull)
#define SSOW_LF_GWS_OP_GWC_INVAL (0xe00ull)
#define OTX2_SSOW_GET_BASE_ADDR(_GW) ((_GW) - SSOW_LF_GWS_OP_GET_WORK)
#define OTX2_SSOW_TT_FROM_TAG(x) (((x) >> 32) & SSO_TT_EMPTY)
#define OTX2_SSOW_GRP_FROM_TAG(x) (((x) >> 36) & 0x3ff)
#define NSEC2USEC(__ns) ((__ns) / 1E3)
#define USEC2NSEC(__us) ((__us) * 1E3)
#define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9)
#define TICK2NSEC(__tck, __freq) (((__tck) * 1E9) / (__freq))
enum otx2_sso_lf_type {
SSO_LF_GGRP,
SSO_LF_GWS
};
union otx2_sso_event {
uint64_t get_work0;
struct {
uint32_t flow_id:20;
uint32_t sub_event_type:8;
uint32_t event_type:4;
uint8_t op:2;
uint8_t rsvd:4;
uint8_t sched_type:2;
uint8_t queue_id;
uint8_t priority;
uint8_t impl_opaque;
};
} __rte_aligned(64);
enum {
SSO_SYNC_ORDERED,
SSO_SYNC_ATOMIC,
SSO_SYNC_UNTAGGED,
SSO_SYNC_EMPTY
};
struct otx2_sso_qos {
uint8_t queue;
uint8_t xaq_prcnt;
uint8_t taq_prcnt;
uint8_t iaq_prcnt;
};
struct otx2_sso_evdev {
OTX2_DEV; /* Base class */
uint8_t max_event_queues;
uint8_t max_event_ports;
uint8_t is_timeout_deq;
uint8_t nb_event_queues;
uint8_t nb_event_ports;
uint8_t configured;
uint32_t deq_tmo_ns;
uint32_t min_dequeue_timeout_ns;
uint32_t max_dequeue_timeout_ns;
int32_t max_num_events;
uint64_t *fc_mem;
uint64_t xaq_lmt;
uint64_t nb_xaq_cfg;
rte_iova_t fc_iova;
struct rte_mempool *xaq_pool;
uint64_t rx_offloads;
uint64_t tx_offloads;
uint64_t adptr_xae_cnt;
uint16_t rx_adptr_pool_cnt;
uint64_t *rx_adptr_pools;
uint16_t max_port_id;
uint16_t tim_adptr_ring_cnt;
uint16_t *timer_adptr_rings;
uint64_t *timer_adptr_sz;
/* Dev args */
uint8_t dual_ws;
uint32_t xae_cnt;
uint8_t qos_queue_cnt;
uint8_t force_rx_bp;
struct otx2_sso_qos *qos_parse_data;
/* HW const */
uint32_t xae_waes;
uint32_t xaq_buf_size;
uint32_t iue;
/* MSIX offsets */
uint16_t sso_msixoff[OTX2_SSO_MAX_VHGRP];
uint16_t ssow_msixoff[OTX2_SSO_MAX_VHWS];
/* PTP timestamp */
struct otx2_timesync_info *tstamp;
} __rte_cache_aligned;
#define OTX2_SSOGWS_OPS \
/* WS ops */ \
uintptr_t getwrk_op; \
uintptr_t tag_op; \
uintptr_t wqp_op; \
uintptr_t swtag_flush_op; \
uintptr_t swtag_norm_op; \
uintptr_t swtag_desched_op;
/* Event port aka GWS */
struct otx2_ssogws {
/* Get Work Fastpath data */
OTX2_SSOGWS_OPS;
/* PTP timestamp */
struct otx2_timesync_info *tstamp;
void *lookup_mem;
uint8_t swtag_req;
uint8_t port;
/* Add Work Fastpath data */
uint64_t xaq_lmt __rte_cache_aligned;
uint64_t *fc_mem;
uintptr_t grps_base[OTX2_SSO_MAX_VHGRP];
/* Tx Fastpath data */
uint64_t base __rte_cache_aligned;
uint8_t tx_adptr_data[];
} __rte_cache_aligned;
struct otx2_ssogws_state {
OTX2_SSOGWS_OPS;
};
struct otx2_ssogws_dual {
/* Get Work Fastpath data */
struct otx2_ssogws_state ws_state[2]; /* Ping and Pong */
/* PTP timestamp */
struct otx2_timesync_info *tstamp;
void *lookup_mem;
uint8_t swtag_req;
uint8_t vws; /* Ping pong bit */
uint8_t port;
/* Add Work Fastpath data */
uint64_t xaq_lmt __rte_cache_aligned;
uint64_t *fc_mem;
uintptr_t grps_base[OTX2_SSO_MAX_VHGRP];
/* Tx Fastpath data */
uint64_t base[2] __rte_cache_aligned;
uint8_t tx_adptr_data[];
} __rte_cache_aligned;
static inline struct otx2_sso_evdev *
sso_pmd_priv(const struct rte_eventdev *event_dev)
{
return event_dev->data->dev_private;
}
struct otx2_ssogws_cookie {
const struct rte_eventdev *event_dev;
bool configured;
};
static inline struct otx2_ssogws_cookie *
ssogws_get_cookie(void *ws)
{
return (struct otx2_ssogws_cookie *)
((uint8_t *)ws - RTE_CACHE_LINE_SIZE);
}
static const union mbuf_initializer mbuf_init = {
.fields = {
.data_off = RTE_PKTMBUF_HEADROOM,
.refcnt = 1,
.nb_segs = 1,
.port = 0
}
};
static __rte_always_inline void
otx2_wqe_to_mbuf(uint64_t get_work1, const uint64_t mbuf, uint8_t port_id,
const uint32_t tag, const uint32_t flags,
const void * const lookup_mem)
{
struct nix_wqe_hdr_s *wqe = (struct nix_wqe_hdr_s *)get_work1;
uint64_t val = mbuf_init.value | (uint64_t)port_id << 48;
if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
val |= NIX_TIMESYNC_RX_OFFSET;
otx2_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
(struct rte_mbuf *)mbuf, lookup_mem,
val, flags);
}
static inline int
parse_kvargs_flag(const char *key, const char *value, void *opaque)
{
RTE_SET_USED(key);
*(uint8_t *)opaque = !!atoi(value);
return 0;
}
static inline int
parse_kvargs_value(const char *key, const char *value, void *opaque)
{
RTE_SET_USED(key);
*(uint32_t *)opaque = (uint32_t)atoi(value);
return 0;
}
#define SSO_RX_ADPTR_ENQ_FASTPATH_FUNC NIX_RX_FASTPATH_MODES
#define SSO_TX_ADPTR_ENQ_FASTPATH_FUNC NIX_TX_FASTPATH_MODES
/* Single WS API's */
uint16_t otx2_ssogws_enq(void *port, const struct rte_event *ev);
uint16_t otx2_ssogws_enq_burst(void *port, const struct rte_event ev[],
uint16_t nb_events);
uint16_t otx2_ssogws_enq_new_burst(void *port, const struct rte_event ev[],
uint16_t nb_events);
uint16_t otx2_ssogws_enq_fwd_burst(void *port, const struct rte_event ev[],
uint16_t nb_events);
/* Dual WS API's */
uint16_t otx2_ssogws_dual_enq(void *port, const struct rte_event *ev);
uint16_t otx2_ssogws_dual_enq_burst(void *port, const struct rte_event ev[],
uint16_t nb_events);
uint16_t otx2_ssogws_dual_enq_new_burst(void *port, const struct rte_event ev[],
uint16_t nb_events);
uint16_t otx2_ssogws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
uint16_t nb_events);
/* Auto generated API's */
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
uint16_t otx2_ssogws_deq_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_deq_burst_ ##name(void *port, struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_deq_timeout_ ##name(void *port, \
struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_deq_timeout_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_deq_seg_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_deq_seg_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_deq_seg_timeout_ ##name(void *port, \
struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_deq_seg_timeout_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks); \
\
uint16_t otx2_ssogws_dual_deq_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_dual_deq_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_dual_deq_timeout_ ##name(void *port, \
struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_dual_deq_timeout_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_dual_deq_seg_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_dual_deq_seg_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_dual_deq_seg_timeout_ ##name(void *port, \
struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_dual_deq_seg_timeout_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks);\
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
uint16_t otx2_ssogws_tx_adptr_enq_ ## name(void *port, struct rte_event ev[],\
uint16_t nb_events); \
uint16_t otx2_ssogws_tx_adptr_enq_seg_ ## name(void *port, \
struct rte_event ev[], \
uint16_t nb_events); \
uint16_t otx2_ssogws_dual_tx_adptr_enq_ ## name(void *port, \
struct rte_event ev[], \
uint16_t nb_events); \
uint16_t otx2_ssogws_dual_tx_adptr_enq_seg_ ## name(void *port, \
struct rte_event ev[], \
uint16_t nb_events); \
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
void sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data,
uint32_t event_type);
int sso_xae_reconfigure(struct rte_eventdev *event_dev);
void sso_fastpath_fns_set(struct rte_eventdev *event_dev);
int otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev,
uint32_t *caps);
int otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev,
int32_t rx_queue_id,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
int otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev,
int32_t rx_queue_id);
int otx2_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev);
int otx2_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev);
int otx2_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
const struct rte_eth_dev *eth_dev,
uint32_t *caps);
int otx2_sso_tx_adapter_queue_add(uint8_t id,
const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev,
int32_t tx_queue_id);
int otx2_sso_tx_adapter_queue_del(uint8_t id,
const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev,
int32_t tx_queue_id);
/* Event crypto adapter API's */
int otx2_ca_caps_get(const struct rte_eventdev *dev,
const struct rte_cryptodev *cdev, uint32_t *caps);
int otx2_ca_qp_add(const struct rte_eventdev *dev,
const struct rte_cryptodev *cdev, int32_t queue_pair_id,
const struct rte_event *event);
int otx2_ca_qp_del(const struct rte_eventdev *dev,
const struct rte_cryptodev *cdev, int32_t queue_pair_id);
/* Clean up API's */
typedef void (*otx2_handle_event_t)(void *arg, struct rte_event ev);
void ssogws_flush_events(struct otx2_ssogws *ws, uint8_t queue_id,
uintptr_t base, otx2_handle_event_t fn, void *arg);
void ssogws_reset(struct otx2_ssogws *ws);
/* Selftest */
int otx2_sso_selftest(void);
/* Init and Fini API's */
int otx2_sso_init(struct rte_eventdev *event_dev);
int otx2_sso_fini(struct rte_eventdev *event_dev);
/* IRQ handlers */
int sso_register_irqs(const struct rte_eventdev *event_dev);
void sso_unregister_irqs(const struct rte_eventdev *event_dev);
#endif /* __OTX2_EVDEV_H__ */


@ -1,656 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019-2021 Marvell.
*/
#include "otx2_evdev.h"
#define NIX_RQ_AURA_THRESH(x) (((x)*95) / 100)
int
otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
int rc;
RTE_SET_USED(event_dev);
rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
if (rc)
*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
else
*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ;
return 0;
}
static inline int
sso_rxq_enable(struct otx2_eth_dev *dev, uint16_t qid, uint8_t tt, uint8_t ggrp,
uint16_t eth_port_id)
{
struct otx2_mbox *mbox = dev->mbox;
struct nix_aq_enq_req *aq;
int rc;
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
aq->qidx = qid;
aq->ctype = NIX_AQ_CTYPE_CQ;
aq->op = NIX_AQ_INSTOP_WRITE;
aq->cq.ena = 0;
aq->cq.caching = 0;
otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
aq->cq_mask.ena = ~(aq->cq_mask.ena);
aq->cq_mask.caching = ~(aq->cq_mask.caching);
rc = otx2_mbox_process(mbox);
if (rc < 0) {
otx2_err("Failed to disable cq context");
goto fail;
}
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
aq->qidx = qid;
aq->ctype = NIX_AQ_CTYPE_RQ;
aq->op = NIX_AQ_INSTOP_WRITE;
aq->rq.sso_ena = 1;
aq->rq.sso_tt = tt;
aq->rq.sso_grp = ggrp;
aq->rq.ena_wqwd = 1;
/* Mbuf Header generation :
* > FIRST_SKIP is a super set of WQE_SKIP, dont modify first skip as
* it already has data related to mbuf size, headroom, private area.
* > Using WQE_SKIP we can directly assign
* mbuf = wqe - sizeof(struct mbuf);
* so that mbuf header will not have unpredicted values while headroom
* and private data starts at the beginning of wqe_data.
*/
aq->rq.wqe_skip = 1;
aq->rq.wqe_caching = 1;
aq->rq.spb_ena = 0;
aq->rq.flow_tagw = 20; /* 20-bits */
/* Flow Tag calculation :
*
* rq_tag <31:24> = good/bad_tag<8:0>;
* rq_tag <23:0> = [ltag]
*
* flow_tag_mask<31:0> = (1 << flow_tagw) - 1; <31:20>
* tag<31:0> = (~flow_tag_mask & rq_tag) | (flow_tag_mask & flow_tag);
*
* Setup :
* ltag<23:0> = (eth_port_id & 0xF) << 20;
* good/bad_tag<8:0> =
* ((eth_port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4);
*
* TAG<31:0> on getwork = <31:28>(RTE_EVENT_TYPE_ETHDEV) |
* <27:20> (eth_port_id) | <20:0> [TAG]
*/
aq->rq.ltag = (eth_port_id & 0xF) << 20;
aq->rq.good_utag = ((eth_port_id >> 4) & 0xF) |
(RTE_EVENT_TYPE_ETHDEV << 4);
aq->rq.bad_utag = aq->rq.good_utag;
aq->rq.ena = 0; /* Don't enable RQ yet */
aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
/* mask the bits to write. */
aq->rq_mask.sso_ena = ~(aq->rq_mask.sso_ena);
aq->rq_mask.sso_tt = ~(aq->rq_mask.sso_tt);
aq->rq_mask.sso_grp = ~(aq->rq_mask.sso_grp);
aq->rq_mask.ena_wqwd = ~(aq->rq_mask.ena_wqwd);
aq->rq_mask.wqe_skip = ~(aq->rq_mask.wqe_skip);
aq->rq_mask.wqe_caching = ~(aq->rq_mask.wqe_caching);
aq->rq_mask.spb_ena = ~(aq->rq_mask.spb_ena);
aq->rq_mask.flow_tagw = ~(aq->rq_mask.flow_tagw);
aq->rq_mask.ltag = ~(aq->rq_mask.ltag);
aq->rq_mask.good_utag = ~(aq->rq_mask.good_utag);
aq->rq_mask.bad_utag = ~(aq->rq_mask.bad_utag);
aq->rq_mask.ena = ~(aq->rq_mask.ena);
aq->rq_mask.pb_caching = ~(aq->rq_mask.pb_caching);
aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);
rc = otx2_mbox_process(mbox);
if (rc < 0) {
otx2_err("Failed to init rx adapter context");
goto fail;
}
return 0;
fail:
return rc;
}
static inline int
sso_rxq_disable(struct otx2_eth_dev *dev, uint16_t qid)
{
struct otx2_mbox *mbox = dev->mbox;
struct nix_aq_enq_req *aq;
int rc;
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
aq->qidx = qid;
aq->ctype = NIX_AQ_CTYPE_CQ;
aq->op = NIX_AQ_INSTOP_WRITE;
aq->cq.ena = 1;
aq->cq.caching = 1;
otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
aq->cq_mask.ena = ~(aq->cq_mask.ena);
aq->cq_mask.caching = ~(aq->cq_mask.caching);
rc = otx2_mbox_process(mbox);
if (rc < 0) {
otx2_err("Failed to enable cq context");
goto fail;
}
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
aq->qidx = qid;
aq->ctype = NIX_AQ_CTYPE_RQ;
aq->op = NIX_AQ_INSTOP_WRITE;
aq->rq.sso_ena = 0;
aq->rq.sso_tt = SSO_TT_UNTAGGED;
aq->rq.sso_grp = 0;
aq->rq.ena_wqwd = 0;
aq->rq.wqe_caching = 0;
aq->rq.wqe_skip = 0;
aq->rq.spb_ena = 0;
aq->rq.flow_tagw = 0x20;
aq->rq.ltag = 0;
aq->rq.good_utag = 0;
aq->rq.bad_utag = 0;
aq->rq.ena = 1;
aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
/* mask the bits to write. */
aq->rq_mask.sso_ena = ~(aq->rq_mask.sso_ena);
aq->rq_mask.sso_tt = ~(aq->rq_mask.sso_tt);
aq->rq_mask.sso_grp = ~(aq->rq_mask.sso_grp);
aq->rq_mask.ena_wqwd = ~(aq->rq_mask.ena_wqwd);
aq->rq_mask.wqe_caching = ~(aq->rq_mask.wqe_caching);
aq->rq_mask.wqe_skip = ~(aq->rq_mask.wqe_skip);
aq->rq_mask.spb_ena = ~(aq->rq_mask.spb_ena);
aq->rq_mask.flow_tagw = ~(aq->rq_mask.flow_tagw);
aq->rq_mask.ltag = ~(aq->rq_mask.ltag);
aq->rq_mask.good_utag = ~(aq->rq_mask.good_utag);
aq->rq_mask.bad_utag = ~(aq->rq_mask.bad_utag);
aq->rq_mask.ena = ~(aq->rq_mask.ena);
aq->rq_mask.pb_caching = ~(aq->rq_mask.pb_caching);
aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);
rc = otx2_mbox_process(mbox);
if (rc < 0) {
otx2_err("Failed to clear rx adapter context");
goto fail;
}
return 0;
fail:
return rc;
}
void
sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
{
int i;
switch (event_type) {
case RTE_EVENT_TYPE_ETHDEV:
{
struct otx2_eth_rxq *rxq = data;
uint64_t *old_ptr;
for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
if ((uint64_t)rxq->pool == dev->rx_adptr_pools[i])
return;
}
dev->rx_adptr_pool_cnt++;
old_ptr = dev->rx_adptr_pools;
dev->rx_adptr_pools = rte_realloc(dev->rx_adptr_pools,
sizeof(uint64_t) *
dev->rx_adptr_pool_cnt, 0);
if (dev->rx_adptr_pools == NULL) {
dev->adptr_xae_cnt += rxq->pool->size;
dev->rx_adptr_pools = old_ptr;
dev->rx_adptr_pool_cnt--;
return;
}
dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
(uint64_t)rxq->pool;
dev->adptr_xae_cnt += rxq->pool->size;
break;
}
case RTE_EVENT_TYPE_TIMER:
{
struct otx2_tim_ring *timr = data;
uint16_t *old_ring_ptr;
uint64_t *old_sz_ptr;
for (i = 0; i < dev->tim_adptr_ring_cnt; i++) {
if (timr->ring_id != dev->timer_adptr_rings[i])
continue;
if (timr->nb_timers == dev->timer_adptr_sz[i])
return;
dev->adptr_xae_cnt -= dev->timer_adptr_sz[i];
dev->adptr_xae_cnt += timr->nb_timers;
dev->timer_adptr_sz[i] = timr->nb_timers;
return;
}
dev->tim_adptr_ring_cnt++;
old_ring_ptr = dev->timer_adptr_rings;
old_sz_ptr = dev->timer_adptr_sz;
dev->timer_adptr_rings = rte_realloc(dev->timer_adptr_rings,
sizeof(uint16_t) *
dev->tim_adptr_ring_cnt,
0);
if (dev->timer_adptr_rings == NULL) {
dev->adptr_xae_cnt += timr->nb_timers;
dev->timer_adptr_rings = old_ring_ptr;
dev->tim_adptr_ring_cnt--;
return;
}
dev->timer_adptr_sz = rte_realloc(dev->timer_adptr_sz,
sizeof(uint64_t) *
dev->tim_adptr_ring_cnt,
0);
if (dev->timer_adptr_sz == NULL) {
dev->adptr_xae_cnt += timr->nb_timers;
dev->timer_adptr_sz = old_sz_ptr;
dev->tim_adptr_ring_cnt--;
return;
}
dev->timer_adptr_rings[dev->tim_adptr_ring_cnt - 1] =
timr->ring_id;
dev->timer_adptr_sz[dev->tim_adptr_ring_cnt - 1] =
timr->nb_timers;
dev->adptr_xae_cnt += timr->nb_timers;
break;
}
default:
break;
}
}
static inline void
sso_updt_lookup_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
{
struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
int i;
for (i = 0; i < dev->nb_event_ports; i++) {
if (dev->dual_ws) {
struct otx2_ssogws_dual *ws = event_dev->data->ports[i];
ws->lookup_mem = lookup_mem;
} else {
struct otx2_ssogws *ws = event_dev->data->ports[i];
ws->lookup_mem = lookup_mem;
}
}
}
static inline void
sso_cfg_nix_mp_bpid(struct otx2_sso_evdev *dev,
struct otx2_eth_dev *otx2_eth_dev, struct otx2_eth_rxq *rxq,
uint8_t ena)
{
struct otx2_fc_info *fc = &otx2_eth_dev->fc_info;
struct npa_aq_enq_req *req;
struct npa_aq_enq_rsp *rsp;
struct otx2_npa_lf *lf;
struct otx2_mbox *mbox;
uint32_t limit;
int rc;
if (otx2_dev_is_sdp(otx2_eth_dev))
return;
lf = otx2_npa_lf_obj_get();
if (!lf)
return;
mbox = lf->mbox;
req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
if (req == NULL)
return;
req->aura_id = npa_lf_aura_handle_to_aura(rxq->pool->pool_id);
req->ctype = NPA_AQ_CTYPE_AURA;
req->op = NPA_AQ_INSTOP_READ;
rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
if (rc)
return;
limit = rsp->aura.limit;
/* BP is already enabled. */
if (rsp->aura.bp_ena) {
/* If BP ids don't match disable BP. */
if ((rsp->aura.nix0_bpid != fc->bpid[0]) && !dev->force_rx_bp) {
req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
if (req == NULL)
return;
req->aura_id =
npa_lf_aura_handle_to_aura(rxq->pool->pool_id);
req->ctype = NPA_AQ_CTYPE_AURA;
req->op = NPA_AQ_INSTOP_WRITE;
req->aura.bp_ena = 0;
req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
otx2_mbox_process(mbox);
}
return;
}
/* BP was previously enabled but now disabled skip. */
if (rsp->aura.bp)
return;
req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
if (req == NULL)
return;
req->aura_id = npa_lf_aura_handle_to_aura(rxq->pool->pool_id);
req->ctype = NPA_AQ_CTYPE_AURA;
req->op = NPA_AQ_INSTOP_WRITE;
if (ena) {
req->aura.nix0_bpid = fc->bpid[0];
req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
req->aura.bp = NIX_RQ_AURA_THRESH(
limit > 128 ? 256 : limit); /* 95% of size*/
req->aura_mask.bp = ~(req->aura_mask.bp);
}
req->aura.bp_ena = !!ena;
req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);
otx2_mbox_process(mbox);
}
int
otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev,
int32_t rx_queue_id,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
uint16_t port = eth_dev->data->port_id;
struct otx2_eth_rxq *rxq;
int i, rc;
rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
if (rc)
return -EINVAL;
if (rx_queue_id < 0) {
for (i = 0 ; i < eth_dev->data->nb_rx_queues; i++) {
rxq = eth_dev->data->rx_queues[i];
sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
sso_cfg_nix_mp_bpid(dev, otx2_eth_dev, rxq, true);
rc = sso_xae_reconfigure(
(struct rte_eventdev *)(uintptr_t)event_dev);
rc |= sso_rxq_enable(otx2_eth_dev, i,
queue_conf->ev.sched_type,
queue_conf->ev.queue_id, port);
}
rxq = eth_dev->data->rx_queues[0];
sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
} else {
rxq = eth_dev->data->rx_queues[rx_queue_id];
sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
sso_cfg_nix_mp_bpid(dev, otx2_eth_dev, rxq, true);
rc = sso_xae_reconfigure((struct rte_eventdev *)
(uintptr_t)event_dev);
rc |= sso_rxq_enable(otx2_eth_dev, (uint16_t)rx_queue_id,
queue_conf->ev.sched_type,
queue_conf->ev.queue_id, port);
sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
}
if (rc < 0) {
otx2_err("Failed to configure Rx adapter port=%d, q=%d", port,
queue_conf->ev.queue_id);
return rc;
}
dev->rx_offloads |= otx2_eth_dev->rx_offload_flags;
dev->tstamp = &otx2_eth_dev->tstamp;
sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
return 0;
}
int
otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev,
int32_t rx_queue_id)
{
struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
int i, rc;
rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
if (rc)
return -EINVAL;
if (rx_queue_id < 0) {
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rc = sso_rxq_disable(otx2_eth_dev, i);
sso_cfg_nix_mp_bpid(dev, otx2_eth_dev,
eth_dev->data->rx_queues[i], false);
}
} else {
rc = sso_rxq_disable(otx2_eth_dev, (uint16_t)rx_queue_id);
sso_cfg_nix_mp_bpid(dev, otx2_eth_dev,
eth_dev->data->rx_queues[rx_queue_id],
false);
}
if (rc < 0)
otx2_err("Failed to clear Rx adapter config port=%d, q=%d",
eth_dev->data->port_id, rx_queue_id);
return rc;
}
int
otx2_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev)
{
RTE_SET_USED(event_dev);
RTE_SET_USED(eth_dev);
return 0;
}
int
otx2_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev)
{
RTE_SET_USED(event_dev);
RTE_SET_USED(eth_dev);
return 0;
}
int
otx2_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
int ret;
RTE_SET_USED(dev);
ret = strncmp(eth_dev->device->driver->name, "net_octeontx2,", 13);
if (ret)
*caps = 0;
else
*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
return 0;
}
static int
sso_sqb_aura_limit_edit(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
{
struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
struct npa_aq_enq_req *aura_req;
aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
aura_req->ctype = NPA_AQ_CTYPE_AURA;
aura_req->op = NPA_AQ_INSTOP_WRITE;
aura_req->aura.limit = nb_sqb_bufs;
aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
return otx2_mbox_process(npa_lf->mbox);
}
static int
sso_add_tx_queue_data(const struct rte_eventdev *event_dev,
uint16_t eth_port_id, uint16_t tx_queue_id,
struct otx2_eth_txq *txq)
{
struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
int i;
for (i = 0; i < event_dev->data->nb_ports; i++) {
dev->max_port_id = RTE_MAX(dev->max_port_id, eth_port_id);
if (dev->dual_ws) {
struct otx2_ssogws_dual *old_dws;
struct otx2_ssogws_dual *dws;
old_dws = event_dev->data->ports[i];
dws = rte_realloc_socket(ssogws_get_cookie(old_dws),
sizeof(struct otx2_ssogws_dual)
+ RTE_CACHE_LINE_SIZE +
(sizeof(uint64_t) *
(dev->max_port_id + 1) *
RTE_MAX_QUEUES_PER_PORT),
RTE_CACHE_LINE_SIZE,
event_dev->data->socket_id);
if (dws == NULL)
return -ENOMEM;
/* First cache line is reserved for cookie */
dws = (struct otx2_ssogws_dual *)
((uint8_t *)dws + RTE_CACHE_LINE_SIZE);
((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
)&dws->tx_adptr_data)[eth_port_id][tx_queue_id] =
(uint64_t)txq;
event_dev->data->ports[i] = dws;
} else {
struct otx2_ssogws *old_ws;
struct otx2_ssogws *ws;
old_ws = event_dev->data->ports[i];
ws = rte_realloc_socket(ssogws_get_cookie(old_ws),
sizeof(struct otx2_ssogws) +
RTE_CACHE_LINE_SIZE +
(sizeof(uint64_t) *
(dev->max_port_id + 1) *
RTE_MAX_QUEUES_PER_PORT),
RTE_CACHE_LINE_SIZE,
event_dev->data->socket_id);
if (ws == NULL)
return -ENOMEM;
/* First cache line is reserved for cookie */
ws = (struct otx2_ssogws *)
((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
)&ws->tx_adptr_data)[eth_port_id][tx_queue_id] =
(uint64_t)txq;
event_dev->data->ports[i] = ws;
}
}
return 0;
}
int
otx2_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev,
int32_t tx_queue_id)
{
struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
struct otx2_eth_txq *txq;
int i, ret;
RTE_SET_USED(id);
if (tx_queue_id < 0) {
for (i = 0 ; i < eth_dev->data->nb_tx_queues; i++) {
txq = eth_dev->data->tx_queues[i];
sso_sqb_aura_limit_edit(txq->sqb_pool,
OTX2_SSO_SQB_LIMIT);
ret = sso_add_tx_queue_data(event_dev,
eth_dev->data->port_id, i,
txq);
if (ret < 0)
return ret;
}
} else {
txq = eth_dev->data->tx_queues[tx_queue_id];
sso_sqb_aura_limit_edit(txq->sqb_pool, OTX2_SSO_SQB_LIMIT);
ret = sso_add_tx_queue_data(event_dev, eth_dev->data->port_id,
tx_queue_id, txq);
if (ret < 0)
return ret;
}
dev->tx_offloads |= otx2_eth_dev->tx_offload_flags;
sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
return 0;
}
int
otx2_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev,
int32_t tx_queue_id)
{
struct otx2_eth_txq *txq;
int i;
RTE_SET_USED(id);
RTE_SET_USED(eth_dev);
RTE_SET_USED(event_dev);
if (tx_queue_id < 0) {
for (i = 0 ; i < eth_dev->data->nb_tx_queues; i++) {
txq = eth_dev->data->tx_queues[i];
sso_sqb_aura_limit_edit(txq->sqb_pool,
txq->nb_sqb_bufs);
}
} else {
txq = eth_dev->data->tx_queues[tx_queue_id];
sso_sqb_aura_limit_edit(txq->sqb_pool, txq->nb_sqb_bufs);
}
return 0;
}


@ -1,132 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020-2021 Marvell.
*/
#include <cryptodev_pmd.h>
#include <rte_eventdev.h>
#include "otx2_cryptodev.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_qp.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_evdev.h"
int
otx2_ca_caps_get(const struct rte_eventdev *dev,
const struct rte_cryptodev *cdev, uint32_t *caps)
{
RTE_SET_USED(dev);
RTE_SET_USED(cdev);
*caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND |
RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW |
RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
return 0;
}
static int
otx2_ca_qp_sso_link(const struct rte_cryptodev *cdev, struct otx2_cpt_qp *qp,
uint16_t sso_pf_func)
{
union otx2_cpt_af_lf_ctl2 af_lf_ctl2;
int ret;
ret = otx2_cpt_af_reg_read(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
qp->blkaddr, &af_lf_ctl2.u);
if (ret)
return ret;
af_lf_ctl2.s.sso_pf_func = sso_pf_func;
ret = otx2_cpt_af_reg_write(cdev, OTX2_CPT_AF_LF_CTL2(qp->id),
qp->blkaddr, af_lf_ctl2.u);
return ret;
}
static void
otx2_ca_qp_init(struct otx2_cpt_qp *qp, const struct rte_event *event)
{
if (event) {
qp->qp_ev_bind = 1;
rte_memcpy(&qp->ev, event, sizeof(struct rte_event));
} else {
qp->qp_ev_bind = 0;
}
qp->ca_enable = 1;
}
int
otx2_ca_qp_add(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
int32_t queue_pair_id, const struct rte_event *event)
{
struct otx2_sso_evdev *sso_evdev = sso_pmd_priv(dev);
struct otx2_cpt_vf *vf = cdev->data->dev_private;
uint16_t sso_pf_func = otx2_sso_pf_func_get();
struct otx2_cpt_qp *qp;
uint8_t qp_id;
int ret;
if (queue_pair_id == -1) {
for (qp_id = 0; qp_id < vf->nb_queues; qp_id++) {
qp = cdev->data->queue_pairs[qp_id];
ret = otx2_ca_qp_sso_link(cdev, qp, sso_pf_func);
if (ret) {
uint8_t qp_tmp;
for (qp_tmp = 0; qp_tmp < qp_id; qp_tmp++)
otx2_ca_qp_del(dev, cdev, qp_tmp);
return ret;
}
otx2_ca_qp_init(qp, event);
}
} else {
qp = cdev->data->queue_pairs[queue_pair_id];
ret = otx2_ca_qp_sso_link(cdev, qp, sso_pf_func);
if (ret)
return ret;
otx2_ca_qp_init(qp, event);
}
sso_evdev->rx_offloads |= NIX_RX_OFFLOAD_SECURITY_F;
sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)dev);
/* Update crypto adapter xae count */
if (queue_pair_id == -1)
sso_evdev->adptr_xae_cnt +=
vf->nb_queues * OTX2_CPT_DEFAULT_CMD_QLEN;
else
sso_evdev->adptr_xae_cnt += OTX2_CPT_DEFAULT_CMD_QLEN;
sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)dev);
return 0;
}
int
otx2_ca_qp_del(const struct rte_eventdev *dev, const struct rte_cryptodev *cdev,
int32_t queue_pair_id)
{
struct otx2_cpt_vf *vf = cdev->data->dev_private;
struct otx2_cpt_qp *qp;
uint8_t qp_id;
int ret;
RTE_SET_USED(dev);
ret = 0;
if (queue_pair_id == -1) {
for (qp_id = 0; qp_id < vf->nb_queues; qp_id++) {
qp = cdev->data->queue_pairs[qp_id];
ret = otx2_ca_qp_sso_link(cdev, qp, 0);
if (ret)
return ret;
qp->ca_enable = 0;
}
} else {
qp = cdev->data->queue_pairs[queue_pair_id];
ret = otx2_ca_qp_sso_link(cdev, qp, 0);
if (ret)
return ret;
qp->ca_enable = 0;
}
return 0;
}


@ -1,77 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef _OTX2_EVDEV_CRYPTO_ADPTR_RX_H_
#define _OTX2_EVDEV_CRYPTO_ADPTR_RX_H_
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_eventdev.h>
#include "cpt_pmd_logs.h"
#include "cpt_ucode.h"
#include "otx2_cryptodev.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_ops_helper.h"
#include "otx2_cryptodev_qp.h"
static inline void
otx2_ca_deq_post_process(const struct otx2_cpt_qp *qp,
struct rte_crypto_op *cop, uintptr_t *rsp,
uint8_t cc)
{
if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
if (likely(cc == NO_ERR)) {
/* Verify authentication data if required */
if (unlikely(rsp[2]))
compl_auth_verify(cop, (uint8_t *)rsp[2],
rsp[3]);
else
cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
} else {
if (cc == ERR_GC_ICV_MISCOMPARE)
cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
else
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
sym_session_clear(otx2_cryptodev_driver_id,
cop->sym->session);
memset(cop->sym->session, 0,
rte_cryptodev_sym_get_existing_header_session_size(
cop->sym->session));
rte_mempool_put(qp->sess_mp, cop->sym->session);
cop->sym->session = NULL;
}
}
}
static inline uint64_t
otx2_handle_crypto_event(uint64_t get_work1)
{
struct cpt_request_info *req;
const struct otx2_cpt_qp *qp;
struct rte_crypto_op *cop;
uintptr_t *rsp;
void *metabuf;
uint8_t cc;
req = (struct cpt_request_info *)(get_work1);
cc = otx2_cpt_compcode_get(req);
qp = req->qp;
rsp = req->op;
metabuf = (void *)rsp[0];
cop = (void *)rsp[1];
otx2_ca_deq_post_process(qp, cop, rsp, cc);
rte_mempool_put(qp->meta_info.pool, metabuf);
return (uint64_t)(cop);
}
#endif /* _OTX2_EVDEV_CRYPTO_ADPTR_RX_H_ */


@ -1,83 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2021 Marvell International Ltd.
*/
#ifndef _OTX2_EVDEV_CRYPTO_ADPTR_TX_H_
#define _OTX2_EVDEV_CRYPTO_ADPTR_TX_H_
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_event_crypto_adapter.h>
#include <rte_eventdev.h>
#include <otx2_cryptodev_qp.h>
#include <otx2_worker.h>
static inline uint16_t
otx2_ca_enq(uintptr_t tag_op, const struct rte_event *ev)
{
union rte_event_crypto_metadata *m_data;
struct rte_crypto_op *crypto_op;
struct rte_cryptodev *cdev;
struct otx2_cpt_qp *qp;
uint8_t cdev_id;
uint16_t qp_id;
crypto_op = ev->event_ptr;
if (crypto_op == NULL)
return 0;
if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
m_data = rte_cryptodev_sym_session_get_user_data(
crypto_op->sym->session);
if (m_data == NULL)
goto free_op;
cdev_id = m_data->request_info.cdev_id;
qp_id = m_data->request_info.queue_pair_id;
} else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
crypto_op->private_data_offset) {
m_data = (union rte_event_crypto_metadata *)
((uint8_t *)crypto_op +
crypto_op->private_data_offset);
cdev_id = m_data->request_info.cdev_id;
qp_id = m_data->request_info.queue_pair_id;
} else {
goto free_op;
}
cdev = &rte_cryptodevs[cdev_id];
qp = cdev->data->queue_pairs[qp_id];
if (!ev->sched_type)
otx2_ssogws_head_wait(tag_op);
if (qp->ca_enable)
return cdev->enqueue_burst(qp, &crypto_op, 1);
free_op:
rte_pktmbuf_free(crypto_op->sym->m_src);
rte_crypto_op_free(crypto_op);
rte_errno = EINVAL;
return 0;
}
static uint16_t __rte_hot
otx2_ssogws_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
{
struct otx2_ssogws *ws = port;
RTE_SET_USED(nb_events);
return otx2_ca_enq(ws->tag_op, ev);
}
static uint16_t __rte_hot
otx2_ssogws_dual_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
{
struct otx2_ssogws_dual *ws = port;
RTE_SET_USED(nb_events);
return otx2_ca_enq(ws->ws_state[!ws->vws].tag_op, ev);
}
#endif /* _OTX2_EVDEV_CRYPTO_ADPTR_TX_H_ */


@ -1,272 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#include "otx2_evdev.h"
#include "otx2_tim_evdev.h"
static void
sso_lf_irq(void *param)
{
uintptr_t base = (uintptr_t)param;
uint64_t intr;
uint8_t ggrp;
ggrp = (base >> 12) & 0xFF;
intr = otx2_read64(base + SSO_LF_GGRP_INT);
if (intr == 0)
return;
otx2_err("GGRP %d GGRP_INT=0x%" PRIx64 "", ggrp, intr);
/* Clear interrupt */
otx2_write64(intr, base + SSO_LF_GGRP_INT);
}
static int
sso_lf_register_irq(const struct rte_eventdev *event_dev, uint16_t ggrp_msixoff,
uintptr_t base)
{
struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(event_dev->dev);
struct rte_intr_handle *handle = pci_dev->intr_handle;
int rc, vec;
vec = ggrp_msixoff + SSO_LF_INT_VEC_GRP;
/* Clear err interrupt */
otx2_write64(~0ull, base + SSO_LF_GGRP_INT_ENA_W1C);
/* Set used interrupt vectors */
rc = otx2_register_irq(handle, sso_lf_irq, (void *)base, vec);
/* Enable hw interrupt */
otx2_write64(~0ull, base + SSO_LF_GGRP_INT_ENA_W1S);
return rc;
}
static void
ssow_lf_irq(void *param)
{
uintptr_t base = (uintptr_t)param;
uint8_t gws = (base >> 12) & 0xFF;
uint64_t intr;
intr = otx2_read64(base + SSOW_LF_GWS_INT);
if (intr == 0)
return;
otx2_err("GWS %d GWS_INT=0x%" PRIx64 "", gws, intr);
/* Clear interrupt */
otx2_write64(intr, base + SSOW_LF_GWS_INT);
}
static int
ssow_lf_register_irq(const struct rte_eventdev *event_dev, uint16_t gws_msixoff,
uintptr_t base)
{
struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(event_dev->dev);
struct rte_intr_handle *handle = pci_dev->intr_handle;
int rc, vec;
vec = gws_msixoff + SSOW_LF_INT_VEC_IOP;
/* Clear err interrupt */
otx2_write64(~0ull, base + SSOW_LF_GWS_INT_ENA_W1C);
/* Set used interrupt vectors */
rc = otx2_register_irq(handle, ssow_lf_irq, (void *)base, vec);
/* Enable hw interrupt */
otx2_write64(~0ull, base + SSOW_LF_GWS_INT_ENA_W1S);
return rc;
}
static void
sso_lf_unregister_irq(const struct rte_eventdev *event_dev,
uint16_t ggrp_msixoff, uintptr_t base)
{
struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(event_dev->dev);
struct rte_intr_handle *handle = pci_dev->intr_handle;
int vec;
vec = ggrp_msixoff + SSO_LF_INT_VEC_GRP;
/* Clear err interrupt */
otx2_write64(~0ull, base + SSO_LF_GGRP_INT_ENA_W1C);
otx2_unregister_irq(handle, sso_lf_irq, (void *)base, vec);
}
static void
ssow_lf_unregister_irq(const struct rte_eventdev *event_dev,
uint16_t gws_msixoff, uintptr_t base)
{
struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(event_dev->dev);
struct rte_intr_handle *handle = pci_dev->intr_handle;
int vec;
vec = gws_msixoff + SSOW_LF_INT_VEC_IOP;
/* Clear err interrupt */
otx2_write64(~0ull, base + SSOW_LF_GWS_INT_ENA_W1C);
otx2_unregister_irq(handle, ssow_lf_irq, (void *)base, vec);
}
int
sso_register_irqs(const struct rte_eventdev *event_dev)
{
struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
int i, rc = -EINVAL;
uint8_t nb_ports;
nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
for (i = 0; i < dev->nb_event_queues; i++) {
if (dev->sso_msixoff[i] == MSIX_VECTOR_INVALID) {
otx2_err("Invalid SSOLF MSIX offset[%d] vector: 0x%x",
i, dev->sso_msixoff[i]);
goto fail;
}
}
for (i = 0; i < nb_ports; i++) {
if (dev->ssow_msixoff[i] == MSIX_VECTOR_INVALID) {
otx2_err("Invalid SSOWLF MSIX offset[%d] vector: 0x%x",
i, dev->ssow_msixoff[i]);
goto fail;
}
}
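/* Each LF's register region sits inside BAR2 at
 * (block address << 20 | LF slot << 12); the IRQ handlers above recover
 * the LF index from bits [19:12] of the base they are registered with.
 */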
for (i = 0; i < dev->nb_event_queues; i++) {
uintptr_t base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 |
i << 12);
rc = sso_lf_register_irq(event_dev, dev->sso_msixoff[i], base);
}
for (i = 0; i < nb_ports; i++) {
uintptr_t base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 |
i << 12);
rc = ssow_lf_register_irq(event_dev, dev->ssow_msixoff[i],
base);
}
fail:
return rc;
}
void
sso_unregister_irqs(const struct rte_eventdev *event_dev)
{
struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
uint8_t nb_ports;
int i;
nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
for (i = 0; i < dev->nb_event_queues; i++) {
uintptr_t base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 |
i << 12);
sso_lf_unregister_irq(event_dev, dev->sso_msixoff[i], base);
}
for (i = 0; i < nb_ports; i++) {
uintptr_t base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 |
i << 12);
ssow_lf_unregister_irq(event_dev, dev->ssow_msixoff[i], base);
}
}
static void
tim_lf_irq(void *param)
{
uintptr_t base = (uintptr_t)param;
uint64_t intr;
uint8_t ring;
ring = (base >> 12) & 0xFF;
intr = otx2_read64(base + TIM_LF_NRSPERR_INT);
otx2_err("TIM RING %d TIM_LF_NRSPERR_INT=0x%" PRIx64 "", ring, intr);
intr = otx2_read64(base + TIM_LF_RAS_INT);
otx2_err("TIM RING %d TIM_LF_RAS_INT=0x%" PRIx64 "", ring, intr);
/* Clear interrupt */
otx2_write64(intr, base + TIM_LF_NRSPERR_INT);
otx2_write64(intr, base + TIM_LF_RAS_INT);
}
static int
tim_lf_register_irq(struct rte_pci_device *pci_dev, uint16_t tim_msixoff,
uintptr_t base)
{
struct rte_intr_handle *handle = pci_dev->intr_handle;
int rc, vec;
vec = tim_msixoff + TIM_LF_INT_VEC_NRSPERR_INT;
/* Clear err interrupt */
otx2_write64(~0ull, base + TIM_LF_NRSPERR_INT);
/* Set used interrupt vectors */
rc = otx2_register_irq(handle, tim_lf_irq, (void *)base, vec);
/* Enable hw interrupt */
otx2_write64(~0ull, base + TIM_LF_NRSPERR_INT_ENA_W1S);
vec = tim_msixoff + TIM_LF_INT_VEC_RAS_INT;
/* Clear err interrupt */
otx2_write64(~0ull, base + TIM_LF_RAS_INT);
/* Set used interrupt vectors */
rc = otx2_register_irq(handle, tim_lf_irq, (void *)base, vec);
/* Enable hw interrupt */
otx2_write64(~0ull, base + TIM_LF_RAS_INT_ENA_W1S);
return rc;
}
static void
tim_lf_unregister_irq(struct rte_pci_device *pci_dev, uint16_t tim_msixoff,
uintptr_t base)
{
struct rte_intr_handle *handle = pci_dev->intr_handle;
int vec;
vec = tim_msixoff + TIM_LF_INT_VEC_NRSPERR_INT;
/* Clear err interrupt */
otx2_write64(~0ull, base + TIM_LF_NRSPERR_INT_ENA_W1C);
otx2_unregister_irq(handle, tim_lf_irq, (void *)base, vec);
vec = tim_msixoff + TIM_LF_INT_VEC_RAS_INT;
/* Clear err interrupt */
otx2_write64(~0ull, base + TIM_LF_RAS_INT_ENA_W1C);
otx2_unregister_irq(handle, tim_lf_irq, (void *)base, vec);
}
int
tim_register_irq(uint16_t ring_id)
{
struct otx2_tim_evdev *dev = tim_priv_get();
int rc = -EINVAL;
uintptr_t base;
if (dev->tim_msixoff[ring_id] == MSIX_VECTOR_INVALID) {
otx2_err("Invalid TIMLF MSIX offset[%d] vector: 0x%x",
ring_id, dev->tim_msixoff[ring_id]);
goto fail;
}
base = dev->bar2 + (RVU_BLOCK_ADDR_TIM << 20 | ring_id << 12);
rc = tim_lf_register_irq(dev->pci_dev, dev->tim_msixoff[ring_id], base);
fail:
return rc;
}
void
tim_unregister_irq(uint16_t ring_id)
{
struct otx2_tim_evdev *dev = tim_priv_get();
uintptr_t base;
base = dev->bar2 + (RVU_BLOCK_ADDR_TIM << 20 | ring_id << 12);
tim_lf_unregister_irq(dev->pci_dev, dev->tim_msixoff[ring_id], base);
}

File diff suppressed because it is too large.


@ -1,286 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef __OTX2_EVDEV_STATS_H__
#define __OTX2_EVDEV_STATS_H__
#include "otx2_evdev.h"
struct otx2_sso_xstats_name {
const char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
const size_t offset;
const uint64_t mask;
const uint8_t shift;
uint64_t reset_snap[OTX2_SSO_MAX_VHGRP];
};
static struct otx2_sso_xstats_name sso_hws_xstats[] = {
{"last_grp_serviced", offsetof(struct sso_hws_stats, arbitration),
0x3FF, 0, {0} },
{"affinity_arbitration_credits",
offsetof(struct sso_hws_stats, arbitration),
0xF, 16, {0} },
};
static struct otx2_sso_xstats_name sso_grp_xstats[] = {
{"wrk_sched", offsetof(struct sso_grp_stats, ws_pc), ~0x0, 0,
{0} },
{"xaq_dram", offsetof(struct sso_grp_stats, ext_pc), ~0x0,
0, {0} },
{"add_wrk", offsetof(struct sso_grp_stats, wa_pc), ~0x0, 0,
{0} },
{"tag_switch_req", offsetof(struct sso_grp_stats, ts_pc), ~0x0, 0,
{0} },
{"desched_req", offsetof(struct sso_grp_stats, ds_pc), ~0x0, 0,
{0} },
{"desched_wrk", offsetof(struct sso_grp_stats, dq_pc), ~0x0, 0,
{0} },
{"xaq_cached", offsetof(struct sso_grp_stats, aw_status), 0x3,
0, {0} },
{"work_inflight", offsetof(struct sso_grp_stats, aw_status), 0x3F,
16, {0} },
{"inuse_pages", offsetof(struct sso_grp_stats, page_cnt),
0xFFFFFFFF, 0, {0} },
};
#define OTX2_SSO_NUM_HWS_XSTATS RTE_DIM(sso_hws_xstats)
#define OTX2_SSO_NUM_GRP_XSTATS RTE_DIM(sso_grp_xstats)
#define OTX2_SSO_NUM_XSTATS (OTX2_SSO_NUM_HWS_XSTATS + OTX2_SSO_NUM_GRP_XSTATS)
static int
otx2_sso_xstats_get(const struct rte_eventdev *event_dev,
enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
const unsigned int ids[], uint64_t values[], unsigned int n)
{
struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
struct otx2_sso_xstats_name *xstats;
struct otx2_sso_xstats_name *xstat;
struct otx2_mbox *mbox = dev->mbox;
uint32_t xstats_mode_count = 0;
uint32_t start_offset = 0;
unsigned int i;
uint64_t value;
void *req_rsp;
int rc;
switch (mode) {
case RTE_EVENT_DEV_XSTATS_DEVICE:
return 0;
case RTE_EVENT_DEV_XSTATS_PORT:
if (queue_port_id >= (signed int)dev->nb_event_ports)
goto invalid_value;
xstats_mode_count = OTX2_SSO_NUM_HWS_XSTATS;
xstats = sso_hws_xstats;
req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
((struct sso_info_req *)req_rsp)->hws = dev->dual_ws ?
2 * queue_port_id : queue_port_id;
rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
if (rc < 0)
goto invalid_value;
if (dev->dual_ws) {
for (i = 0; i < n && i < xstats_mode_count; i++) {
xstat = &xstats[ids[i] - start_offset];
values[i] = *(uint64_t *)
((char *)req_rsp + xstat->offset);
values[i] = (values[i] >> xstat->shift) &
xstat->mask;
}
req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
((struct sso_info_req *)req_rsp)->hws =
(2 * queue_port_id) + 1;
rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
if (rc < 0)
goto invalid_value;
}
break;
case RTE_EVENT_DEV_XSTATS_QUEUE:
if (queue_port_id >= (signed int)dev->nb_event_queues)
goto invalid_value;
xstats_mode_count = OTX2_SSO_NUM_GRP_XSTATS;
start_offset = OTX2_SSO_NUM_HWS_XSTATS;
xstats = sso_grp_xstats;
req_rsp = otx2_mbox_alloc_msg_sso_grp_get_stats(mbox);
((struct sso_info_req *)req_rsp)->grp = queue_port_id;
rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
if (rc < 0)
goto invalid_value;
break;
default:
otx2_err("Invalid mode received");
goto invalid_value;
}
for (i = 0; i < n && i < xstats_mode_count; i++) {
xstat = &xstats[ids[i] - start_offset];
value = *(uint64_t *)((char *)req_rsp + xstat->offset);
value = (value >> xstat->shift) & xstat->mask;
if ((mode == RTE_EVENT_DEV_XSTATS_PORT) && dev->dual_ws)
values[i] += value;
else
values[i] = value;
values[i] -= xstat->reset_snap[queue_port_id];
}
return i;
invalid_value:
return -EINVAL;
}
static int
otx2_sso_xstats_reset(struct rte_eventdev *event_dev,
enum rte_event_dev_xstats_mode mode,
int16_t queue_port_id, const uint32_t ids[], uint32_t n)
{
struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
struct otx2_sso_xstats_name *xstats;
struct otx2_sso_xstats_name *xstat;
struct otx2_mbox *mbox = dev->mbox;
uint32_t xstats_mode_count = 0;
uint32_t start_offset = 0;
unsigned int i;
uint64_t value;
void *req_rsp;
int rc;
switch (mode) {
case RTE_EVENT_DEV_XSTATS_DEVICE:
return 0;
case RTE_EVENT_DEV_XSTATS_PORT:
if (queue_port_id >= (signed int)dev->nb_event_ports)
goto invalid_value;
xstats_mode_count = OTX2_SSO_NUM_HWS_XSTATS;
xstats = sso_hws_xstats;
req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
((struct sso_info_req *)req_rsp)->hws = dev->dual_ws ?
2 * queue_port_id : queue_port_id;
rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
if (rc < 0)
goto invalid_value;
if (dev->dual_ws) {
for (i = 0; i < n && i < xstats_mode_count; i++) {
xstat = &xstats[ids[i] - start_offset];
xstat->reset_snap[queue_port_id] = *(uint64_t *)
((char *)req_rsp + xstat->offset);
xstat->reset_snap[queue_port_id] =
(xstat->reset_snap[queue_port_id] >>
xstat->shift) & xstat->mask;
}
req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
((struct sso_info_req *)req_rsp)->hws =
(2 * queue_port_id) + 1;
rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
if (rc < 0)
goto invalid_value;
}
break;
case RTE_EVENT_DEV_XSTATS_QUEUE:
if (queue_port_id >= (signed int)dev->nb_event_queues)
goto invalid_value;
xstats_mode_count = OTX2_SSO_NUM_GRP_XSTATS;
start_offset = OTX2_SSO_NUM_HWS_XSTATS;
xstats = sso_grp_xstats;
req_rsp = otx2_mbox_alloc_msg_sso_grp_get_stats(mbox);
((struct sso_info_req *)req_rsp)->grp = queue_port_id;
rc = otx2_mbox_process_msg(mbox, (void *)&req_rsp);
if (rc < 0)
goto invalid_value;
break;
default:
otx2_err("Invalid mode received");
goto invalid_value;
}
for (i = 0; i < n && i < xstats_mode_count; i++) {
xstat = &xstats[ids[i] - start_offset];
value = *(uint64_t *)((char *)req_rsp + xstat->offset);
value = (value >> xstat->shift) & xstat->mask;
if ((mode == RTE_EVENT_DEV_XSTATS_PORT) && dev->dual_ws)
xstat->reset_snap[queue_port_id] += value;
else
xstat->reset_snap[queue_port_id] = value;
}
return i;
invalid_value:
return -EINVAL;
}
static int
otx2_sso_xstats_get_names(const struct rte_eventdev *event_dev,
enum rte_event_dev_xstats_mode mode,
uint8_t queue_port_id,
struct rte_event_dev_xstats_name *xstats_names,
unsigned int *ids, unsigned int size)
{
struct rte_event_dev_xstats_name xstats_names_copy[OTX2_SSO_NUM_XSTATS];
struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
uint32_t xstats_mode_count = 0;
uint32_t start_offset = 0;
unsigned int xidx = 0;
unsigned int i;
for (i = 0; i < OTX2_SSO_NUM_HWS_XSTATS; i++) {
snprintf(xstats_names_copy[i].name,
sizeof(xstats_names_copy[i].name), "%s",
sso_hws_xstats[i].name);
}
for (; i < OTX2_SSO_NUM_XSTATS; i++) {
snprintf(xstats_names_copy[i].name,
sizeof(xstats_names_copy[i].name), "%s",
sso_grp_xstats[i - OTX2_SSO_NUM_HWS_XSTATS].name);
}
switch (mode) {
case RTE_EVENT_DEV_XSTATS_DEVICE:
break;
case RTE_EVENT_DEV_XSTATS_PORT:
if (queue_port_id >= (signed int)dev->nb_event_ports)
break;
xstats_mode_count = OTX2_SSO_NUM_HWS_XSTATS;
break;
case RTE_EVENT_DEV_XSTATS_QUEUE:
if (queue_port_id >= (signed int)dev->nb_event_queues)
break;
xstats_mode_count = OTX2_SSO_NUM_GRP_XSTATS;
start_offset = OTX2_SSO_NUM_HWS_XSTATS;
break;
default:
otx2_err("Invalid mode received");
return -EINVAL;
}
if (xstats_mode_count > size || !ids || !xstats_names)
return xstats_mode_count;
for (i = 0; i < xstats_mode_count; i++) {
xidx = i + start_offset;
strncpy(xstats_names[i].name, xstats_names_copy[xidx].name,
sizeof(xstats_names[i].name));
ids[i] = xidx;
}
return i;
}
#endif


@ -1,735 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include "otx2_evdev.h"
#include "otx2_tim_evdev.h"
static struct event_timer_adapter_ops otx2_tim_ops;
static inline int
tim_get_msix_offsets(void)
{
struct otx2_tim_evdev *dev = tim_priv_get();
struct otx2_mbox *mbox = dev->mbox;
struct msix_offset_rsp *msix_rsp;
int i, rc;
/* Get TIM MSIX vector offsets */
otx2_mbox_alloc_msg_msix_offset(mbox);
rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
for (i = 0; i < dev->nb_rings; i++)
dev->tim_msixoff[i] = msix_rsp->timlf_msixoff[i];
return rc;
}
static void
tim_set_fp_ops(struct otx2_tim_ring *tim_ring)
{
uint8_t prod_flag = !tim_ring->prod_type_sp;
/* [DFB/FB] [SP][MP]*/
const rte_event_timer_arm_burst_t arm_burst[2][2][2] = {
#define FP(_name, _f3, _f2, _f1, flags) \
[_f3][_f2][_f1] = otx2_tim_arm_burst_##_name,
TIM_ARM_FASTPATH_MODES
#undef FP
};
const rte_event_timer_arm_tmo_tick_burst_t arm_tmo_burst[2][2] = {
#define FP(_name, _f2, _f1, flags) \
[_f2][_f1] = otx2_tim_arm_tmo_tick_burst_##_name,
TIM_ARM_TMO_FASTPATH_MODES
#undef FP
};
otx2_tim_ops.arm_burst =
arm_burst[tim_ring->enable_stats][tim_ring->ena_dfb][prod_flag];
otx2_tim_ops.arm_tmo_tick_burst =
arm_tmo_burst[tim_ring->enable_stats][tim_ring->ena_dfb];
otx2_tim_ops.cancel_burst = otx2_tim_timer_cancel_burst;
}
static void
otx2_tim_ring_info_get(const struct rte_event_timer_adapter *adptr,
struct rte_event_timer_adapter_info *adptr_info)
{
struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
adptr_info->max_tmo_ns = tim_ring->max_tout;
adptr_info->min_resolution_ns = tim_ring->ena_periodic ?
tim_ring->max_tout : tim_ring->tck_nsec;
rte_memcpy(&adptr_info->conf, &adptr->data->conf,
sizeof(struct rte_event_timer_adapter_conf));
}
static int
tim_chnk_pool_create(struct otx2_tim_ring *tim_ring,
struct rte_event_timer_adapter_conf *rcfg)
{
unsigned int cache_sz = (tim_ring->nb_chunks / 1.5);
unsigned int mp_flags = 0;
char pool_name[25];
int rc;
cache_sz /= rte_lcore_count();
/* Create chunk pool. */
if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
mp_flags = RTE_MEMPOOL_F_SP_PUT | RTE_MEMPOOL_F_SC_GET;
otx2_tim_dbg("Using single producer mode");
tim_ring->prod_type_sp = true;
}
snprintf(pool_name, sizeof(pool_name), "otx2_tim_chunk_pool%d",
tim_ring->ring_id);
if (cache_sz > RTE_MEMPOOL_CACHE_MAX_SIZE)
cache_sz = RTE_MEMPOOL_CACHE_MAX_SIZE;
cache_sz = cache_sz != 0 ? cache_sz : 2;
tim_ring->nb_chunks += (cache_sz * rte_lcore_count());
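/* Illustrative sizing, assuming RTE_MEMPOOL_CACHE_MAX_SIZE is 512: with
 * nb_chunks = 4096 and 8 lcores, cache_sz = 4096 / 1.5 / 8 = 341 (below the
 * clamp), and nb_chunks grows to 4096 + 341 * 8 = 6824 so that chunks parked
 * in per-lcore caches do not starve the ring.
 */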
if (!tim_ring->disable_npa) {
tim_ring->chunk_pool = rte_mempool_create_empty(pool_name,
tim_ring->nb_chunks, tim_ring->chunk_sz,
cache_sz, 0, rte_socket_id(), mp_flags);
if (tim_ring->chunk_pool == NULL) {
otx2_err("Unable to create chunkpool.");
return -ENOMEM;
}
rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
rte_mbuf_platform_mempool_ops(),
NULL);
if (rc < 0) {
otx2_err("Unable to set chunkpool ops");
goto free;
}
rc = rte_mempool_populate_default(tim_ring->chunk_pool);
if (rc < 0) {
otx2_err("Unable to set populate chunkpool.");
goto free;
}
tim_ring->aura = npa_lf_aura_handle_to_aura(
tim_ring->chunk_pool->pool_id);
tim_ring->ena_dfb = tim_ring->ena_periodic ? 1 : 0;
} else {
tim_ring->chunk_pool = rte_mempool_create(pool_name,
tim_ring->nb_chunks, tim_ring->chunk_sz,
cache_sz, 0, NULL, NULL, NULL, NULL,
rte_socket_id(),
mp_flags);
if (tim_ring->chunk_pool == NULL) {
otx2_err("Unable to create chunkpool.");
return -ENOMEM;
}
tim_ring->ena_dfb = 1;
}
return 0;
free:
rte_mempool_free(tim_ring->chunk_pool);
return rc;
}
static void
tim_err_desc(int rc)
{
switch (rc) {
case TIM_AF_NO_RINGS_LEFT:
otx2_err("Unable to allocat new TIM ring.");
break;
case TIM_AF_INVALID_NPA_PF_FUNC:
otx2_err("Invalid NPA pf func.");
break;
case TIM_AF_INVALID_SSO_PF_FUNC:
otx2_err("Invalid SSO pf func.");
break;
case TIM_AF_RING_STILL_RUNNING:
otx2_tim_dbg("Ring busy.");
break;
case TIM_AF_LF_INVALID:
otx2_err("Invalid Ring id.");
break;
case TIM_AF_CSIZE_NOT_ALIGNED:
otx2_err("Chunk size specified needs to be multiple of 16.");
break;
case TIM_AF_CSIZE_TOO_SMALL:
otx2_err("Chunk size too small.");
break;
case TIM_AF_CSIZE_TOO_BIG:
otx2_err("Chunk size too big.");
break;
case TIM_AF_INTERVAL_TOO_SMALL:
otx2_err("Bucket traversal interval too small.");
break;
case TIM_AF_INVALID_BIG_ENDIAN_VALUE:
otx2_err("Invalid Big endian value.");
break;
case TIM_AF_INVALID_CLOCK_SOURCE:
otx2_err("Invalid Clock source specified.");
break;
case TIM_AF_GPIO_CLK_SRC_NOT_ENABLED:
otx2_err("GPIO clock source not enabled.");
break;
case TIM_AF_INVALID_BSIZE:
otx2_err("Invalid bucket size.");
break;
case TIM_AF_INVALID_ENABLE_PERIODIC:
otx2_err("Invalid bucket size.");
break;
case TIM_AF_INVALID_ENABLE_DONTFREE:
otx2_err("Invalid Don't free value.");
break;
case TIM_AF_ENA_DONTFRE_NSET_PERIODIC:
otx2_err("Don't free bit not set when periodic is enabled.");
break;
case TIM_AF_RING_ALREADY_DISABLED:
otx2_err("Ring already stopped");
break;
default:
otx2_err("Unknown Error.");
}
}
static int
otx2_tim_ring_create(struct rte_event_timer_adapter *adptr)
{
struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
struct otx2_tim_evdev *dev = tim_priv_get();
struct otx2_tim_ring *tim_ring;
struct tim_config_req *cfg_req;
struct tim_ring_req *free_req;
struct tim_lf_alloc_req *req;
struct tim_lf_alloc_rsp *rsp;
uint8_t is_periodic;
int i, rc;
if (dev == NULL)
return -ENODEV;
if (adptr->data->id >= dev->nb_rings)
return -ENODEV;
req = otx2_mbox_alloc_msg_tim_lf_alloc(dev->mbox);
req->npa_pf_func = otx2_npa_pf_func_get();
req->sso_pf_func = otx2_sso_pf_func_get();
req->ring = adptr->data->id;
rc = otx2_mbox_process_msg(dev->mbox, (void **)&rsp);
if (rc < 0) {
tim_err_desc(rc);
return -ENODEV;
}
if (NSEC2TICK(RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10),
rsp->tenns_clk) < OTX2_TIM_MIN_TMO_TKS) {
if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
rcfg->timer_tick_ns = TICK2NSEC(OTX2_TIM_MIN_TMO_TKS,
rsp->tenns_clk);
else {
rc = -ERANGE;
goto rng_mem_err;
}
}
is_periodic = 0;
if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_PERIODIC) {
if (rcfg->max_tmo_ns &&
rcfg->max_tmo_ns != rcfg->timer_tick_ns) {
rc = -ERANGE;
goto rng_mem_err;
}
/* Use 2 buckets to avoid contention */
rcfg->max_tmo_ns = rcfg->timer_tick_ns;
rcfg->timer_tick_ns /= 2;
is_periodic = 1;
}
tim_ring = rte_zmalloc("otx2_tim_prv", sizeof(struct otx2_tim_ring), 0);
if (tim_ring == NULL) {
rc = -ENOMEM;
goto rng_mem_err;
}
adptr->data->adapter_priv = tim_ring;
tim_ring->tenns_clk_freq = rsp->tenns_clk;
tim_ring->clk_src = (int)rcfg->clk_src;
tim_ring->ring_id = adptr->data->id;
tim_ring->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
tim_ring->max_tout = is_periodic ?
rcfg->timer_tick_ns * 2 : rcfg->max_tmo_ns;
tim_ring->nb_bkts = (tim_ring->max_tout / tim_ring->tck_nsec);
tim_ring->chunk_sz = dev->chunk_sz;
tim_ring->nb_timers = rcfg->nb_timers;
tim_ring->disable_npa = dev->disable_npa;
tim_ring->ena_periodic = is_periodic;
tim_ring->enable_stats = dev->enable_stats;
for (i = 0; i < dev->ring_ctl_cnt ; i++) {
struct otx2_tim_ctl *ring_ctl = &dev->ring_ctl_data[i];
if (ring_ctl->ring == tim_ring->ring_id) {
tim_ring->chunk_sz = ring_ctl->chunk_slots ?
((uint32_t)(ring_ctl->chunk_slots + 1) *
OTX2_TIM_CHUNK_ALIGNMENT) : tim_ring->chunk_sz;
tim_ring->enable_stats = ring_ctl->enable_stats;
tim_ring->disable_npa = ring_ctl->disable_npa;
}
}
if (tim_ring->disable_npa) {
tim_ring->nb_chunks =
tim_ring->nb_timers /
OTX2_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
tim_ring->nb_chunks = tim_ring->nb_chunks * tim_ring->nb_bkts;
} else {
tim_ring->nb_chunks = tim_ring->nb_timers;
}
tim_ring->nb_chunk_slots = OTX2_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
tim_ring->bkt = rte_zmalloc("otx2_tim_bucket", (tim_ring->nb_bkts) *
sizeof(struct otx2_tim_bkt),
RTE_CACHE_LINE_SIZE);
if (tim_ring->bkt == NULL)
goto bkt_mem_err;
rc = tim_chnk_pool_create(tim_ring, rcfg);
if (rc < 0)
goto chnk_mem_err;
cfg_req = otx2_mbox_alloc_msg_tim_config_ring(dev->mbox);
cfg_req->ring = tim_ring->ring_id;
cfg_req->bigendian = false;
cfg_req->clocksource = tim_ring->clk_src;
cfg_req->enableperiodic = tim_ring->ena_periodic;
cfg_req->enabledontfreebuffer = tim_ring->ena_dfb;
cfg_req->bucketsize = tim_ring->nb_bkts;
cfg_req->chunksize = tim_ring->chunk_sz;
cfg_req->interval = NSEC2TICK(tim_ring->tck_nsec,
tim_ring->tenns_clk_freq);
rc = otx2_mbox_process(dev->mbox);
if (rc < 0) {
tim_err_desc(rc);
goto chnk_mem_err;
}
tim_ring->base = dev->bar2 +
(RVU_BLOCK_ADDR_TIM << 20 | tim_ring->ring_id << 12);
rc = tim_register_irq(tim_ring->ring_id);
if (rc < 0)
goto chnk_mem_err;
otx2_write64((uint64_t)tim_ring->bkt,
tim_ring->base + TIM_LF_RING_BASE);
otx2_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);
/* Set fastpath ops. */
tim_set_fp_ops(tim_ring);
/* Update SSO xae count. */
sso_updt_xae_cnt(sso_pmd_priv(dev->event_dev), (void *)tim_ring,
RTE_EVENT_TYPE_TIMER);
sso_xae_reconfigure(dev->event_dev);
otx2_tim_dbg("Total memory used %"PRIu64"MB\n",
(uint64_t)(((tim_ring->nb_chunks * tim_ring->chunk_sz)
+ (tim_ring->nb_bkts * sizeof(struct otx2_tim_bkt))) /
BIT_ULL(20)));
return rc;
chnk_mem_err:
rte_free(tim_ring->bkt);
bkt_mem_err:
rte_free(tim_ring);
rng_mem_err:
free_req = otx2_mbox_alloc_msg_tim_lf_free(dev->mbox);
free_req->ring = adptr->data->id;
otx2_mbox_process(dev->mbox);
return rc;
}
static void
otx2_tim_calibrate_start_tsc(struct otx2_tim_ring *tim_ring)
{
#define OTX2_TIM_CALIB_ITER 1E6
uint32_t real_bkt, bucket;
int icount, ecount = 0;
uint64_t bkt_cyc;
for (icount = 0; icount < OTX2_TIM_CALIB_ITER; icount++) {
real_bkt = otx2_read64(tim_ring->base + TIM_LF_RING_REL) >> 44;
bkt_cyc = tim_cntvct();
bucket = (bkt_cyc - tim_ring->ring_start_cyc) /
tim_ring->tck_int;
bucket = bucket % (tim_ring->nb_bkts);
tim_ring->ring_start_cyc = bkt_cyc - (real_bkt *
tim_ring->tck_int);
if (bucket != real_bkt)
ecount++;
}
tim_ring->last_updt_cyc = bkt_cyc;
otx2_tim_dbg("Bucket mispredict %3.2f distance %d\n",
100 - (((double)(icount - ecount) / (double)icount) * 100),
bucket - real_bkt);
}
static int
otx2_tim_ring_start(const struct rte_event_timer_adapter *adptr)
{
struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
struct otx2_tim_evdev *dev = tim_priv_get();
struct tim_enable_rsp *rsp;
struct tim_ring_req *req;
int rc;
if (dev == NULL)
return -ENODEV;
req = otx2_mbox_alloc_msg_tim_enable_ring(dev->mbox);
req->ring = tim_ring->ring_id;
rc = otx2_mbox_process_msg(dev->mbox, (void **)&rsp);
if (rc < 0) {
tim_err_desc(rc);
goto fail;
}
tim_ring->ring_start_cyc = rsp->timestarted;
tim_ring->tck_int = NSEC2TICK(tim_ring->tck_nsec, tim_cntfrq());
tim_ring->tot_int = tim_ring->tck_int * tim_ring->nb_bkts;
tim_ring->fast_div = rte_reciprocal_value_u64(tim_ring->tck_int);
tim_ring->fast_bkt = rte_reciprocal_value_u64(tim_ring->nb_bkts);
otx2_tim_calibrate_start_tsc(tim_ring);
fail:
return rc;
}
static int
otx2_tim_ring_stop(const struct rte_event_timer_adapter *adptr)
{
struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
struct otx2_tim_evdev *dev = tim_priv_get();
struct tim_ring_req *req;
int rc;
if (dev == NULL)
return -ENODEV;
req = otx2_mbox_alloc_msg_tim_disable_ring(dev->mbox);
req->ring = tim_ring->ring_id;
rc = otx2_mbox_process(dev->mbox);
if (rc < 0) {
tim_err_desc(rc);
rc = -EBUSY;
}
return rc;
}
static int
otx2_tim_ring_free(struct rte_event_timer_adapter *adptr)
{
struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
struct otx2_tim_evdev *dev = tim_priv_get();
struct tim_ring_req *req;
int rc;
if (dev == NULL)
return -ENODEV;
tim_unregister_irq(tim_ring->ring_id);
req = otx2_mbox_alloc_msg_tim_lf_free(dev->mbox);
req->ring = tim_ring->ring_id;
rc = otx2_mbox_process(dev->mbox);
if (rc < 0) {
tim_err_desc(rc);
return -EBUSY;
}
rte_free(tim_ring->bkt);
rte_mempool_free(tim_ring->chunk_pool);
rte_free(adptr->data->adapter_priv);
return 0;
}
static int
otx2_tim_stats_get(const struct rte_event_timer_adapter *adapter,
struct rte_event_timer_adapter_stats *stats)
{
struct otx2_tim_ring *tim_ring = adapter->data->adapter_priv;
uint64_t bkt_cyc = tim_cntvct() - tim_ring->ring_start_cyc;
stats->evtim_exp_count = __atomic_load_n(&tim_ring->arm_cnt,
__ATOMIC_RELAXED);
stats->ev_enq_count = stats->evtim_exp_count;
stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
&tim_ring->fast_div);
return 0;
}
static int
otx2_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
struct otx2_tim_ring *tim_ring = adapter->data->adapter_priv;
__atomic_store_n(&tim_ring->arm_cnt, 0, __ATOMIC_RELAXED);
return 0;
}
int
otx2_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
uint32_t *caps, const struct event_timer_adapter_ops **ops)
{
struct otx2_tim_evdev *dev = tim_priv_get();
RTE_SET_USED(flags);
if (dev == NULL)
return -ENODEV;
otx2_tim_ops.init = otx2_tim_ring_create;
otx2_tim_ops.uninit = otx2_tim_ring_free;
otx2_tim_ops.start = otx2_tim_ring_start;
otx2_tim_ops.stop = otx2_tim_ring_stop;
otx2_tim_ops.get_info = otx2_tim_ring_info_get;
if (dev->enable_stats) {
otx2_tim_ops.stats_get = otx2_tim_stats_get;
otx2_tim_ops.stats_reset = otx2_tim_stats_reset;
}
/* Store evdev pointer for later use. */
dev->event_dev = (struct rte_eventdev *)(uintptr_t)evdev;
*caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT |
RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC;
*ops = &otx2_tim_ops;
return 0;
}
#define OTX2_TIM_DISABLE_NPA "tim_disable_npa"
#define OTX2_TIM_CHNK_SLOTS "tim_chnk_slots"
#define OTX2_TIM_STATS_ENA "tim_stats_ena"
#define OTX2_TIM_RINGS_LMT "tim_rings_lmt"
#define OTX2_TIM_RING_CTL "tim_ring_ctl"
static void
tim_parse_ring_param(char *value, void *opaque)
{
struct otx2_tim_evdev *dev = opaque;
struct otx2_tim_ctl ring_ctl = {0};
char *tok = strtok(value, "-");
struct otx2_tim_ctl *old_ptr;
uint16_t *val;
val = (uint16_t *)&ring_ctl;
if (!strlen(value))
return;
while (tok != NULL) {
*val = atoi(tok);
tok = strtok(NULL, "-");
val++;
}
if (val != (&ring_ctl.enable_stats + 1)) {
otx2_err(
"Invalid ring param expected [ring-chunk_sz-disable_npa-enable_stats]");
return;
}
dev->ring_ctl_cnt++;
old_ptr = dev->ring_ctl_data;
dev->ring_ctl_data = rte_realloc(dev->ring_ctl_data,
sizeof(struct otx2_tim_ctl) *
dev->ring_ctl_cnt, 0);
if (dev->ring_ctl_data == NULL) {
dev->ring_ctl_data = old_ptr;
dev->ring_ctl_cnt--;
return;
}
dev->ring_ctl_data[dev->ring_ctl_cnt - 1] = ring_ctl;
}
static void
tim_parse_ring_ctl_list(const char *value, void *opaque)
{
char *s = strdup(value);
char *start = NULL;
char *end = NULL;
char *f = s;
while (*s) {
if (*s == '[')
start = s;
else if (*s == ']')
end = s;
if (start && start < end) {
*end = 0;
tim_parse_ring_param(start + 1, opaque);
start = end;
s = end;
}
s++;
}
free(f);
}
static int
tim_parse_kvargs_dict(const char *key, const char *value, void *opaque)
{
RTE_SET_USED(key);
/* Dict format [ring-chunk_sz-disable_npa-enable_stats] use '-' as ','
* isn't allowed. 0 represents default.
*/
tim_parse_ring_ctl_list(value, opaque);
return 0;
}
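/* Illustrative (hypothetical) usage of this dict format on the EAL command
 * line, one bracketed tuple per ring:
 *   -a 0002:0e:00.0,tim_ring_ctl=[2-1023-1-0]
 * i.e. ring 2, 1023 chunk slots, NPA disabled, stats disabled; the PCI
 * address and values here are examples only.
 */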
static void
tim_parse_devargs(struct rte_devargs *devargs, struct otx2_tim_evdev *dev)
{
struct rte_kvargs *kvlist;
if (devargs == NULL)
return;
kvlist = rte_kvargs_parse(devargs->args, NULL);
if (kvlist == NULL)
return;
rte_kvargs_process(kvlist, OTX2_TIM_DISABLE_NPA,
&parse_kvargs_flag, &dev->disable_npa);
rte_kvargs_process(kvlist, OTX2_TIM_CHNK_SLOTS,
&parse_kvargs_value, &dev->chunk_slots);
rte_kvargs_process(kvlist, OTX2_TIM_STATS_ENA, &parse_kvargs_flag,
&dev->enable_stats);
rte_kvargs_process(kvlist, OTX2_TIM_RINGS_LMT, &parse_kvargs_value,
&dev->min_ring_cnt);
rte_kvargs_process(kvlist, OTX2_TIM_RING_CTL,
&tim_parse_kvargs_dict, &dev);
rte_kvargs_free(kvlist);
}
void
otx2_tim_init(struct rte_pci_device *pci_dev, struct otx2_dev *cmn_dev)
{
struct rsrc_attach_req *atch_req;
struct rsrc_detach_req *dtch_req;
struct free_rsrcs_rsp *rsrc_cnt;
const struct rte_memzone *mz;
struct otx2_tim_evdev *dev;
int rc;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return;
mz = rte_memzone_reserve(RTE_STR(OTX2_TIM_EVDEV_NAME),
sizeof(struct otx2_tim_evdev),
rte_socket_id(), 0);
if (mz == NULL) {
otx2_tim_dbg("Unable to allocate memory for TIM Event device");
return;
}
dev = mz->addr;
dev->pci_dev = pci_dev;
dev->mbox = cmn_dev->mbox;
dev->bar2 = cmn_dev->bar2;
tim_parse_devargs(pci_dev->device.devargs, dev);
otx2_mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
rc = otx2_mbox_process_msg(dev->mbox, (void *)&rsrc_cnt);
if (rc < 0) {
otx2_err("Unable to get free rsrc count.");
goto mz_free;
}
dev->nb_rings = dev->min_ring_cnt ?
RTE_MIN(dev->min_ring_cnt, rsrc_cnt->tim) : rsrc_cnt->tim;
if (!dev->nb_rings) {
otx2_tim_dbg("No TIM Logical functions provisioned.");
goto mz_free;
}
atch_req = otx2_mbox_alloc_msg_attach_resources(dev->mbox);
atch_req->modify = true;
atch_req->timlfs = dev->nb_rings;
rc = otx2_mbox_process(dev->mbox);
if (rc < 0) {
otx2_err("Unable to attach TIM rings.");
goto mz_free;
}
rc = tim_get_msix_offsets();
if (rc < 0) {
otx2_err("Unable to get MSIX offsets for TIM.");
goto detach;
}
if (dev->chunk_slots &&
dev->chunk_slots <= OTX2_TIM_MAX_CHUNK_SLOTS &&
dev->chunk_slots >= OTX2_TIM_MIN_CHUNK_SLOTS) {
dev->chunk_sz = (dev->chunk_slots + 1) *
OTX2_TIM_CHUNK_ALIGNMENT;
} else {
dev->chunk_sz = OTX2_TIM_RING_DEF_CHUNK_SZ;
}
return;
detach:
dtch_req = otx2_mbox_alloc_msg_detach_resources(dev->mbox);
dtch_req->partial = true;
dtch_req->timlfs = true;
otx2_mbox_process(dev->mbox);
mz_free:
rte_memzone_free(mz);
}
void
otx2_tim_fini(void)
{
struct otx2_tim_evdev *dev = tim_priv_get();
struct rsrc_detach_req *dtch_req;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return;
dtch_req = otx2_mbox_alloc_msg_detach_resources(dev->mbox);
dtch_req->partial = true;
dtch_req->timlfs = true;
otx2_mbox_process(dev->mbox);
rte_memzone_free(rte_memzone_lookup(RTE_STR(OTX2_TIM_EVDEV_NAME)));
}


@ -1,256 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef __OTX2_TIM_EVDEV_H__
#define __OTX2_TIM_EVDEV_H__
#include <event_timer_adapter_pmd.h>
#include <rte_event_timer_adapter.h>
#include <rte_reciprocal.h>
#include "otx2_dev.h"
#define OTX2_TIM_EVDEV_NAME otx2_tim_eventdev
#define otx2_tim_func_trace otx2_tim_dbg
#define TIM_LF_RING_AURA (0x0)
#define TIM_LF_RING_BASE (0x130)
#define TIM_LF_NRSPERR_INT (0x200)
#define TIM_LF_NRSPERR_INT_W1S (0x208)
#define TIM_LF_NRSPERR_INT_ENA_W1S (0x210)
#define TIM_LF_NRSPERR_INT_ENA_W1C (0x218)
#define TIM_LF_RAS_INT (0x300)
#define TIM_LF_RAS_INT_W1S (0x308)
#define TIM_LF_RAS_INT_ENA_W1S (0x310)
#define TIM_LF_RAS_INT_ENA_W1C (0x318)
#define TIM_LF_RING_REL (0x400)
#define TIM_BUCKET_W1_S_CHUNK_REMAINDER (48)
#define TIM_BUCKET_W1_M_CHUNK_REMAINDER ((1ULL << (64 - \
TIM_BUCKET_W1_S_CHUNK_REMAINDER)) - 1)
#define TIM_BUCKET_W1_S_LOCK (40)
#define TIM_BUCKET_W1_M_LOCK ((1ULL << \
(TIM_BUCKET_W1_S_CHUNK_REMAINDER - \
TIM_BUCKET_W1_S_LOCK)) - 1)
#define TIM_BUCKET_W1_S_RSVD (35)
#define TIM_BUCKET_W1_S_BSK (34)
#define TIM_BUCKET_W1_M_BSK ((1ULL << \
(TIM_BUCKET_W1_S_RSVD - \
TIM_BUCKET_W1_S_BSK)) - 1)
#define TIM_BUCKET_W1_S_HBT (33)
#define TIM_BUCKET_W1_M_HBT ((1ULL << \
(TIM_BUCKET_W1_S_BSK - \
TIM_BUCKET_W1_S_HBT)) - 1)
#define TIM_BUCKET_W1_S_SBT (32)
#define TIM_BUCKET_W1_M_SBT ((1ULL << \
(TIM_BUCKET_W1_S_HBT - \
TIM_BUCKET_W1_S_SBT)) - 1)
#define TIM_BUCKET_W1_S_NUM_ENTRIES (0)
#define TIM_BUCKET_W1_M_NUM_ENTRIES ((1ULL << \
(TIM_BUCKET_W1_S_SBT - \
TIM_BUCKET_W1_S_NUM_ENTRIES)) - 1)
#define TIM_BUCKET_SEMA (TIM_BUCKET_CHUNK_REMAIN)
#define TIM_BUCKET_CHUNK_REMAIN \
(TIM_BUCKET_W1_M_CHUNK_REMAINDER << TIM_BUCKET_W1_S_CHUNK_REMAINDER)
#define TIM_BUCKET_LOCK \
(TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK)
#define TIM_BUCKET_SEMA_WLOCK \
(TIM_BUCKET_CHUNK_REMAIN | (1ull << TIM_BUCKET_W1_S_LOCK))
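/* Taken together, the shifts and masks above describe bucket word w1 as:
 * [63:48] chunk_remainder, [47:40] lock, [39:35] reserved, [34] bsk,
 * [33] hbt, [32] sbt, [31:0] num_entries - the same layout as the
 * bit-field view in struct otx2_tim_bkt below.
 */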
#define OTX2_MAX_TIM_RINGS (256)
#define OTX2_TIM_MAX_BUCKETS (0xFFFFF)
#define OTX2_TIM_RING_DEF_CHUNK_SZ (4096)
#define OTX2_TIM_CHUNK_ALIGNMENT (16)
#define OTX2_TIM_MAX_BURST (RTE_CACHE_LINE_SIZE / \
OTX2_TIM_CHUNK_ALIGNMENT)
#define OTX2_TIM_NB_CHUNK_SLOTS(sz) (((sz) / OTX2_TIM_CHUNK_ALIGNMENT) - 1)
#define OTX2_TIM_MIN_CHUNK_SLOTS (0x8)
#define OTX2_TIM_MAX_CHUNK_SLOTS (0x1FFE)
#define OTX2_TIM_MIN_TMO_TKS (256)
#define OTX2_TIM_SP 0x1
#define OTX2_TIM_MP 0x2
#define OTX2_TIM_ENA_FB 0x10
#define OTX2_TIM_ENA_DFB 0x20
#define OTX2_TIM_ENA_STATS 0x40
enum otx2_tim_clk_src {
OTX2_TIM_CLK_SRC_10NS = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
OTX2_TIM_CLK_SRC_GPIO = RTE_EVENT_TIMER_ADAPTER_EXT_CLK0,
OTX2_TIM_CLK_SRC_GTI = RTE_EVENT_TIMER_ADAPTER_EXT_CLK1,
OTX2_TIM_CLK_SRC_PTP = RTE_EVENT_TIMER_ADAPTER_EXT_CLK2,
};
struct otx2_tim_bkt {
uint64_t first_chunk;
union {
uint64_t w1;
struct {
uint32_t nb_entry;
uint8_t sbt:1;
uint8_t hbt:1;
uint8_t bsk:1;
uint8_t rsvd:5;
uint8_t lock;
int16_t chunk_remainder;
};
};
uint64_t current_chunk;
uint64_t pad;
} __rte_packed __rte_aligned(32);
struct otx2_tim_ent {
uint64_t w0;
uint64_t wqe;
} __rte_packed;
struct otx2_tim_ctl {
uint16_t ring;
uint16_t chunk_slots;
uint16_t disable_npa;
uint16_t enable_stats;
};
struct otx2_tim_evdev {
struct rte_pci_device *pci_dev;
struct rte_eventdev *event_dev;
struct otx2_mbox *mbox;
uint16_t nb_rings;
uint32_t chunk_sz;
uintptr_t bar2;
/* Dev args */
uint8_t disable_npa;
uint16_t chunk_slots;
uint16_t min_ring_cnt;
uint8_t enable_stats;
uint16_t ring_ctl_cnt;
struct otx2_tim_ctl *ring_ctl_data;
/* HW const */
/* MSIX offsets */
uint16_t tim_msixoff[OTX2_MAX_TIM_RINGS];
};
struct otx2_tim_ring {
uintptr_t base;
uint16_t nb_chunk_slots;
uint32_t nb_bkts;
uint64_t last_updt_cyc;
uint64_t ring_start_cyc;
uint64_t tck_int;
uint64_t tot_int;
struct otx2_tim_bkt *bkt;
struct rte_mempool *chunk_pool;
struct rte_reciprocal_u64 fast_div;
struct rte_reciprocal_u64 fast_bkt;
uint64_t arm_cnt;
uint8_t prod_type_sp;
uint8_t enable_stats;
uint8_t disable_npa;
uint8_t ena_dfb;
uint8_t ena_periodic;
uint16_t ring_id;
uint32_t aura;
uint64_t nb_timers;
uint64_t tck_nsec;
uint64_t max_tout;
uint64_t nb_chunks;
uint64_t chunk_sz;
uint64_t tenns_clk_freq;
enum otx2_tim_clk_src clk_src;
} __rte_cache_aligned;
static inline struct otx2_tim_evdev *
tim_priv_get(void)
{
const struct rte_memzone *mz;
mz = rte_memzone_lookup(RTE_STR(OTX2_TIM_EVDEV_NAME));
if (mz == NULL)
return NULL;
return mz->addr;
}
#ifdef RTE_ARCH_ARM64
static inline uint64_t
tim_cntvct(void)
{
return __rte_arm64_cntvct();
}
static inline uint64_t
tim_cntfrq(void)
{
return __rte_arm64_cntfrq();
}
#else
static inline uint64_t
tim_cntvct(void)
{
return 0;
}
static inline uint64_t
tim_cntfrq(void)
{
return 0;
}
#endif
#define TIM_ARM_FASTPATH_MODES \
FP(sp, 0, 0, 0, OTX2_TIM_ENA_DFB | OTX2_TIM_SP) \
FP(mp, 0, 0, 1, OTX2_TIM_ENA_DFB | OTX2_TIM_MP) \
FP(fb_sp, 0, 1, 0, OTX2_TIM_ENA_FB | OTX2_TIM_SP) \
FP(fb_mp, 0, 1, 1, OTX2_TIM_ENA_FB | OTX2_TIM_MP) \
FP(stats_mod_sp, 1, 0, 0, \
OTX2_TIM_ENA_STATS | OTX2_TIM_ENA_DFB | OTX2_TIM_SP) \
FP(stats_mod_mp, 1, 0, 1, \
OTX2_TIM_ENA_STATS | OTX2_TIM_ENA_DFB | OTX2_TIM_MP) \
FP(stats_mod_fb_sp, 1, 1, 0, \
OTX2_TIM_ENA_STATS | OTX2_TIM_ENA_FB | OTX2_TIM_SP) \
FP(stats_mod_fb_mp, 1, 1, 1, \
OTX2_TIM_ENA_STATS | OTX2_TIM_ENA_FB | OTX2_TIM_MP)
#define TIM_ARM_TMO_FASTPATH_MODES \
FP(dfb, 0, 0, OTX2_TIM_ENA_DFB) \
FP(fb, 0, 1, OTX2_TIM_ENA_FB) \
FP(stats_dfb, 1, 0, OTX2_TIM_ENA_STATS | OTX2_TIM_ENA_DFB) \
FP(stats_fb, 1, 1, OTX2_TIM_ENA_STATS | OTX2_TIM_ENA_FB)
#define FP(_name, _f3, _f2, _f1, flags) \
uint16_t otx2_tim_arm_burst_##_name( \
const struct rte_event_timer_adapter *adptr, \
struct rte_event_timer **tim, const uint16_t nb_timers);
TIM_ARM_FASTPATH_MODES
#undef FP
#define FP(_name, _f2, _f1, flags) \
uint16_t otx2_tim_arm_tmo_tick_burst_##_name( \
const struct rte_event_timer_adapter *adptr, \
struct rte_event_timer **tim, const uint64_t timeout_tick, \
const uint16_t nb_timers);
TIM_ARM_TMO_FASTPATH_MODES
#undef FP
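/* Each FP(name, ...) entry in the lists above expands the prototypes
 * generated here into one specialised handler per flag combination,
 * e.g. FP(sp, 0, 0, 0, ...) yields otx2_tim_arm_burst_sp(); the same
 * lists populate the handler tables selected in tim_set_fp_ops().
 */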
uint16_t otx2_tim_timer_cancel_burst(
const struct rte_event_timer_adapter *adptr,
struct rte_event_timer **tim, const uint16_t nb_timers);
int otx2_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
uint32_t *caps,
const struct event_timer_adapter_ops **ops);
void otx2_tim_init(struct rte_pci_device *pci_dev, struct otx2_dev *cmn_dev);
void otx2_tim_fini(void);
/* TIM IRQ */
int tim_register_irq(uint16_t ring_id);
void tim_unregister_irq(uint16_t ring_id);
#endif /* __OTX2_TIM_EVDEV_H__ */


@ -1,192 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#include "otx2_tim_evdev.h"
#include "otx2_tim_worker.h"
static inline int
tim_arm_checks(const struct otx2_tim_ring * const tim_ring,
struct rte_event_timer * const tim)
{
if (unlikely(tim->state)) {
tim->state = RTE_EVENT_TIMER_ERROR;
rte_errno = EALREADY;
goto fail;
}
if (unlikely(!tim->timeout_ticks ||
tim->timeout_ticks >= tim_ring->nb_bkts)) {
tim->state = tim->timeout_ticks ? RTE_EVENT_TIMER_ERROR_TOOLATE
: RTE_EVENT_TIMER_ERROR_TOOEARLY;
rte_errno = EINVAL;
goto fail;
}
return 0;
fail:
return -EINVAL;
}
static inline void
tim_format_event(const struct rte_event_timer * const tim,
struct otx2_tim_ent * const entry)
{
entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
(tim->ev.event & 0xFFFFFFFFF);
entry->wqe = tim->ev.u64;
}
static inline void
tim_sync_start_cyc(struct otx2_tim_ring *tim_ring)
{
uint64_t cur_cyc = tim_cntvct();
uint32_t real_bkt;
if (cur_cyc - tim_ring->last_updt_cyc > tim_ring->tot_int) {
real_bkt = otx2_read64(tim_ring->base + TIM_LF_RING_REL) >> 44;
cur_cyc = tim_cntvct();
tim_ring->ring_start_cyc = cur_cyc -
(real_bkt * tim_ring->tck_int);
tim_ring->last_updt_cyc = cur_cyc;
}
}
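/* Resynchronisation happens only after more than one full ring rotation
 * (tot_int) without an update: the bucket the hardware is currently on is
 * read back from TIM_LF_RING_REL (bits [63:44]) and ring_start_cyc is
 * re-anchored to it so the software bucket arithmetic stays aligned with
 * the hardware traversal.
 */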
static __rte_always_inline uint16_t
tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr,
struct rte_event_timer **tim,
const uint16_t nb_timers,
const uint8_t flags)
{
struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
struct otx2_tim_ent entry;
uint16_t index;
int ret;
tim_sync_start_cyc(tim_ring);
for (index = 0; index < nb_timers; index++) {
if (tim_arm_checks(tim_ring, tim[index]))
break;
tim_format_event(tim[index], &entry);
if (flags & OTX2_TIM_SP)
ret = tim_add_entry_sp(tim_ring,
tim[index]->timeout_ticks,
tim[index], &entry, flags);
if (flags & OTX2_TIM_MP)
ret = tim_add_entry_mp(tim_ring,
tim[index]->timeout_ticks,
tim[index], &entry, flags);
if (unlikely(ret)) {
rte_errno = -ret;
break;
}
}
if (flags & OTX2_TIM_ENA_STATS)
__atomic_fetch_add(&tim_ring->arm_cnt, index, __ATOMIC_RELAXED);
return index;
}
static __rte_always_inline uint16_t
tim_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
struct rte_event_timer **tim,
const uint64_t timeout_tick,
const uint16_t nb_timers, const uint8_t flags)
{
struct otx2_tim_ent entry[OTX2_TIM_MAX_BURST] __rte_cache_aligned;
struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
uint16_t set_timers = 0;
uint16_t arr_idx = 0;
uint16_t idx;
int ret;
if (unlikely(!timeout_tick || timeout_tick >= tim_ring->nb_bkts)) {
const enum rte_event_timer_state state = timeout_tick ?
RTE_EVENT_TIMER_ERROR_TOOLATE :
RTE_EVENT_TIMER_ERROR_TOOEARLY;
for (idx = 0; idx < nb_timers; idx++)
tim[idx]->state = state;
rte_errno = EINVAL;
return 0;
}
tim_sync_start_cyc(tim_ring);
while (arr_idx < nb_timers) {
for (idx = 0; idx < OTX2_TIM_MAX_BURST && (arr_idx < nb_timers);
idx++, arr_idx++) {
tim_format_event(tim[arr_idx], &entry[idx]);
}
ret = tim_add_entry_brst(tim_ring, timeout_tick,
&tim[set_timers], entry, idx, flags);
set_timers += ret;
if (ret != idx)
break;
}
if (flags & OTX2_TIM_ENA_STATS)
__atomic_fetch_add(&tim_ring->arm_cnt, set_timers,
__ATOMIC_RELAXED);
return set_timers;
}
#define FP(_name, _f3, _f2, _f1, _flags) \
uint16_t __rte_noinline \
otx2_tim_arm_burst_ ## _name(const struct rte_event_timer_adapter *adptr, \
struct rte_event_timer **tim, \
const uint16_t nb_timers) \
{ \
return tim_timer_arm_burst(adptr, tim, nb_timers, _flags); \
}
TIM_ARM_FASTPATH_MODES
#undef FP
#define FP(_name, _f2, _f1, _flags) \
uint16_t __rte_noinline \
otx2_tim_arm_tmo_tick_burst_ ## _name( \
const struct rte_event_timer_adapter *adptr, \
struct rte_event_timer **tim, \
const uint64_t timeout_tick, \
const uint16_t nb_timers) \
{ \
return tim_timer_arm_tmo_brst(adptr, tim, timeout_tick, \
nb_timers, _flags); \
}
TIM_ARM_TMO_FASTPATH_MODES
#undef FP
uint16_t
otx2_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
struct rte_event_timer **tim,
const uint16_t nb_timers)
{
uint16_t index;
int ret;
RTE_SET_USED(adptr);
rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
for (index = 0; index < nb_timers; index++) {
if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
rte_errno = EALREADY;
break;
}
if (tim[index]->state != RTE_EVENT_TIMER_ARMED) {
rte_errno = EINVAL;
break;
}
ret = tim_rm_entry(tim[index]);
if (ret) {
rte_errno = -ret;
break;
}
}
return index;
}


@ -1,598 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef __OTX2_TIM_WORKER_H__
#define __OTX2_TIM_WORKER_H__
#include "otx2_tim_evdev.h"
static inline uint8_t
tim_bkt_fetch_lock(uint64_t w1)
{
return (w1 >> TIM_BUCKET_W1_S_LOCK) &
TIM_BUCKET_W1_M_LOCK;
}
static inline int16_t
tim_bkt_fetch_rem(uint64_t w1)
{
return (w1 >> TIM_BUCKET_W1_S_CHUNK_REMAINDER) &
TIM_BUCKET_W1_M_CHUNK_REMAINDER;
}
static inline int16_t
tim_bkt_get_rem(struct otx2_tim_bkt *bktp)
{
return __atomic_load_n(&bktp->chunk_remainder, __ATOMIC_ACQUIRE);
}
static inline void
tim_bkt_set_rem(struct otx2_tim_bkt *bktp, uint16_t v)
{
__atomic_store_n(&bktp->chunk_remainder, v, __ATOMIC_RELAXED);
}
static inline void
tim_bkt_sub_rem(struct otx2_tim_bkt *bktp, uint16_t v)
{
__atomic_fetch_sub(&bktp->chunk_remainder, v, __ATOMIC_RELAXED);
}
static inline uint8_t
tim_bkt_get_hbt(uint64_t w1)
{
return (w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT;
}
static inline uint8_t
tim_bkt_get_bsk(uint64_t w1)
{
return (w1 >> TIM_BUCKET_W1_S_BSK) & TIM_BUCKET_W1_M_BSK;
}
static inline uint64_t
tim_bkt_clr_bsk(struct otx2_tim_bkt *bktp)
{
/* Clear everything except lock. */
const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
}
static inline uint64_t
tim_bkt_fetch_sema_lock(struct otx2_tim_bkt *bktp)
{
return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
__ATOMIC_ACQUIRE);
}
static inline uint64_t
tim_bkt_fetch_sema(struct otx2_tim_bkt *bktp)
{
return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA, __ATOMIC_RELAXED);
}
static inline uint64_t
tim_bkt_inc_lock(struct otx2_tim_bkt *bktp)
{
const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQUIRE);
}
static inline void
tim_bkt_dec_lock(struct otx2_tim_bkt *bktp)
{
__atomic_fetch_sub(&bktp->lock, 1, __ATOMIC_RELEASE);
}
static inline void
tim_bkt_dec_lock_relaxed(struct otx2_tim_bkt *bktp)
{
__atomic_fetch_sub(&bktp->lock, 1, __ATOMIC_RELAXED);
}
static inline uint32_t
tim_bkt_get_nent(uint64_t w1)
{
return (w1 >> TIM_BUCKET_W1_S_NUM_ENTRIES) &
TIM_BUCKET_W1_M_NUM_ENTRIES;
}
static inline void
tim_bkt_inc_nent(struct otx2_tim_bkt *bktp)
{
__atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
}
static inline void
tim_bkt_add_nent(struct otx2_tim_bkt *bktp, uint32_t v)
{
__atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED);
}
static inline uint64_t
tim_bkt_clr_nent(struct otx2_tim_bkt *bktp)
{
const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
TIM_BUCKET_W1_S_NUM_ENTRIES);
return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL);
}
static inline uint64_t
tim_bkt_fast_mod(uint64_t n, uint64_t d, struct rte_reciprocal_u64 R)
{
return (n - (d * rte_reciprocal_divide_u64(n, &R)));
}
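/* Modular reduction without a hardware divide: rte_reciprocal_divide_u64(n, &R)
 * returns n / d for a reciprocal R precomputed from d, so n - d * (n / d) is
 * exactly n % d; e.g. d = 96, n = 1000 gives 1000 - 96 * 10 = 40.
 */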
static __rte_always_inline void
tim_get_target_bucket(struct otx2_tim_ring *const tim_ring,
const uint32_t rel_bkt, struct otx2_tim_bkt **bkt,
struct otx2_tim_bkt **mirr_bkt)
{
const uint64_t bkt_cyc = tim_cntvct() - tim_ring->ring_start_cyc;
uint64_t bucket =
rte_reciprocal_divide_u64(bkt_cyc, &tim_ring->fast_div) +
rel_bkt;
uint64_t mirr_bucket = 0;
bucket =
tim_bkt_fast_mod(bucket, tim_ring->nb_bkts, tim_ring->fast_bkt);
mirr_bucket = tim_bkt_fast_mod(bucket + (tim_ring->nb_bkts >> 1),
tim_ring->nb_bkts, tim_ring->fast_bkt);
*bkt = &tim_ring->bkt[bucket];
*mirr_bkt = &tim_ring->bkt[mirr_bucket];
}
static struct otx2_tim_ent *
tim_clr_bkt(struct otx2_tim_ring * const tim_ring,
struct otx2_tim_bkt * const bkt)
{
#define TIM_MAX_OUTSTANDING_OBJ 64
void *pend_chunks[TIM_MAX_OUTSTANDING_OBJ];
struct otx2_tim_ent *chunk;
struct otx2_tim_ent *pnext;
uint8_t objs = 0;
chunk = ((struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk);
chunk = (struct otx2_tim_ent *)(uintptr_t)(chunk +
tim_ring->nb_chunk_slots)->w0;
while (chunk) {
pnext = (struct otx2_tim_ent *)(uintptr_t)
((chunk + tim_ring->nb_chunk_slots)->w0);
if (objs == TIM_MAX_OUTSTANDING_OBJ) {
rte_mempool_put_bulk(tim_ring->chunk_pool, pend_chunks,
objs);
objs = 0;
}
pend_chunks[objs++] = chunk;
chunk = pnext;
}
if (objs)
rte_mempool_put_bulk(tim_ring->chunk_pool, pend_chunks,
objs);
return (struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk;
}
static struct otx2_tim_ent *
tim_refill_chunk(struct otx2_tim_bkt * const bkt,
struct otx2_tim_bkt * const mirr_bkt,
struct otx2_tim_ring * const tim_ring)
{
struct otx2_tim_ent *chunk;
if (bkt->nb_entry || !bkt->first_chunk) {
if (unlikely(rte_mempool_get(tim_ring->chunk_pool,
(void **)&chunk)))
return NULL;
if (bkt->nb_entry) {
*(uint64_t *)(((struct otx2_tim_ent *)
mirr_bkt->current_chunk) +
tim_ring->nb_chunk_slots) =
(uintptr_t)chunk;
} else {
bkt->first_chunk = (uintptr_t)chunk;
}
} else {
chunk = tim_clr_bkt(tim_ring, bkt);
bkt->first_chunk = (uintptr_t)chunk;
}
*(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
return chunk;
}
static struct otx2_tim_ent *
tim_insert_chunk(struct otx2_tim_bkt * const bkt,
struct otx2_tim_bkt * const mirr_bkt,
struct otx2_tim_ring * const tim_ring)
{
struct otx2_tim_ent *chunk;
if (unlikely(rte_mempool_get(tim_ring->chunk_pool, (void **)&chunk)))
return NULL;
*(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
if (bkt->nb_entry) {
*(uint64_t *)(((struct otx2_tim_ent *)(uintptr_t)
mirr_bkt->current_chunk) +
tim_ring->nb_chunk_slots) = (uintptr_t)chunk;
} else {
bkt->first_chunk = (uintptr_t)chunk;
}
return chunk;
}
static __rte_always_inline int
tim_add_entry_sp(struct otx2_tim_ring * const tim_ring,
const uint32_t rel_bkt,
struct rte_event_timer * const tim,
const struct otx2_tim_ent * const pent,
const uint8_t flags)
{
struct otx2_tim_bkt *mirr_bkt;
struct otx2_tim_ent *chunk;
struct otx2_tim_bkt *bkt;
uint64_t lock_sema;
int16_t rem;
__retry:
tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt);
/* Get Bucket sema*/
lock_sema = tim_bkt_fetch_sema_lock(bkt);
/* Bucket related checks. */
if (unlikely(tim_bkt_get_hbt(lock_sema))) {
if (tim_bkt_get_nent(lock_sema) != 0) {
uint64_t hbt_state;
#ifdef RTE_ARCH_ARM64
asm volatile(" ldxr %[hbt], [%[w1]] \n"
" tbz %[hbt], 33, dne%= \n"
" sevl \n"
"rty%=: wfe \n"
" ldxr %[hbt], [%[w1]] \n"
" tbnz %[hbt], 33, rty%= \n"
"dne%=: \n"
: [hbt] "=&r"(hbt_state)
: [w1] "r"((&bkt->w1))
: "memory");
#else
do {
hbt_state = __atomic_load_n(&bkt->w1,
__ATOMIC_RELAXED);
} while (hbt_state & BIT_ULL(33));
#endif
if (!(hbt_state & BIT_ULL(34))) {
tim_bkt_dec_lock(bkt);
goto __retry;
}
}
}
/* Insert the work. */
rem = tim_bkt_fetch_rem(lock_sema);
if (!rem) {
if (flags & OTX2_TIM_ENA_FB)
chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring);
if (flags & OTX2_TIM_ENA_DFB)
chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);
if (unlikely(chunk == NULL)) {
bkt->chunk_remainder = 0;
tim->impl_opaque[0] = 0;
tim->impl_opaque[1] = 0;
tim->state = RTE_EVENT_TIMER_ERROR;
tim_bkt_dec_lock(bkt);
return -ENOMEM;
}
mirr_bkt->current_chunk = (uintptr_t)chunk;
bkt->chunk_remainder = tim_ring->nb_chunk_slots - 1;
} else {
chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk;
chunk += tim_ring->nb_chunk_slots - rem;
}
/* Copy work entry. */
*chunk = *pent;
tim->impl_opaque[0] = (uintptr_t)chunk;
tim->impl_opaque[1] = (uintptr_t)bkt;
__atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE);
tim_bkt_inc_nent(bkt);
tim_bkt_dec_lock_relaxed(bkt);
return 0;
}
static __rte_always_inline int
tim_add_entry_mp(struct otx2_tim_ring * const tim_ring,
const uint32_t rel_bkt,
struct rte_event_timer * const tim,
const struct otx2_tim_ent * const pent,
const uint8_t flags)
{
struct otx2_tim_bkt *mirr_bkt;
struct otx2_tim_ent *chunk;
struct otx2_tim_bkt *bkt;
uint64_t lock_sema;
int16_t rem;
__retry:
tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt);
/* Get Bucket sema*/
lock_sema = tim_bkt_fetch_sema_lock(bkt);
/* Bucket related checks. */
if (unlikely(tim_bkt_get_hbt(lock_sema))) {
if (tim_bkt_get_nent(lock_sema) != 0) {
uint64_t hbt_state;
#ifdef RTE_ARCH_ARM64
asm volatile(" ldxr %[hbt], [%[w1]] \n"
" tbz %[hbt], 33, dne%= \n"
" sevl \n"
"rty%=: wfe \n"
" ldxr %[hbt], [%[w1]] \n"
" tbnz %[hbt], 33, rty%= \n"
"dne%=: \n"
: [hbt] "=&r"(hbt_state)
: [w1] "r"((&bkt->w1))
: "memory");
#else
do {
hbt_state = __atomic_load_n(&bkt->w1,
__ATOMIC_RELAXED);
} while (hbt_state & BIT_ULL(33));
#endif
if (!(hbt_state & BIT_ULL(34))) {
tim_bkt_dec_lock(bkt);
goto __retry;
}
}
}
rem = tim_bkt_fetch_rem(lock_sema);
if (rem < 0) {
tim_bkt_dec_lock(bkt);
#ifdef RTE_ARCH_ARM64
uint64_t w1;
asm volatile(" ldxr %[w1], [%[crem]] \n"
" tbz %[w1], 63, dne%= \n"
" sevl \n"
"rty%=: wfe \n"
" ldxr %[w1], [%[crem]] \n"
" tbnz %[w1], 63, rty%= \n"
"dne%=: \n"
: [w1] "=&r"(w1)
: [crem] "r"(&bkt->w1)
: "memory");
#else
while (__atomic_load_n((int64_t *)&bkt->w1, __ATOMIC_RELAXED) <
0)
;
#endif
goto __retry;
} else if (!rem) {
/* Only one thread can be here*/
if (flags & OTX2_TIM_ENA_FB)
chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring);
if (flags & OTX2_TIM_ENA_DFB)
chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);
if (unlikely(chunk == NULL)) {
tim->impl_opaque[0] = 0;
tim->impl_opaque[1] = 0;
tim->state = RTE_EVENT_TIMER_ERROR;
tim_bkt_set_rem(bkt, 0);
tim_bkt_dec_lock(bkt);
return -ENOMEM;
}
*chunk = *pent;
if (tim_bkt_fetch_lock(lock_sema)) {
do {
lock_sema = __atomic_load_n(&bkt->w1,
__ATOMIC_RELAXED);
} while (tim_bkt_fetch_lock(lock_sema) - 1);
rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
}
mirr_bkt->current_chunk = (uintptr_t)chunk;
__atomic_store_n(&bkt->chunk_remainder,
tim_ring->nb_chunk_slots - 1, __ATOMIC_RELEASE);
} else {
chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk;
chunk += tim_ring->nb_chunk_slots - rem;
*chunk = *pent;
}
tim->impl_opaque[0] = (uintptr_t)chunk;
tim->impl_opaque[1] = (uintptr_t)bkt;
__atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE);
tim_bkt_inc_nent(bkt);
tim_bkt_dec_lock_relaxed(bkt);
return 0;
}
static inline uint16_t
tim_cpy_wrk(uint16_t index, uint16_t cpy_lmt,
struct otx2_tim_ent *chunk,
struct rte_event_timer ** const tim,
const struct otx2_tim_ent * const ents,
const struct otx2_tim_bkt * const bkt)
{
for (; index < cpy_lmt; index++) {
*chunk = *(ents + index);
tim[index]->impl_opaque[0] = (uintptr_t)chunk++;
tim[index]->impl_opaque[1] = (uintptr_t)bkt;
tim[index]->state = RTE_EVENT_TIMER_ARMED;
}
return index;
}
/* Burst mode functions */
static inline int
tim_add_entry_brst(struct otx2_tim_ring * const tim_ring,
const uint16_t rel_bkt,
struct rte_event_timer ** const tim,
const struct otx2_tim_ent *ents,
const uint16_t nb_timers, const uint8_t flags)
{
struct otx2_tim_ent *chunk = NULL;
struct otx2_tim_bkt *mirr_bkt;
struct otx2_tim_bkt *bkt;
uint16_t chunk_remainder;
uint16_t index = 0;
uint64_t lock_sema;
int16_t rem, crem;
uint8_t lock_cnt;
__retry:
tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt);
/* Only one thread beyond this. */
lock_sema = tim_bkt_inc_lock(bkt);
lock_cnt = (uint8_t)
((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK);
if (lock_cnt) {
tim_bkt_dec_lock(bkt);
#ifdef RTE_ARCH_ARM64
asm volatile(" ldxrb %w[lock_cnt], [%[lock]] \n"
" tst %w[lock_cnt], 255 \n"
" beq dne%= \n"
" sevl \n"
"rty%=: wfe \n"
" ldxrb %w[lock_cnt], [%[lock]] \n"
" tst %w[lock_cnt], 255 \n"
" bne rty%= \n"
"dne%=: \n"
: [lock_cnt] "=&r"(lock_cnt)
: [lock] "r"(&bkt->lock)
: "memory");
#else
while (__atomic_load_n(&bkt->lock, __ATOMIC_RELAXED))
;
#endif
goto __retry;
}
/* Bucket related checks. */
if (unlikely(tim_bkt_get_hbt(lock_sema))) {
if (tim_bkt_get_nent(lock_sema) != 0) {
uint64_t hbt_state;
#ifdef RTE_ARCH_ARM64
asm volatile(" ldxr %[hbt], [%[w1]] \n"
" tbz %[hbt], 33, dne%= \n"
" sevl \n"
"rty%=: wfe \n"
" ldxr %[hbt], [%[w1]] \n"
" tbnz %[hbt], 33, rty%= \n"
"dne%=: \n"
: [hbt] "=&r"(hbt_state)
: [w1] "r"((&bkt->w1))
: "memory");
#else
do {
hbt_state = __atomic_load_n(&bkt->w1,
__ATOMIC_RELAXED);
} while (hbt_state & BIT_ULL(33));
#endif
if (!(hbt_state & BIT_ULL(34))) {
tim_bkt_dec_lock(bkt);
goto __retry;
}
}
}
chunk_remainder = tim_bkt_fetch_rem(lock_sema);
rem = chunk_remainder - nb_timers;
if (rem < 0) {
crem = tim_ring->nb_chunk_slots - chunk_remainder;
if (chunk_remainder && crem) {
chunk = ((struct otx2_tim_ent *)
mirr_bkt->current_chunk) + crem;
index = tim_cpy_wrk(index, chunk_remainder, chunk, tim,
ents, bkt);
tim_bkt_sub_rem(bkt, chunk_remainder);
tim_bkt_add_nent(bkt, chunk_remainder);
}
if (flags & OTX2_TIM_ENA_FB)
chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring);
if (flags & OTX2_TIM_ENA_DFB)
chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);
if (unlikely(chunk == NULL)) {
tim_bkt_dec_lock(bkt);
rte_errno = ENOMEM;
tim[index]->state = RTE_EVENT_TIMER_ERROR;
return crem;
}
*(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
mirr_bkt->current_chunk = (uintptr_t)chunk;
tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
rem = nb_timers - chunk_remainder;
tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - rem);
tim_bkt_add_nent(bkt, rem);
} else {
chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk;
chunk += (tim_ring->nb_chunk_slots - chunk_remainder);
tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
tim_bkt_sub_rem(bkt, nb_timers);
tim_bkt_add_nent(bkt, nb_timers);
}
tim_bkt_dec_lock(bkt);
return nb_timers;
}
static int
tim_rm_entry(struct rte_event_timer *tim)
{
struct otx2_tim_ent *entry;
struct otx2_tim_bkt *bkt;
uint64_t lock_sema;
if (tim->impl_opaque[1] == 0 || tim->impl_opaque[0] == 0)
return -ENOENT;
entry = (struct otx2_tim_ent *)(uintptr_t)tim->impl_opaque[0];
if (entry->wqe != tim->ev.u64) {
tim->impl_opaque[0] = 0;
tim->impl_opaque[1] = 0;
return -ENOENT;
}
bkt = (struct otx2_tim_bkt *)(uintptr_t)tim->impl_opaque[1];
lock_sema = tim_bkt_inc_lock(bkt);
if (tim_bkt_get_hbt(lock_sema) || !tim_bkt_get_nent(lock_sema)) {
tim->impl_opaque[0] = 0;
tim->impl_opaque[1] = 0;
tim_bkt_dec_lock(bkt);
return -ENOENT;
}
entry->w0 = 0;
entry->wqe = 0;
tim->state = RTE_EVENT_TIMER_CANCELED;
tim->impl_opaque[0] = 0;
tim->impl_opaque[1] = 0;
tim_bkt_dec_lock(bkt);
return 0;
}
#endif /* __OTX2_TIM_WORKER_H__ */


@ -1,372 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#include "otx2_worker.h"
static __rte_noinline uint8_t
otx2_ssogws_new_event(struct otx2_ssogws *ws, const struct rte_event *ev)
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
const uint64_t event_ptr = ev->u64;
const uint16_t grp = ev->queue_id;
if (ws->xaq_lmt <= *ws->fc_mem)
return 0;
otx2_ssogws_add_work(ws, event_ptr, tag, new_tt, grp);
return 1;
}
static __rte_always_inline void
otx2_ssogws_fwd_swtag(struct otx2_ssogws *ws, const struct rte_event *ev)
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
const uint8_t cur_tt = OTX2_SSOW_TT_FROM_TAG(otx2_read64(ws->tag_op));
/* 96XX model
* cur_tt/new_tt SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
*
* SSO_SYNC_ORDERED norm norm untag
* SSO_SYNC_ATOMIC norm norm untag
* SSO_SYNC_UNTAGGED norm norm NOOP
*/
if (new_tt == SSO_SYNC_UNTAGGED) {
if (cur_tt != SSO_SYNC_UNTAGGED)
otx2_ssogws_swtag_untag(ws);
} else {
otx2_ssogws_swtag_norm(ws, tag, new_tt);
}
ws->swtag_req = 1;
}
static __rte_always_inline void
otx2_ssogws_fwd_group(struct otx2_ssogws *ws, const struct rte_event *ev,
const uint16_t grp)
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
otx2_write64(ev->u64, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
SSOW_LF_GWS_OP_UPD_WQP_GRP1);
rte_smp_wmb();
otx2_ssogws_swtag_desched(ws, tag, new_tt, grp);
}
static __rte_always_inline void
otx2_ssogws_forward_event(struct otx2_ssogws *ws, const struct rte_event *ev)
{
const uint8_t grp = ev->queue_id;
/* Group hasn't changed; use SWTAG to forward the event */
if (OTX2_SSOW_GRP_FROM_TAG(otx2_read64(ws->tag_op)) == grp)
otx2_ssogws_fwd_swtag(ws, ev);
else
/*
 * Group has changed for group-based work pipelining;
 * use the deschedule/add_work operation to transfer the
 * event to the new group/core.
 */
otx2_ssogws_fwd_group(ws, ev, grp);
}
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_hot \
otx2_ssogws_deq_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks) \
{ \
struct otx2_ssogws *ws = port; \
\
RTE_SET_USED(timeout_ticks); \
\
if (ws->swtag_req) { \
ws->swtag_req = 0; \
otx2_ssogws_swtag_wait(ws); \
return 1; \
} \
\
return otx2_ssogws_get_work(ws, ev, flags, ws->lookup_mem); \
} \
\
uint16_t __rte_hot \
otx2_ssogws_deq_burst_ ##name(void *port, struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks) \
{ \
RTE_SET_USED(nb_events); \
\
return otx2_ssogws_deq_ ##name(port, ev, timeout_ticks); \
} \
\
uint16_t __rte_hot \
otx2_ssogws_deq_timeout_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks) \
{ \
struct otx2_ssogws *ws = port; \
uint16_t ret = 1; \
uint64_t iter; \
\
if (ws->swtag_req) { \
ws->swtag_req = 0; \
otx2_ssogws_swtag_wait(ws); \
return ret; \
} \
\
ret = otx2_ssogws_get_work(ws, ev, flags, ws->lookup_mem); \
for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \
ret = otx2_ssogws_get_work(ws, ev, flags, \
ws->lookup_mem); \
\
return ret; \
} \
\
uint16_t __rte_hot \
otx2_ssogws_deq_timeout_burst_ ##name(void *port, struct rte_event ev[],\
uint16_t nb_events, \
uint64_t timeout_ticks) \
{ \
RTE_SET_USED(nb_events); \
\
return otx2_ssogws_deq_timeout_ ##name(port, ev, timeout_ticks);\
} \
\
uint16_t __rte_hot \
otx2_ssogws_deq_seg_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks) \
{ \
struct otx2_ssogws *ws = port; \
\
RTE_SET_USED(timeout_ticks); \
\
if (ws->swtag_req) { \
ws->swtag_req = 0; \
otx2_ssogws_swtag_wait(ws); \
return 1; \
} \
\
return otx2_ssogws_get_work(ws, ev, flags | NIX_RX_MULTI_SEG_F, \
ws->lookup_mem); \
} \
\
uint16_t __rte_hot \
otx2_ssogws_deq_seg_burst_ ##name(void *port, struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks) \
{ \
RTE_SET_USED(nb_events); \
\
return otx2_ssogws_deq_seg_ ##name(port, ev, timeout_ticks); \
} \
\
uint16_t __rte_hot \
otx2_ssogws_deq_seg_timeout_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks) \
{ \
struct otx2_ssogws *ws = port; \
uint16_t ret = 1; \
uint64_t iter; \
\
if (ws->swtag_req) { \
ws->swtag_req = 0; \
otx2_ssogws_swtag_wait(ws); \
return ret; \
} \
\
ret = otx2_ssogws_get_work(ws, ev, flags | NIX_RX_MULTI_SEG_F, \
ws->lookup_mem); \
for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \
ret = otx2_ssogws_get_work(ws, ev, \
flags | NIX_RX_MULTI_SEG_F, \
ws->lookup_mem); \
\
return ret; \
} \
\
uint16_t __rte_hot \
otx2_ssogws_deq_seg_timeout_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks) \
{ \
RTE_SET_USED(nb_events); \
\
return otx2_ssogws_deq_seg_timeout_ ##name(port, ev, \
timeout_ticks); \
}
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
uint16_t __rte_hot
otx2_ssogws_enq(void *port, const struct rte_event *ev)
{
struct otx2_ssogws *ws = port;
switch (ev->op) {
case RTE_EVENT_OP_NEW:
rte_smp_mb();
return otx2_ssogws_new_event(ws, ev);
case RTE_EVENT_OP_FORWARD:
otx2_ssogws_forward_event(ws, ev);
break;
case RTE_EVENT_OP_RELEASE:
otx2_ssogws_swtag_flush(ws->tag_op, ws->swtag_flush_op);
break;
default:
return 0;
}
return 1;
}
uint16_t __rte_hot
otx2_ssogws_enq_burst(void *port, const struct rte_event ev[],
uint16_t nb_events)
{
RTE_SET_USED(nb_events);
return otx2_ssogws_enq(port, ev);
}
uint16_t __rte_hot
otx2_ssogws_enq_new_burst(void *port, const struct rte_event ev[],
uint16_t nb_events)
{
struct otx2_ssogws *ws = port;
uint16_t i, rc = 1;
rte_smp_mb();
if (ws->xaq_lmt <= *ws->fc_mem)
return 0;
for (i = 0; i < nb_events && rc; i++)
rc = otx2_ssogws_new_event(ws, &ev[i]);
return nb_events;
}
uint16_t __rte_hot
otx2_ssogws_enq_fwd_burst(void *port, const struct rte_event ev[],
uint16_t nb_events)
{
struct otx2_ssogws *ws = port;
RTE_SET_USED(nb_events);
otx2_ssogws_forward_event(ws, ev);
return 1;
}
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __rte_hot \
otx2_ssogws_tx_adptr_enq_ ## name(void *port, struct rte_event ev[], \
uint16_t nb_events) \
{ \
struct otx2_ssogws *ws = port; \
uint64_t cmd[sz]; \
\
RTE_SET_USED(nb_events); \
return otx2_ssogws_event_tx(ws->base, &ev[0], cmd, \
(const uint64_t \
(*)[RTE_MAX_QUEUES_PER_PORT]) \
&ws->tx_adptr_data, \
flags); \
}
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __rte_hot \
otx2_ssogws_tx_adptr_enq_seg_ ## name(void *port, struct rte_event ev[],\
uint16_t nb_events) \
{ \
uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2]; \
struct otx2_ssogws *ws = port; \
\
RTE_SET_USED(nb_events); \
return otx2_ssogws_event_tx(ws->base, &ev[0], cmd, \
(const uint64_t \
(*)[RTE_MAX_QUEUES_PER_PORT]) \
&ws->tx_adptr_data, \
(flags) | NIX_TX_MULTI_SEG_F); \
}
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
void
ssogws_flush_events(struct otx2_ssogws *ws, uint8_t queue_id, uintptr_t base,
otx2_handle_event_t fn, void *arg)
{
uint64_t cq_ds_cnt = 1;
uint64_t aq_cnt = 1;
uint64_t ds_cnt = 1;
struct rte_event ev;
uint64_t enable;
uint64_t val;
enable = otx2_read64(base + SSO_LF_GGRP_QCTL);
if (!enable)
return;
val = queue_id; /* GGRP ID */
val |= BIT_ULL(18); /* Grouped */
val |= BIT_ULL(16); /* WAIT */
aq_cnt = otx2_read64(base + SSO_LF_GGRP_AQ_CNT);
ds_cnt = otx2_read64(base + SSO_LF_GGRP_MISC_CNT);
cq_ds_cnt = otx2_read64(base + SSO_LF_GGRP_INT_CNT);
cq_ds_cnt &= 0x3FFF3FFF0000;
while (aq_cnt || cq_ds_cnt || ds_cnt) {
otx2_write64(val, ws->getwrk_op);
otx2_ssogws_get_work_empty(ws, &ev, 0);
if (fn != NULL && ev.u64 != 0)
fn(arg, ev);
if (ev.sched_type != SSO_TT_EMPTY)
otx2_ssogws_swtag_flush(ws->tag_op, ws->swtag_flush_op);
rte_mb();
aq_cnt = otx2_read64(base + SSO_LF_GGRP_AQ_CNT);
ds_cnt = otx2_read64(base + SSO_LF_GGRP_MISC_CNT);
cq_ds_cnt = otx2_read64(base + SSO_LF_GGRP_INT_CNT);
/* Extract cq and ds count */
cq_ds_cnt &= 0x3FFF3FFF0000;
}
otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
SSOW_LF_GWS_OP_GWC_INVAL);
rte_mb();
}
void
ssogws_reset(struct otx2_ssogws *ws)
{
uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
uint64_t pend_state;
uint8_t pend_tt;
uint64_t tag;
/* Wait until getwork/swtp/waitw/desched completes. */
do {
pend_state = otx2_read64(base + SSOW_LF_GWS_PENDSTATE);
rte_mb();
} while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58)));
tag = otx2_read64(base + SSOW_LF_GWS_TAG);
pend_tt = (tag >> 32) & 0x3;
if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
if (pend_tt == SSO_SYNC_ATOMIC || pend_tt == SSO_SYNC_ORDERED)
otx2_ssogws_swtag_untag(ws);
otx2_ssogws_desched(ws);
}
rte_mb();
/* Wait for desched to complete. */
do {
pend_state = otx2_read64(base + SSOW_LF_GWS_PENDSTATE);
rte_mb();
} while (pend_state & BIT_ULL(58));
}
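For reference, the single-workslot dequeue/enqueue handlers removed above (otx2_ssogws_deq_*, otx2_ssogws_enq, and friends) are only ever reached through the generic eventdev fast-path API. A minimal, hedged worker-loop sketch, assuming dev_id, port_id and a stop flag come from the application's own setup code:

#include <stdbool.h>

#include <rte_eventdev.h>
#include <rte_pause.h>

static void
worker_loop(uint8_t dev_id, uint8_t port_id, volatile bool *stop)
{
	struct rte_event ev;

	while (!*stop) {
		/* Lands in the per-port dequeue handler installed by the PMD. */
		if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) == 0)
			continue;

		/* ... process ev.mbuf or ev.event_ptr here ... */

		/* Forwarded events go back through the per-port enqueue handler. */
		ev.op = RTE_EVENT_OP_FORWARD;
		while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
			rte_pause();
	}
}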

View File

@ -1,339 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#ifndef __OTX2_WORKER_H__
#define __OTX2_WORKER_H__
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <otx2_common.h>
#include "otx2_evdev.h"
#include "otx2_evdev_crypto_adptr_rx.h"
#include "otx2_ethdev_sec_tx.h"
/* SSO Operations */
static __rte_always_inline uint16_t
otx2_ssogws_get_work(struct otx2_ssogws *ws, struct rte_event *ev,
const uint32_t flags, const void * const lookup_mem)
{
union otx2_sso_event event;
uint64_t tstamp_ptr;
uint64_t get_work1;
uint64_t mbuf;
otx2_write64(BIT_ULL(16) | /* wait for work. */
1, /* Use Mask set 0. */
ws->getwrk_op);
if (flags & NIX_RX_OFFLOAD_PTYPE_F)
rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
asm volatile(
" ldr %[tag], [%[tag_loc]] \n"
" ldr %[wqp], [%[wqp_loc]] \n"
" tbz %[tag], 63, done%= \n"
" sevl \n"
"rty%=: wfe \n"
" ldr %[tag], [%[tag_loc]] \n"
" ldr %[wqp], [%[wqp_loc]] \n"
" tbnz %[tag], 63, rty%= \n"
"done%=: dmb ld \n"
" prfm pldl1keep, [%[wqp], #8] \n"
" sub %[mbuf], %[wqp], #0x80 \n"
" prfm pldl1keep, [%[mbuf]] \n"
: [tag] "=&r" (event.get_work0),
[wqp] "=&r" (get_work1),
[mbuf] "=&r" (mbuf)
: [tag_loc] "r" (ws->tag_op),
[wqp_loc] "r" (ws->wqp_op)
);
#else
event.get_work0 = otx2_read64(ws->tag_op);
while ((BIT_ULL(63)) & event.get_work0)
event.get_work0 = otx2_read64(ws->tag_op);
get_work1 = otx2_read64(ws->wqp_op);
rte_prefetch0((const void *)get_work1);
mbuf = (uint64_t)((char *)get_work1 - sizeof(struct rte_mbuf));
rte_prefetch0((const void *)mbuf);
#endif
event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
(event.get_work0 & (0x3FFull << 36)) << 4 |
(event.get_work0 & 0xffffffff);
if (event.sched_type != SSO_TT_EMPTY) {
if ((flags & NIX_RX_OFFLOAD_SECURITY_F) &&
(event.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
get_work1 = otx2_handle_crypto_event(get_work1);
} else if (event.event_type == RTE_EVENT_TYPE_ETHDEV) {
otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
(uint32_t) event.get_work0, flags,
lookup_mem);
/* Extract tstamp, if PTP is enabled */
tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
get_work1) +
OTX2_SSO_WQE_SG_PTR);
otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
ws->tstamp, flags,
(uint64_t *)tstamp_ptr);
get_work1 = mbuf;
}
}
ev->event = event.get_work0;
ev->u64 = get_work1;
return !!get_work1;
}
/* Used when cleaning up a workslot. */
static __rte_always_inline uint16_t
otx2_ssogws_get_work_empty(struct otx2_ssogws *ws, struct rte_event *ev,
const uint32_t flags)
{
union otx2_sso_event event;
uint64_t tstamp_ptr;
uint64_t get_work1;
uint64_t mbuf;
#ifdef RTE_ARCH_ARM64
asm volatile(
" ldr %[tag], [%[tag_loc]] \n"
" ldr %[wqp], [%[wqp_loc]] \n"
" tbz %[tag], 63, done%= \n"
" sevl \n"
"rty%=: wfe \n"
" ldr %[tag], [%[tag_loc]] \n"
" ldr %[wqp], [%[wqp_loc]] \n"
" tbnz %[tag], 63, rty%= \n"
"done%=: dmb ld \n"
" prfm pldl1keep, [%[wqp], #8] \n"
" sub %[mbuf], %[wqp], #0x80 \n"
" prfm pldl1keep, [%[mbuf]] \n"
: [tag] "=&r" (event.get_work0),
[wqp] "=&r" (get_work1),
[mbuf] "=&r" (mbuf)
: [tag_loc] "r" (ws->tag_op),
[wqp_loc] "r" (ws->wqp_op)
);
#else
event.get_work0 = otx2_read64(ws->tag_op);
while ((BIT_ULL(63)) & event.get_work0)
event.get_work0 = otx2_read64(ws->tag_op);
get_work1 = otx2_read64(ws->wqp_op);
rte_prefetch_non_temporal((const void *)get_work1);
mbuf = (uint64_t)((char *)get_work1 - sizeof(struct rte_mbuf));
rte_prefetch_non_temporal((const void *)mbuf);
#endif
event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
(event.get_work0 & (0x3FFull << 36)) << 4 |
(event.get_work0 & 0xffffffff);
if (event.sched_type != SSO_TT_EMPTY &&
event.event_type == RTE_EVENT_TYPE_ETHDEV) {
otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
(uint32_t) event.get_work0, flags, NULL);
/* Extract tstamp, if PTP is enabled */
tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)get_work1)
+ OTX2_SSO_WQE_SG_PTR);
otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
flags, (uint64_t *)tstamp_ptr);
get_work1 = mbuf;
}
ev->event = event.get_work0;
ev->u64 = get_work1;
return !!get_work1;
}
static __rte_always_inline void
otx2_ssogws_add_work(struct otx2_ssogws *ws, const uint64_t event_ptr,
const uint32_t tag, const uint8_t new_tt,
const uint16_t grp)
{
uint64_t add_work0;
add_work0 = tag | ((uint64_t)(new_tt) << 32);
otx2_store_pair(add_work0, event_ptr, ws->grps_base[grp]);
}
static __rte_always_inline void
otx2_ssogws_swtag_desched(struct otx2_ssogws *ws, uint32_t tag, uint8_t new_tt,
uint16_t grp)
{
uint64_t val;
val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
otx2_write64(val, ws->swtag_desched_op);
}
static __rte_always_inline void
otx2_ssogws_swtag_norm(struct otx2_ssogws *ws, uint32_t tag, uint8_t new_tt)
{
uint64_t val;
val = tag | ((uint64_t)(new_tt & 0x3) << 32);
otx2_write64(val, ws->swtag_norm_op);
}
static __rte_always_inline void
otx2_ssogws_swtag_untag(struct otx2_ssogws *ws)
{
otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
SSOW_LF_GWS_OP_SWTAG_UNTAG);
}
static __rte_always_inline void
otx2_ssogws_swtag_flush(uint64_t tag_op, uint64_t flush_op)
{
if (OTX2_SSOW_TT_FROM_TAG(otx2_read64(tag_op)) == SSO_TT_EMPTY)
return;
otx2_write64(0, flush_op);
}
static __rte_always_inline void
otx2_ssogws_desched(struct otx2_ssogws *ws)
{
otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
SSOW_LF_GWS_OP_DESCHED);
}
static __rte_always_inline void
otx2_ssogws_swtag_wait(struct otx2_ssogws *ws)
{
#ifdef RTE_ARCH_ARM64
uint64_t swtp;
asm volatile(" ldr %[swtb], [%[swtp_loc]] \n"
" tbz %[swtb], 62, done%= \n"
" sevl \n"
"rty%=: wfe \n"
" ldr %[swtb], [%[swtp_loc]] \n"
" tbnz %[swtb], 62, rty%= \n"
"done%=: \n"
: [swtb] "=&r" (swtp)
: [swtp_loc] "r" (ws->tag_op));
#else
/* Wait for the SWTAG/SWTAG_FULL operation */
while (otx2_read64(ws->tag_op) & BIT_ULL(62))
;
#endif
}
static __rte_always_inline void
otx2_ssogws_head_wait(uint64_t tag_op)
{
#ifdef RTE_ARCH_ARM64
uint64_t tag;
asm volatile (
" ldr %[tag], [%[tag_op]] \n"
" tbnz %[tag], 35, done%= \n"
" sevl \n"
"rty%=: wfe \n"
" ldr %[tag], [%[tag_op]] \n"
" tbz %[tag], 35, rty%= \n"
"done%=: \n"
: [tag] "=&r" (tag)
: [tag_op] "r" (tag_op)
);
#else
/* Wait for the HEAD to be set */
while (!(otx2_read64(tag_op) & BIT_ULL(35)))
;
#endif
}
static __rte_always_inline const struct otx2_eth_txq *
otx2_ssogws_xtract_meta(struct rte_mbuf *m,
const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT])
{
return (const struct otx2_eth_txq *)txq_data[m->port][
rte_event_eth_tx_adapter_txq_get(m)];
}
static __rte_always_inline void
otx2_ssogws_prepare_pkt(const struct otx2_eth_txq *txq, struct rte_mbuf *m,
uint64_t *cmd, const uint32_t flags)
{
otx2_lmt_mov(cmd, txq->cmd, otx2_nix_tx_ext_subs(flags));
otx2_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);
}
static __rte_always_inline uint16_t
otx2_ssogws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
const uint32_t flags)
{
struct rte_mbuf *m = ev->mbuf;
const struct otx2_eth_txq *txq;
uint16_t ref_cnt = m->refcnt;
if ((flags & NIX_TX_OFFLOAD_SECURITY_F) &&
(m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
txq = otx2_ssogws_xtract_meta(m, txq_data);
return otx2_sec_event_tx(base, ev, m, txq, flags);
}
/* Perform header writes before barrier for TSO */
otx2_nix_xmit_prepare_tso(m, flags);
/* Commit any changes to the packet here when fast free is set,
 * as no further changes will be made to the mbuf.
 * When fast free is not set, both otx2_nix_prepare_mseg()
 * and otx2_nix_xmit_prepare() have a barrier after the refcnt update.
 */
if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
rte_io_wmb();
txq = otx2_ssogws_xtract_meta(m, txq_data);
otx2_ssogws_prepare_pkt(txq, m, cmd, flags);
if (flags & NIX_TX_MULTI_SEG_F) {
const uint16_t segdw = otx2_nix_prepare_mseg(m, cmd, flags);
otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
m->ol_flags, segdw, flags);
if (!ev->sched_type) {
otx2_nix_xmit_mseg_prep_lmt(cmd, txq->lmt_addr, segdw);
otx2_ssogws_head_wait(base + SSOW_LF_GWS_TAG);
if (otx2_nix_xmit_submit_lmt(txq->io_addr) == 0)
otx2_nix_xmit_mseg_one(cmd, txq->lmt_addr,
txq->io_addr, segdw);
} else {
otx2_nix_xmit_mseg_one(cmd, txq->lmt_addr,
txq->io_addr, segdw);
}
} else {
/* Pass the number of segdw as 4: HDR + EXT + SG + SMEM */
otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
m->ol_flags, 4, flags);
if (!ev->sched_type) {
otx2_nix_xmit_prep_lmt(cmd, txq->lmt_addr, flags);
otx2_ssogws_head_wait(base + SSOW_LF_GWS_TAG);
if (otx2_nix_xmit_submit_lmt(txq->io_addr) == 0)
otx2_nix_xmit_one(cmd, txq->lmt_addr,
txq->io_addr, flags);
} else {
otx2_nix_xmit_one(cmd, txq->lmt_addr, txq->io_addr,
flags);
}
}
if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
if (ref_cnt > 1)
return 1;
}
otx2_ssogws_swtag_flush(base + SSOW_LF_GWS_TAG,
base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
return 1;
}
#endif
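For reference, otx2_ssogws_event_tx() above is the backend for the event eth Tx adapter enqueue; from the application side the call looks roughly like the hedged sketch below, where dev_id, tx_port_id and the target Tx queue are assumed to come from the setup code:

#include <rte_event_eth_tx_adapter.h>
#include <rte_eventdev.h>
#include <rte_mbuf.h>

static uint16_t
event_tx_example(uint8_t dev_id, uint8_t tx_port_id, struct rte_mbuf *m,
		 uint16_t txq)
{
	struct rte_event ev = {
		.op = RTE_EVENT_OP_FORWARD,
		.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.event_type = RTE_EVENT_TYPE_ETHDEV,
		.mbuf = m,
	};

	/* Stored in the mbuf; read back by otx2_ssogws_xtract_meta(). */
	rte_event_eth_tx_adapter_txq_set(m, txq);

	/* Ends up in the PMD's tx_adptr_enq handler shown in this diff. */
	return rte_event_eth_tx_adapter_enqueue(dev_id, tx_port_id, &ev, 1, 0);
}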

View File

@ -1,345 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2019 Marvell International Ltd.
*/
#include "otx2_worker_dual.h"
#include "otx2_worker.h"
static __rte_noinline uint8_t
otx2_ssogws_dual_new_event(struct otx2_ssogws_dual *ws,
const struct rte_event *ev)
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
const uint64_t event_ptr = ev->u64;
const uint16_t grp = ev->queue_id;
if (ws->xaq_lmt <= *ws->fc_mem)
return 0;
otx2_ssogws_dual_add_work(ws, event_ptr, tag, new_tt, grp);
return 1;
}
static __rte_always_inline void
otx2_ssogws_dual_fwd_swtag(struct otx2_ssogws_state *ws,
const struct rte_event *ev)
{
const uint8_t cur_tt = OTX2_SSOW_TT_FROM_TAG(otx2_read64(ws->tag_op));
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
/* 96XX model
* cur_tt/new_tt SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
*
* SSO_SYNC_ORDERED norm norm untag
* SSO_SYNC_ATOMIC norm norm untag
* SSO_SYNC_UNTAGGED norm norm NOOP
*/
if (new_tt == SSO_SYNC_UNTAGGED) {
if (cur_tt != SSO_SYNC_UNTAGGED)
otx2_ssogws_swtag_untag((struct otx2_ssogws *)ws);
} else {
otx2_ssogws_swtag_norm((struct otx2_ssogws *)ws, tag, new_tt);
}
}
static __rte_always_inline void
otx2_ssogws_dual_fwd_group(struct otx2_ssogws_state *ws,
const struct rte_event *ev, const uint16_t grp)
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
otx2_write64(ev->u64, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
SSOW_LF_GWS_OP_UPD_WQP_GRP1);
rte_smp_wmb();
otx2_ssogws_swtag_desched((struct otx2_ssogws *)ws, tag, new_tt, grp);
}
static __rte_always_inline void
otx2_ssogws_dual_forward_event(struct otx2_ssogws_dual *ws,
struct otx2_ssogws_state *vws,
const struct rte_event *ev)
{
const uint8_t grp = ev->queue_id;
/* Group hasn't changed; use SWTAG to forward the event */
if (OTX2_SSOW_GRP_FROM_TAG(otx2_read64(vws->tag_op)) == grp) {
otx2_ssogws_dual_fwd_swtag(vws, ev);
ws->swtag_req = 1;
} else {
/*
 * Group has changed for group-based work pipelining;
 * use the deschedule/add_work operation to transfer the
 * event to the new group/core.
 */
otx2_ssogws_dual_fwd_group(vws, ev, grp);
}
}
uint16_t __rte_hot
otx2_ssogws_dual_enq(void *port, const struct rte_event *ev)
{
struct otx2_ssogws_dual *ws = port;
struct otx2_ssogws_state *vws = &ws->ws_state[!ws->vws];
switch (ev->op) {
case RTE_EVENT_OP_NEW:
rte_smp_mb();
return otx2_ssogws_dual_new_event(ws, ev);
case RTE_EVENT_OP_FORWARD:
otx2_ssogws_dual_forward_event(ws, vws, ev);
break;
case RTE_EVENT_OP_RELEASE:
otx2_ssogws_swtag_flush(vws->tag_op, vws->swtag_flush_op);
break;
default:
return 0;
}
return 1;
}
uint16_t __rte_hot
otx2_ssogws_dual_enq_burst(void *port, const struct rte_event ev[],
uint16_t nb_events)
{
RTE_SET_USED(nb_events);
return otx2_ssogws_dual_enq(port, ev);
}
uint16_t __rte_hot
otx2_ssogws_dual_enq_new_burst(void *port, const struct rte_event ev[],
uint16_t nb_events)
{
struct otx2_ssogws_dual *ws = port;
uint16_t i, rc = 1;
rte_smp_mb();
if (ws->xaq_lmt <= *ws->fc_mem)
return 0;
for (i = 0; i < nb_events && rc; i++)
rc = otx2_ssogws_dual_new_event(ws, &ev[i]);
return nb_events;
}
uint16_t __rte_hot
otx2_ssogws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
uint16_t nb_events)
{
struct otx2_ssogws_dual *ws = port;
struct otx2_ssogws_state *vws = &ws->ws_state[!ws->vws];
RTE_SET_USED(nb_events);
otx2_ssogws_dual_forward_event(ws, vws, ev);
return 1;
}
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_hot \
otx2_ssogws_dual_deq_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks) \
{ \
struct otx2_ssogws_dual *ws = port; \
uint8_t gw; \
\
rte_prefetch_non_temporal(ws); \
RTE_SET_USED(timeout_ticks); \
if (ws->swtag_req) { \
otx2_ssogws_swtag_wait((struct otx2_ssogws *) \
&ws->ws_state[!ws->vws]); \
ws->swtag_req = 0; \
return 1; \
} \
\
gw = otx2_ssogws_dual_get_work(&ws->ws_state[ws->vws], \
&ws->ws_state[!ws->vws], ev, \
flags, ws->lookup_mem, \
ws->tstamp); \
ws->vws = !ws->vws; \
\
return gw; \
} \
\
uint16_t __rte_hot \
otx2_ssogws_dual_deq_burst_ ##name(void *port, struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks) \
{ \
RTE_SET_USED(nb_events); \
\
return otx2_ssogws_dual_deq_ ##name(port, ev, timeout_ticks); \
} \
\
uint16_t __rte_hot \
otx2_ssogws_dual_deq_timeout_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks) \
{ \
struct otx2_ssogws_dual *ws = port; \
uint64_t iter; \
uint8_t gw; \
\
if (ws->swtag_req) { \
otx2_ssogws_swtag_wait((struct otx2_ssogws *) \
&ws->ws_state[!ws->vws]); \
ws->swtag_req = 0; \
return 1; \
} \
\
gw = otx2_ssogws_dual_get_work(&ws->ws_state[ws->vws], \
&ws->ws_state[!ws->vws], ev, \
flags, ws->lookup_mem, \
ws->tstamp); \
ws->vws = !ws->vws; \
for (iter = 1; iter < timeout_ticks && (gw == 0); iter++) { \
gw = otx2_ssogws_dual_get_work(&ws->ws_state[ws->vws], \
&ws->ws_state[!ws->vws], \
ev, flags, \
ws->lookup_mem, \
ws->tstamp); \
ws->vws = !ws->vws; \
} \
\
return gw; \
} \
\
uint16_t __rte_hot \
otx2_ssogws_dual_deq_timeout_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks) \
{ \
RTE_SET_USED(nb_events); \
\
return otx2_ssogws_dual_deq_timeout_ ##name(port, ev, \
timeout_ticks); \
} \
\
uint16_t __rte_hot \
otx2_ssogws_dual_deq_seg_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks) \
{ \
struct otx2_ssogws_dual *ws = port; \
uint8_t gw; \
\
RTE_SET_USED(timeout_ticks); \
if (ws->swtag_req) { \
otx2_ssogws_swtag_wait((struct otx2_ssogws *) \
&ws->ws_state[!ws->vws]); \
ws->swtag_req = 0; \
return 1; \
} \
\
gw = otx2_ssogws_dual_get_work(&ws->ws_state[ws->vws], \
&ws->ws_state[!ws->vws], ev, \
flags | NIX_RX_MULTI_SEG_F, \
ws->lookup_mem, \
ws->tstamp); \
ws->vws = !ws->vws; \
\
return gw; \
} \
\
uint16_t __rte_hot \
otx2_ssogws_dual_deq_seg_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks) \
{ \
RTE_SET_USED(nb_events); \
\
return otx2_ssogws_dual_deq_seg_ ##name(port, ev, \
timeout_ticks); \
} \
\
uint16_t __rte_hot \
otx2_ssogws_dual_deq_seg_timeout_ ##name(void *port, \
struct rte_event *ev, \
uint64_t timeout_ticks) \
{ \
struct otx2_ssogws_dual *ws = port; \
uint64_t iter; \
uint8_t gw; \
\
if (ws->swtag_req) { \
otx2_ssogws_swtag_wait((struct otx2_ssogws *) \
&ws->ws_state[!ws->vws]); \
ws->swtag_req = 0; \
return 1; \
} \
\
gw = otx2_ssogws_dual_get_work(&ws->ws_state[ws->vws], \
&ws->ws_state[!ws->vws], ev, \
flags | NIX_RX_MULTI_SEG_F, \
ws->lookup_mem, \
ws->tstamp); \
ws->vws = !ws->vws; \
for (iter = 1; iter < timeout_ticks && (gw == 0); iter++) { \
gw = otx2_ssogws_dual_get_work(&ws->ws_state[ws->vws], \
&ws->ws_state[!ws->vws], \
ev, flags | \
NIX_RX_MULTI_SEG_F, \
ws->lookup_mem, \
ws->tstamp); \
ws->vws = !ws->vws; \
} \
\
return gw; \
} \
\
uint16_t __rte_hot \
otx2_ssogws_dual_deq_seg_timeout_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks) \
{ \
RTE_SET_USED(nb_events); \
\
return otx2_ssogws_dual_deq_seg_timeout_ ##name(port, ev, \
timeout_ticks); \
}
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __rte_hot \
otx2_ssogws_dual_tx_adptr_enq_ ## name(void *port, \
struct rte_event ev[], \
uint16_t nb_events) \
{ \
struct otx2_ssogws_dual *ws = port; \
uint64_t cmd[sz]; \
\
RTE_SET_USED(nb_events); \
return otx2_ssogws_event_tx(ws->base[!ws->vws], &ev[0], \
cmd, (const uint64_t \
(*)[RTE_MAX_QUEUES_PER_PORT]) \
&ws->tx_adptr_data, flags); \
}
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __rte_hot \
otx2_ssogws_dual_tx_adptr_enq_seg_ ## name(void *port, \
struct rte_event ev[], \
uint16_t nb_events) \
{ \
uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2]; \
struct otx2_ssogws_dual *ws = port; \
\
RTE_SET_USED(nb_events); \
return otx2_ssogws_event_tx(ws->base[!ws->vws], &ev[0], \
cmd, (const uint64_t \
(*)[RTE_MAX_QUEUES_PER_PORT]) \
&ws->tx_adptr_data, \
(flags) | NIX_TX_MULTI_SEG_F);\
}
SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T

Some files were not shown because too many files have changed in this diff.