raw/cnxk_bphy: add BPHY CGX/RPM skeleton driver

Add a baseband PHY CGX/RPM skeleton driver which merely probes a matching
device. CGX/RPM are Ethernet MACs hardwired to the baseband subsystem.

Signed-off-by: Tomasz Duszynski <tduszynski@marvell.com>
Signed-off-by: Jakub Palider <jpalider@marvell.com>
Reviewed-by: Jerin Jacob <jerinj@marvell.com>
Author: Tomasz Duszynski
Date: 2021-06-21 17:04:26 +02:00
Committed-by: Thomas Monjalon
Parent: 2c19694c8e
Commit: 3d27e49e07
10 changed files with 206 additions and 2 deletions

MAINTAINERS

@@ -1292,6 +1292,12 @@ M: Nipun Gupta <nipun.gupta@nxp.com>
F: drivers/raw/dpaa2_cmdif/
F: doc/guides/rawdevs/dpaa2_cmdif.rst

Marvell CNXK BPHY
M: Jakub Palider <jpalider@marvell.com>
M: Tomasz Duszynski <tduszynski@marvell.com>
F: doc/guides/rawdevs/cnxk_bphy.rst
F: drivers/raw/cnxk_bphy/

Marvell OCTEON TX2 DMA
M: Radha Mohan Chintakuntla <radhac@marvell.com>
M: Veerasenareddy Burru <vburru@marvell.com>
@@ -1312,7 +1318,6 @@ F: doc/guides/rawdevs/ntb.rst
F: examples/ntb/
F: doc/guides/sample_app_ug/ntb.rst

Packet processing
-----------------

doc/guides/platform/cnxk.rst

@@ -145,6 +145,9 @@ This section lists dataplane H/W block(s) available in cnxk SoC.

#. **Mempool Driver**

   See :doc:`../mempool/cnxk` for NPA mempool driver information.

#. **Baseband PHY Driver**

   See :doc:`../rawdevs/cnxk_bphy` for Baseband PHY driver information.

Procedure to Setup Platform
---------------------------

doc/guides/rawdevs/cnxk_bphy.rst

@@ -0,0 +1,23 @@
.. SPDX-License-Identifier: BSD-3-Clause
   Copyright(c) 2021 Marvell.

Marvell CNXK BPHY Driver
========================

CN10K/CN9K Fusion product families offer an internal BPHY unit which provides
a set of hardware accelerators for performing baseband related operations.
Connectivity to the outside world happens through a block called RFOE, which
is backed by an Ethernet I/O block called CGX or RPM (depending on the chip
version). RFOE stands for Radio Frequency Over Ethernet and provides support
for the IEEE 1904.3 (RoE) standard.

Device Setup
------------

The BPHY CGX/RPM devices will need to be bound to a user-space IO driver for
use. The ``dpdk-devbind.py`` script included with DPDK can be used to view
the state of the devices and to bind them to a suitable DPDK-supported kernel
driver. When querying the status of the devices, they will appear under the
category of "Misc (rawdev) devices", i.e. the command
``dpdk-devbind.py --status-dev misc`` can be used to see the state of those
devices alone.
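
Once a device is bound, the driver added in this commit registers it as a
rawdev named after the PCI address (see ``cnxk_bphy_cgx_format_name()`` in
the driver below). A minimal sketch of locating the device from an
application; the PCI address is a placeholder, and the id is sanity-checked
against ``rte_rawdev_count()`` since the skeleton exposes no further ops yet:

.. code-block:: c

   #include <stdint.h>
   #include <stdio.h>

   #include <rte_eal.h>
   #include <rte_rawdev.h>

   int
   main(int argc, char **argv)
   {
           uint16_t dev_id;

           if (rte_eal_init(argc, argv) < 0)
                   return -1;

           /* Name format used by the driver: "BPHY_CGX:%x:%02x.%x"
            * (bus, devid, function); the address below is a placeholder.
            */
           dev_id = rte_rawdev_get_dev_id("BPHY_CGX:5:00.0");
           if (dev_id >= rte_rawdev_count())
                   printf("BPHY CGX/RPM rawdev not found\n");
           else
                   printf("BPHY CGX/RPM rawdev id: %u\n", dev_id);

           rte_eal_cleanup();
           return 0;
   }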

doc/guides/rawdevs/index.rst

@@ -11,6 +11,7 @@ application through rawdev API.
    :maxdepth: 2
    :numbered:

    cnxk_bphy
    dpaa2_cmdif
    dpaa2_qdma
    ifpga

doc/guides/rel_notes/release_21_08.rst

@@ -61,6 +61,13 @@ New Features
  representing sub-domains of functionality. Each auxiliary device
  represents a part of its parent functionality.

* **Added Baseband PHY CNXK PMD.**

  Added a Baseband PHY PMD which allows configuring the BPHY hardware block
  comprising accelerators and DSPs specifically tailored for 5G/LTE inline
  use cases. Configuration happens via standard rawdev enq/deq operations.
  See the :doc:`../rawdevs/cnxk_bphy` rawdev guide for more details on this
  driver.

Removed Items
-------------
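
As context for the enq/deq flow mentioned above: the ops table in this
skeleton commit is still empty, so the sketch below only illustrates the
generic rawdev queue calls a client would issue once later commits wire them
up. ``bphy_xfer`` and its ``msg`` payload are hypothetical, and passing the
queue index through the opaque context argument is an assumption here:

.. code-block:: c

   #include <stdint.h>

   #include <rte_rawdev.h>

   /* Send one request on a queue and poll its single-slot response. */
   static int
   bphy_xfer(uint16_t dev_id, void *msg, unsigned int queue)
   {
           struct rte_rawdev_buf buf = { .buf_addr = msg };
           struct rte_rawdev_buf *bufs[1] = { &buf };
           int ret;

           ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1,
                           (rte_rawdev_obj_t)(uintptr_t)queue);
           if (ret < 0)
                   return ret;

           return rte_rawdev_dequeue_buffers(dev_id, bufs, 1,
                           (rte_rawdev_obj_t)(uintptr_t)queue);
   }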

drivers/raw/cnxk_bphy/cnxk_bphy_cgx.c

@@ -0,0 +1,151 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2021 Marvell.
*/
#include <rte_bus_pci.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include <roc_api.h>
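
/* roc_api.h above pulls in the shared cnxk ROC ("Rest Of Chip") layer from
 * drivers/common/cnxk; it provides the roc_bphy_cgx_*() helpers used below.
 */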
struct cnxk_bphy_cgx_queue {
        unsigned int lmac;
        /* queue holds up to one response */
        void *rsp;
};

struct cnxk_bphy_cgx {
        struct roc_bphy_cgx *rcgx;
        struct cnxk_bphy_cgx_queue queues[MAX_LMACS_PER_CGX];
        unsigned int num_queues;
};
static void
cnxk_bphy_cgx_format_name(char *name, unsigned int len,
                          struct rte_pci_device *pci_dev)
{
        snprintf(name, len, "BPHY_CGX:%x:%02x.%x", pci_dev->addr.bus,
                 pci_dev->addr.devid, pci_dev->addr.function);
}
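
/* Skeleton stage: no rawdev ops are implemented yet -- this commit only
 * probes the device; follow-up commits are expected to populate these.
 */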
static const struct rte_rawdev_ops cnxk_bphy_cgx_rawdev_ops = {
};
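
/* Expose one queue per enabled LMAC: walk the LMAC bitmap and assign
 * consecutive queue indices to the set bits.
 */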
static void
cnxk_bphy_cgx_init_queues(struct cnxk_bphy_cgx *cgx)
{
        struct roc_bphy_cgx *rcgx = cgx->rcgx;
        unsigned int i;

        for (i = 0; i < RTE_DIM(cgx->queues); i++) {
                if (!(rcgx->lmac_bmap & BIT_ULL(i)))
                        continue;

                cgx->queues[cgx->num_queues++].lmac = i;
        }
}
static void
cnxk_bphy_cgx_fini_queues(struct cnxk_bphy_cgx *cgx)
{
        unsigned int i;

        for (i = 0; i < cgx->num_queues; i++) {
                if (cgx->queues[i].rsp)
                        rte_free(cgx->queues[i].rsp);
        }

        cgx->num_queues = 0;
}
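
/* Probe flow: name the rawdev after the PCI address, allocate it together
 * with driver-private data, hand BAR0 to the common ROC layer for device
 * init, then set up the per-LMAC queues.
 */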
static int
cnxk_bphy_cgx_rawdev_probe(struct rte_pci_driver *pci_drv,
                           struct rte_pci_device *pci_dev)
{
        char name[RTE_RAWDEV_NAME_MAX_LEN];
        struct rte_rawdev *rawdev;
        struct cnxk_bphy_cgx *cgx;
        struct roc_bphy_cgx *rcgx;
        int ret;

        RTE_SET_USED(pci_drv);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        if (!pci_dev->mem_resource[0].addr)
                return -ENODEV;

        ret = roc_plt_init();
        if (ret)
                return ret;

        cnxk_bphy_cgx_format_name(name, sizeof(name), pci_dev);
        rawdev = rte_rawdev_pmd_allocate(name, sizeof(*cgx), rte_socket_id());
        if (!rawdev)
                return -ENOMEM;

        rawdev->dev_ops = &cnxk_bphy_cgx_rawdev_ops;
        rawdev->device = &pci_dev->device;
        rawdev->driver_name = pci_dev->driver->driver.name;

        cgx = rawdev->dev_private;
        cgx->rcgx = rte_zmalloc(NULL, sizeof(*rcgx), 0);
        if (!cgx->rcgx) {
                ret = -ENOMEM;
                goto out_pmd_release;
        }

        rcgx = cgx->rcgx;
        rcgx->bar0_pa = pci_dev->mem_resource[0].phys_addr;
        rcgx->bar0_va = pci_dev->mem_resource[0].addr;
        ret = roc_bphy_cgx_dev_init(rcgx);
        if (ret)
                goto out_free;

        cnxk_bphy_cgx_init_queues(cgx);

        return 0;
out_free:
        rte_free(rcgx);
out_pmd_release:
        rte_rawdev_pmd_release(rawdev);

        return ret;
}
static int
cnxk_bphy_cgx_rawdev_remove(struct rte_pci_device *pci_dev)
{
        char name[RTE_RAWDEV_NAME_MAX_LEN];
        struct rte_rawdev *rawdev;
        struct cnxk_bphy_cgx *cgx;

        cnxk_bphy_cgx_format_name(name, sizeof(name), pci_dev);
        rawdev = rte_rawdev_pmd_get_named_dev(name);
        if (!rawdev)
                return -ENODEV;

        cgx = rawdev->dev_private;
        cnxk_bphy_cgx_fini_queues(cgx);
        roc_bphy_cgx_dev_fini(cgx->rcgx);
        rte_free(cgx->rcgx);

        return rte_rawdev_pmd_release(rawdev);
}
static const struct rte_pci_id cnxk_bphy_cgx_map[] = {
        {RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN9K_CGX)},
        {RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM)},
        {} /* sentinel */
};

static struct rte_pci_driver bphy_cgx_rawdev_pmd = {
        .id_table = cnxk_bphy_cgx_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = cnxk_bphy_cgx_rawdev_probe,
        .remove = cnxk_bphy_cgx_rawdev_remove,
};

RTE_PMD_REGISTER_PCI(cnxk_bphy_cgx_rawdev_pci_driver, bphy_cgx_rawdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(cnxk_bphy_cgx_rawdev_pci_driver, cnxk_bphy_cgx_map);
RTE_PMD_REGISTER_KMOD_DEP(cnxk_bphy_cgx_rawdev_pci_driver, "vfio-pci");

drivers/raw/cnxk_bphy/meson.build

@@ -0,0 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(C) 2021 Marvell.
#
deps += ['bus_pci', 'common_cnxk', 'rawdev']
sources = files(
        'cnxk_bphy_cgx.c',
)

drivers/raw/cnxk_bphy/version.map

@@ -0,0 +1,3 @@
DPDK_21 {
        local: *;
};

drivers/raw/meson.build

@@ -6,6 +6,7 @@ if is_windows
endif

drivers = [
        'cnxk_bphy',
        'dpaa2_cmdif',
        'dpaa2_qdma',
        'ifpga',

usertools/dpdk-devbind.py

@@ -45,6 +45,8 @@
                 'SVendor': None, 'SDevice': None}
octeontx2_ree = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f4',
                 'SVendor': None, 'SDevice': None}
cnxk_bphy_cgx = {'Class': '08', 'Vendor': '177d', 'Device': 'a059,a060',
                 'SVendor': None, 'SDevice': None}

intel_dlb = {'Class': '0b', 'Vendor': '8086', 'Device': '270b,2710,2714',
             'SVendor': None, 'SDevice': None}
@@ -69,7 +71,7 @@
mempool_devices = [cavium_fpa, octeontx2_npa]
compress_devices = [cavium_zip]
regex_devices = [octeontx2_ree]
-misc_devices = [intel_ioat_bdw, intel_ioat_skx, intel_ioat_icx, intel_idxd_spr,
+misc_devices = [cnxk_bphy_cgx, intel_ioat_bdw, intel_ioat_skx, intel_ioat_icx, intel_idxd_spr,
                intel_ntb_skx, intel_ntb_icx,
                octeontx2_dma]