dma/dpaa2: introduce driver skeleton

The DPAA2 DMA driver is an implementation of the dmadev APIs,
which provides a means to initiate a DMA transaction from the CPU.
Earlier this was part of RAW driver, but with DMA drivers
added as separate flavor of drivers, this driver is being
moved to DMA drivers.

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
This commit is contained in:
Nipun Gupta 2022-05-05 14:35:18 +05:30 committed by Thomas Monjalon
parent 33584c19dd
commit 8caf8427f8
11 changed files with 733 additions and 0 deletions

View File

@ -1203,6 +1203,12 @@ M: Nipun Gupta <nipun.gupta@nxp.com>
F: drivers/dma/dpaa/
F: doc/guides/dmadevs/dpaa.rst
NXP DPAA2 QDMA
M: Nipun Gupta <nipun.gupta@nxp.com>
M: Hemant Agrawal <hemant.agrawal@nxp.com>
F: drivers/dma/dpaa2/
F: doc/guides/dmadevs/dpaa2.rst
RegEx Drivers
-------------

View File

@ -0,0 +1,64 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright 2018-2022 NXP
NXP DPAA2 QDMA Driver
=====================
The DPAA2 QDMA is an implementation of the dmadev API, which provides a means
to initiate a DMA transaction from the CPU. The initiated DMA is performed
without CPU being involved in the actual DMA transaction. This is achieved
via using the DPDMAI device exposed by MC.
More information can be found at `NXP Official Website
<http://www.nxp.com/products/microcontrollers-and-processors/arm-processors/qoriq-arm-processors:QORIQ-ARM>`_.
Supported DPAA2 SoCs
--------------------
- LX2160A
- LS2084A/LS2044A
- LS2088A/LS2048A
- LS1088A/LS1048A
Prerequisites
-------------
See :doc:`../platform/dpaa2` for setup information
- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
.. note::
Some part of fslmc bus code (mc flib - object library) routines are
dual licensed (BSD & GPLv2).
Enabling logs
-------------
For enabling logs, use the following EAL parameter:
.. code-block:: console
./your_qdma_application <EAL args> --log-level=pmd.dma.dpaa2.qdma,<level>
Using ``pmd.dma.dpaa2.qdma`` as the log matching criteria, all QDMA PMD logs
at or below the configured ``level`` are enabled.
Initialization
--------------
The DPAA2 QDMA is exposed as a dma device which consists of dpdmai devices.
On EAL initialization, dpdmai devices will be probed and populated into the
dmadevices. The dmadev ID of the device can be obtained using
* Invoking ``rte_dma_get_dev_id_by_name("dpdmai.x")`` from the application,
  where x is the object ID of the DPDMAI object created by MC. The user can
  use this index for further dmadev function calls.
Platform Requirement
~~~~~~~~~~~~~~~~~~~~
DPAA2 drivers for DPDK can only work on NXP SoCs as listed in the
``Supported DPAA2 SoCs``.

View File

@ -13,6 +13,7 @@ an application through DMA API.
cnxk
dpaa
dpaa2
hisilicon
idxd
ioat

View File

@ -40,6 +40,10 @@ Common Offload HW Block Drivers
See :doc:`../rawdevs/dpaa2_cmdif` for NXP dpaa2 AIOP command interface driver information.
5. **DMA Driver**
See :doc:`../dmadevs/dpaa2` for NXP dpaa2 QDMA driver information.
Steps To Setup Platform
-----------------------

View File

@ -123,6 +123,7 @@ struct rte_dpaa2_device {
union {
struct rte_eth_dev *eth_dev; /**< ethernet device */
struct rte_cryptodev *cryptodev; /**< Crypto Device */
struct rte_dma_dev *dmadev; /**< DMA Device */
struct rte_rawdev *rawdev; /**< Raw Device */
};
enum rte_dpaa2_dev_type dev_type; /**< Device Type */

View File

@ -0,0 +1,275 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2018-2022 NXP
*/
#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_dmadev.h>
#include <rte_dmadev_pmd.h>
#include <mc/fsl_dpdmai.h>
#include "dpaa2_qdma.h"
#include "dpaa2_qdma_logs.h"
/* Dynamic log type identifier */
int dpaa2_qdma_logtype;

/* Cache-coherency attribute codes for the read/write descriptor commands.
 * Selected once per SoC family on first device init (see
 * dpaa2_dpdmai_dev_init()); zero means "not yet selected".
 */
uint32_t dpaa2_coherent_no_alloc_cache;
uint32_t dpaa2_coherent_alloc_cache;
/*
 * Reset the software QDMA state of a device: free all virtual queues
 * and their status rings.
 *
 * Returns 0 on success, -EBUSY if the device is still running or any
 * VQ has jobs enqueued but not yet dequeued.
 */
static int
dpaa2_qdma_reset(struct rte_dma_dev *dev)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	/* In case QDMA device is not in stopped state, return -EBUSY */
	if (qdma_dev->state == 1) {
		DPAA2_QDMA_ERR(
			"Device is in running state. Stop before reset.");
		return -EBUSY;
	}

	/* In case there are pending jobs on any VQ, return -EBUSY */
	for (i = 0; i < qdma_dev->num_vqs; i++) {
		if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
		    qdma_dev->vqs[i].num_dequeues)) {
			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
			return -EBUSY;
		}
	}

	/* Reset and free virtual queues.
	 * rte_ring_free(NULL) and rte_free(NULL) are no-ops, so no NULL
	 * guards are needed.
	 */
	for (i = 0; i < qdma_dev->num_vqs; i++)
		rte_ring_free(qdma_dev->vqs[i].status_ring);
	rte_free(qdma_dev->vqs);
	qdma_dev->vqs = NULL;

	/* Reset QDMA device structure */
	qdma_dev->num_vqs = 0;

	return 0;
}
/* dmadev control-path operations table.
 * Intentionally empty in this skeleton: no dmadev ops are implemented
 * here yet.
 */
static struct rte_dma_dev_ops dpaa2_qdma_ops = {
};
static int
dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev)
{
struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
int ret;
DPAA2_QDMA_FUNC_TRACE();
ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
dpdmai_dev->token);
if (ret)
DPAA2_QDMA_ERR("dmdmai disable failed");
/* Set up the DQRR storage for Rx */
struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
if (rxq->q_storage) {
dpaa2_free_dq_storage(rxq->q_storage);
rte_free(rxq->q_storage);
}
/* Close the device at underlying layer*/
ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
if (ret)
DPAA2_QDMA_ERR("Failure closing dpdmai device");
return 0;
}
static int
dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
{
struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
struct dpdmai_rx_queue_cfg rx_queue_cfg;
struct dpdmai_attr attr;
struct dpdmai_rx_queue_attr rx_attr;
struct dpdmai_tx_queue_attr tx_attr;
struct dpaa2_queue *rxq;
int ret;
DPAA2_QDMA_FUNC_TRACE();
/* Open DPDMAI device */
dpdmai_dev->dpdmai_id = dpdmai_id;
dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
dpdmai_dev->qdma_dev = rte_malloc(NULL, sizeof(struct qdma_device),
RTE_CACHE_LINE_SIZE);
ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
if (ret) {
DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
return ret;
}
/* Get DPDMAI attributes */
ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
dpdmai_dev->token, &attr);
if (ret) {
DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
ret);
goto init_err;
}
dpdmai_dev->num_queues = attr.num_of_queues;
/* Set up Rx Queue */
memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
CMD_PRI_LOW,
dpdmai_dev->token,
0, 0, &rx_queue_cfg);
if (ret) {
DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
ret);
goto init_err;
}
/* Allocate DQ storage for the DPDMAI Rx queues */
rxq = &(dpdmai_dev->rx_queue[0]);
rxq->q_storage = rte_malloc("dq_storage",
sizeof(struct queue_storage_info_t),
RTE_CACHE_LINE_SIZE);
if (!rxq->q_storage) {
DPAA2_QDMA_ERR("q_storage allocation failed");
ret = -ENOMEM;
goto init_err;
}
memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
ret = dpaa2_alloc_dq_storage(rxq->q_storage);
if (ret) {
DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
goto init_err;
}
/* Get Rx and Tx queues FQID */
ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
dpdmai_dev->token, 0, 0, &rx_attr);
if (ret) {
DPAA2_QDMA_ERR("Reading device failed with err: %d",
ret);
goto init_err;
}
dpdmai_dev->rx_queue[0].fqid = rx_attr.fqid;
ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
dpdmai_dev->token, 0, 0, &tx_attr);
if (ret) {
DPAA2_QDMA_ERR("Reading device failed with err: %d",
ret);
goto init_err;
}
dpdmai_dev->tx_queue[0].fqid = tx_attr.fqid;
/* Enable the device */
ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
dpdmai_dev->token);
if (ret) {
DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
goto init_err;
}
if (!dpaa2_coherent_no_alloc_cache) {
if (dpaa2_svr_family == SVR_LX2160A) {
dpaa2_coherent_no_alloc_cache =
DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
dpaa2_coherent_alloc_cache =
DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
} else {
dpaa2_coherent_no_alloc_cache =
DPAA2_COHERENT_NO_ALLOCATE_CACHE;
dpaa2_coherent_alloc_cache =
DPAA2_COHERENT_ALLOCATE_CACHE;
}
}
DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
/* Reset the QDMA device */
ret = dpaa2_qdma_reset(dev);
if (ret) {
DPAA2_QDMA_ERR("Resetting QDMA failed");
goto init_err;
}
return 0;
init_err:
dpaa2_dpdmai_dev_uninit(dev);
return ret;
}
static int
dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
struct rte_dpaa2_device *dpaa2_dev)
{
struct rte_dma_dev *dmadev;
int ret;
DPAA2_QDMA_FUNC_TRACE();
RTE_SET_USED(dpaa2_drv);
dmadev = rte_dma_pmd_allocate(dpaa2_dev->device.name,
rte_socket_id(),
sizeof(struct dpaa2_dpdmai_dev));
if (!dmadev) {
DPAA2_QDMA_ERR("Unable to allocate dmadevice");
return -EINVAL;
}
dpaa2_dev->dmadev = dmadev;
dmadev->dev_ops = &dpaa2_qdma_ops;
dmadev->device = &dpaa2_dev->device;
dmadev->fp_obj->dev_private = dmadev->data->dev_private;
/* Invoke PMD device initialization function */
ret = dpaa2_dpdmai_dev_init(dmadev, dpaa2_dev->object_id);
if (ret) {
rte_dma_pmd_release(dpaa2_dev->device.name);
return ret;
}
dmadev->state = RTE_DMA_DEV_READY;
return 0;
}
/*
 * Bus remove callback: uninitialize the DPDMAI device and release the
 * dmadev.
 *
 * Returns the result of rte_dma_pmd_release() so that a failed cleanup
 * is reported to the bus instead of being silently swallowed.
 */
static int
dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_dma_dev *dmadev = dpaa2_dev->dmadev;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	dpaa2_dpdmai_dev_uninit(dmadev);

	ret = rte_dma_pmd_release(dpaa2_dev->device.name);
	if (ret)
		DPAA2_QDMA_ERR("Device cleanup failed");

	return ret;
}
/* Driver registration (the former forward declaration was redundant:
 * the definition precedes every use).
 */
static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_QDMA,
	.probe = dpaa2_qdma_probe,
	.remove = dpaa2_qdma_remove,
};

RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
	"no_prefetch=<int> ");
/* Register the log type under the identifier the logging macros use
 * (extern int dpaa2_qdma_logtype in dpaa2_qdma_logs.h). The previous
 * "dpaa_qdma2_logtype" was a typo that left the real log type
 * unregistered.
 */
RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO);

View File

@ -0,0 +1,316 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2018-2022 NXP
*/
#ifndef _DPAA2_QDMA_H_
#define _DPAA2_QDMA_H_

/* Descriptor (job) limits per virtual DMA channel */
#define DPAA2_QDMA_MAX_DESC		1024
#define DPAA2_QDMA_MIN_DESC		1
#define DPAA2_QDMA_MAX_VHANS		64

/* Per-virtual-queue configuration flags */
#define DPAA2_QDMA_VQ_FD_SHORT_FORMAT		(1ULL << 0)
#define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
#define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)

#define DPAA2_QDMA_MAX_FLE 3
#define DPAA2_QDMA_MAX_SDD 2

#define DPAA2_QDMA_MAX_SG_NB 64

#define DPAA2_DPDMAI_MAX_QUEUES	1

/** FLE single job pool size: job pointer(uint64_t) +
 * 3 Frame list + 2 source/destination descriptor.
 */
#define QDMA_FLE_SINGLE_POOL_SIZE (sizeof(uint64_t) + \
			sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
			sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)

/** FLE sg jobs pool size: job number(uint64_t) +
 * 3 Frame list + 2 source/destination descriptor +
 * 64 (src + dst) sg entries + 64 jobs pointers.
 * (Job pointer type fixed to struct dpaa2_qdma_job — the old
 * "rte_qdma_job" tag was a rawdev-era leftover never declared here.)
 */
#define QDMA_FLE_SG_POOL_SIZE (sizeof(uint64_t) + \
		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD + \
		sizeof(struct qdma_sg_entry) * (DPAA2_QDMA_MAX_SG_NB * 2) + \
		sizeof(struct dpaa2_qdma_job *) * DPAA2_QDMA_MAX_SG_NB)

/* Byte offsets of the sections inside one FLE pool element */
#define QDMA_FLE_JOB_NB_OFFSET 0
#define QDMA_FLE_SINGLE_JOB_OFFSET 0

#define QDMA_FLE_FLE_OFFSET \
		(QDMA_FLE_JOB_NB_OFFSET + sizeof(uint64_t))

#define QDMA_FLE_SDD_OFFSET \
		(QDMA_FLE_FLE_OFFSET + \
		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE)

#define QDMA_FLE_SG_ENTRY_OFFSET \
		(QDMA_FLE_SDD_OFFSET + \
		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)

#define QDMA_FLE_SG_JOBS_OFFSET \
		(QDMA_FLE_SG_ENTRY_OFFSET + \
		sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2)

/** FLE pool cache size; argument parenthesized so expressions such as
 * QDMA_FLE_CACHE_SIZE(a + b) expand correctly.
 */
#define QDMA_FLE_CACHE_SIZE(_num) ((_num) / (RTE_MAX_LCORE * 2))

/** Notification by FQD_CTX[fqid] */
#define QDMA_SER_CTX (1 << 8)

#define DPAA2_RBP_MEM_RW            0x0
/**
 * Source descriptor command read transaction type for RBP=0:
 * coherent copy of cacheable memory
 */
#define DPAA2_COHERENT_NO_ALLOCATE_CACHE	0xb
#define DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE	0x7
/**
 * Destination descriptor command write transaction type for RBP=0:
 * coherent copy of cacheable memory
 */
#define DPAA2_COHERENT_ALLOCATE_CACHE		0x6
#define DPAA2_LX2_COHERENT_ALLOCATE_CACHE	0xb

/** Maximum possible H/W Queues on each core */
#define MAX_HW_QUEUE_PER_CORE 64

#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000)
/** Determines a QDMA job (one DMA copy request/completion) */
struct dpaa2_qdma_job {
	/** Source Address from where DMA is (to be) performed */
	uint64_t src;
	/** Destination Address where DMA is (to be) done */
	uint64_t dest;
	/** Length of the DMA operation in bytes. */
	uint32_t len;
	/** See RTE_QDMA_JOB_ flags */
	uint32_t flags;
	/**
	 * Status of the transaction.
	 * This is filled in the dequeue operation by the driver.
	 * upper 8bits acc_err for route by port.
	 * lower 8bits fd error
	 */
	uint16_t status;
	/** Identifier of the virtual queue this job belongs to */
	uint16_t vq_id;
	/**
	 * FLE pool element maintained by user, in case no qDMA response.
	 * Note: the address must be allocated from DPDK memory pool.
	 */
	void *usr_elem;
};
/**
 * Route-by-port (RBP) configuration: port/PF/VF identifiers for the
 * source and destination sides of a transfer. The dportid/sportid
 * encodings below map to PCI Express controllers.
 */
struct dpaa2_qdma_rbp {
	uint32_t use_ultrashort:1;
	uint32_t enable:1;
	/**
	 * dportid:
	 * 0000 PCI-Express 1
	 * 0001 PCI-Express 2
	 * 0010 PCI-Express 3
	 * 0011 PCI-Express 4
	 * 0100 PCI-Express 5
	 * 0101 PCI-Express 6
	 */
	uint32_t dportid:4;
	uint32_t dpfid:2;
	uint32_t dvfid:6;
	/* using route by port for destination */
	uint32_t drbp:1;
	/**
	 * sportid:
	 * 0000 PCI-Express 1
	 * 0001 PCI-Express 2
	 * 0010 PCI-Express 3
	 * 0011 PCI-Express 4
	 * 0100 PCI-Express 5
	 * 0101 PCI-Express 6
	 */
	uint32_t sportid:4;
	uint32_t spfid:2;
	uint32_t svfid:6;
	/* using route by port for source */
	uint32_t srbp:1;
	uint32_t rsv:4;
};
/** Source/Destination Descriptor.
 * Packed, hardware-defined layout (see the DPAA2_*_CACHE transaction
 * type codes above, which are written into rdtype/wrttype) — do not
 * reorder or resize the fields.
 */
struct qdma_sdd {
	uint32_t rsv;
	/** Stride configuration */
	uint32_t stride;
	/** Route-by-port command */
	union {
		uint32_t rbpcmd;
		struct rbpcmd_st {
			uint32_t vfid:6;
			uint32_t rsv4:2;
			uint32_t pfid:1;
			uint32_t rsv3:7;
			uint32_t attr:3;
			uint32_t rsv2:1;
			uint32_t at:2;
			uint32_t vfa:1;
			uint32_t ca:1;
			uint32_t tc:3;
			uint32_t rsv1:5;
		} rbpcmd_simple;
	};
	union {
		uint32_t cmd;
		/* Read transaction attributes (source descriptor) */
		struct rcmd_simple {
			uint32_t portid:4;
			uint32_t rsv1:14;
			uint32_t rbp:1;
			uint32_t ssen:1;
			uint32_t rthrotl:4;
			uint32_t sqos:3;
			uint32_t ns:1;
			uint32_t rdtype:4;
		} read_cmd;
		/* Write transaction attributes (destination descriptor) */
		struct wcmd_simple {
			uint32_t portid:4;
			uint32_t rsv3:10;
			uint32_t rsv2:2;
			uint32_t lwc:2;
			uint32_t rbp:1;
			uint32_t dsen:1;
			uint32_t rsv1:4;
			uint32_t dqos:3;
			uint32_t ns:1;
			uint32_t wrttype:4;
		} write_cmd;
	};
} __rte_packed;
/* Values for the "fmt" control field of an SG entry */
#define QDMA_SG_FMT_SDB	0x0 /* single data buffer */
#define QDMA_SG_FMT_FDS	0x1 /* frame data section */
#define QDMA_SG_FMT_SGTE	0x2 /* SGT extension */

/* Values for the "sl" (short/long length) control field */
#define QDMA_SG_SL_SHORT	0x1 /* short length */
#define QDMA_SG_SL_LONG	0x0 /* long length */

#define QDMA_SG_F	0x1 /* last sg entry */

/* Values for the "bmt" control field */
#define QDMA_SG_BMT_ENABLE	0x1
#define QDMA_SG_BMT_DISABLE	0x0

/* Hardware scatter-gather table entry; packed layout with a 48-bit
 * buffer address split across addr_lo/addr_hi.
 */
struct qdma_sg_entry {
	uint32_t addr_lo;		/* address 0:31 */
	uint32_t addr_hi:17;	/* address 32:48 */
	uint32_t rsv:15;
	union {
		uint32_t data_len_sl0;	/* SL=0, the long format */
		struct {
			uint32_t len:17;	/* SL=1, the short format */
			uint32_t reserve:3;
			uint32_t sf:1;
			uint32_t sr:1;
			uint32_t size:10;	/* buff size */
		} data_len_sl1;
	} data_len;		/* AVAIL_LENGTH */
	union {
		uint32_t ctrl_fields;
		struct {
			uint32_t bpid:14;
			uint32_t ivp:1;
			uint32_t bmt:1;
			uint32_t offset:12;
			uint32_t fmt:2;
			uint32_t sl:1;
			uint32_t f:1;
		} ctrl;
	};
} __rte_packed;
/** Represents a DPDMAI device */
struct dpaa2_dpdmai_dev {
	/** Pointer to Next device instance.
	 * NOTE(review): the TAILQ_ENTRY tag "dpaa2_qdma_device" is not
	 * declared anywhere in this header — it looks like a leftover
	 * from the rawdev driver; confirm the intended list element type.
	 */
	TAILQ_ENTRY(dpaa2_qdma_device) next;
	/** handle to DPDMAI object */
	struct fsl_mc_io dpdmai;
	/** HW ID for DPDMAI object */
	uint32_t dpdmai_id;
	/** Token of this device (returned by dpdmai_open()) */
	uint16_t token;
	/** Number of queue in this DPDMAI device */
	uint8_t num_queues;
	/** RX queues */
	struct dpaa2_queue rx_queue[DPAA2_DPDMAI_MAX_QUEUES];
	/** TX queues */
	struct dpaa2_queue tx_queue[DPAA2_DPDMAI_MAX_QUEUES];
	/** Software QDMA state (allocated at device init) */
	struct qdma_device *qdma_dev;
};
struct qdma_virt_queue;

/* Per-virtual-queue datapath hook types. Only the signatures are part
 * of this skeleton; implementations live in the enqueue/dequeue code.
 * The per-hook comments below are inferred from the names — confirm
 * against the datapath implementation.
 */

/* Recover job pointer(s) from a dequeued frame descriptor */
typedef uint16_t (qdma_get_job_t)(struct qdma_virt_queue *qdma_vq,
				  const struct qbman_fd *fd,
				  struct dpaa2_qdma_job **job,
				  uint16_t *nb_jobs);
/* Build a frame descriptor from job(s) for enqueue */
typedef int (qdma_set_fd_t)(struct qdma_virt_queue *qdma_vq,
			    struct qbman_fd *fd,
			    struct dpaa2_qdma_job **job,
			    uint16_t nb_jobs);

/* Dequeue up to nb_jobs completed jobs from the virtual queue */
typedef int (qdma_dequeue_multijob_t)(
		struct qdma_virt_queue *qdma_vq,
		uint16_t *vq_id,
		struct dpaa2_qdma_job **job,
		uint16_t nb_jobs);

/* Enqueue up to nb_jobs jobs on the virtual queue */
typedef int (qdma_enqueue_multijob_t)(
		struct qdma_virt_queue *qdma_vq,
		struct dpaa2_qdma_job **job,
		uint16_t nb_jobs);
/** Represents a QDMA virtual queue */
struct qdma_virt_queue {
	/** Status ring of the virtual queue */
	struct rte_ring *status_ring;
	/** Associated hw queue */
	struct dpaa2_dpdmai_dev *dpdmai_dev;
	/** FLE pool for the queue */
	struct rte_mempool *fle_pool;
	/** Route by port */
	struct dpaa2_qdma_rbp rbp;
	/** States if this vq is in use or not */
	uint8_t in_use;
	/** States if this vq has exclusively associated hw queue */
	uint8_t exclusive_hw_queue;
	/** Number of descriptor for the virtual DMA channel */
	uint16_t nb_desc;
	/* Total number of enqueues on this VQ */
	uint64_t num_enqueues;
	/* Total number of dequeues from this VQ */
	uint64_t num_dequeues;
	/* Identifier of this virtual queue */
	uint16_t vq_id;
	/* VQ configuration flags — presumably DPAA2_QDMA_VQ_* (verify) */
	uint32_t flags;
	/* Job pointers tracked for this VQ */
	struct dpaa2_qdma_job *job_list[DPAA2_QDMA_MAX_DESC];
	/* Pool that job structures are drawn from */
	struct rte_mempool *job_pool;
	int num_valid_jobs;
	/* dmadev statistics for this channel */
	struct rte_dma_stats stats;
	/* Datapath hooks, see the qdma_*_t typedefs above */
	qdma_set_fd_t *set_fd;
	qdma_get_job_t *get_job;
	qdma_dequeue_multijob_t *dequeue_job;
	qdma_enqueue_multijob_t *enqueue_job;
};
/** Represents a QDMA device. */
struct qdma_device {
	/** VQ's of this device */
	struct qdma_virt_queue *vqs;
	/** Total number of VQ's */
	uint16_t num_vqs;
	/** Device state - started or stopped; 1 means running (see
	 * dpaa2_qdma_reset(), which refuses to reset a running device).
	 */
	uint8_t state;
};
#endif /* _DPAA2_QDMA_H_ */

View File

@ -0,0 +1,46 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2018, 2021 NXP
*/
#ifndef __DPAA2_QDMA_LOGS_H__
#define __DPAA2_QDMA_LOGS_H__

#ifdef __cplusplus
extern "C" {
#endif

/* Dynamic log type; defined in dpaa2_qdma.c */
extern int dpaa2_qdma_logtype;

/* Control-path logging: always compiled in, filtered at run time by
 * the dynamic level of dpaa2_qdma_logtype.
 */
#define DPAA2_QDMA_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, dpaa2_qdma_logtype, "dpaa2_qdma: " \
		fmt "\n", ## args)

/* Debug variant also prints the calling function's name */
#define DPAA2_QDMA_DEBUG(fmt, args...) \
	rte_log(RTE_LOG_DEBUG, dpaa2_qdma_logtype, "dpaa2_qdma: %s(): " \
		fmt "\n", __func__, ## args)

#define DPAA2_QDMA_FUNC_TRACE() DPAA2_QDMA_DEBUG(">>")

#define DPAA2_QDMA_INFO(fmt, args...) \
	DPAA2_QDMA_LOG(INFO, fmt, ## args)
#define DPAA2_QDMA_ERR(fmt, args...) \
	DPAA2_QDMA_LOG(ERR, fmt, ## args)
#define DPAA2_QDMA_WARN(fmt, args...) \
	DPAA2_QDMA_LOG(WARNING, fmt, ## args)

/* DP Logs, toggled out at compile time if level lower than current level */
#define DPAA2_QDMA_DP_LOG(level, fmt, args...) \
	RTE_LOG_DP(level, PMD, "dpaa2_qdma: " fmt "\n", ## args)

#define DPAA2_QDMA_DP_DEBUG(fmt, args...) \
	DPAA2_QDMA_DP_LOG(DEBUG, fmt, ## args)
#define DPAA2_QDMA_DP_INFO(fmt, args...) \
	DPAA2_QDMA_DP_LOG(INFO, fmt, ## args)
#define DPAA2_QDMA_DP_WARN(fmt, args...) \
	DPAA2_QDMA_DP_LOG(WARNING, fmt, ## args)

#ifdef __cplusplus
}
#endif

#endif /* __DPAA2_QDMA_LOGS_H__ */

View File

@ -0,0 +1,16 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2021 NXP
if not is_linux
    build = false
    reason = 'only supported on linux'
endif

# Only probe the DPAA2 mempool dependency while the build is still
# enabled: the previous unconditional assignment overwrote the result
# of the OS check above and could re-enable a disabled build.
if build
    build = dpdk_conf.has('RTE_MEMPOOL_DPAA2')
    if not build
        reason = 'missing dependency, DPDK DPAA2 mempool driver'
    endif
endif

deps += ['dmadev', 'bus_fslmc', 'mempool_dpaa2', 'ring', 'kvargs']
sources = files('dpaa2_qdma.c')

if cc.has_argument('-Wno-pointer-arith')
    cflags += '-Wno-pointer-arith'
endif

View File

@ -0,0 +1,3 @@
# No symbols are exported by this skeleton yet; everything stays local.
DPDK_22 {
	local: *;
};

View File

@ -4,6 +4,7 @@
drivers = [
'cnxk',
'dpaa',
'dpaa2',
'hisilicon',
'idxd',
'ioat',