dma/dpaa: support DMA operations

This patch adds support for the copy, submit, completed and
completed-status operations of the DMA driver.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Gagandeep Singh <g.singh@nxp.com>, 2021-11-09 10:09:09 +05:30; committed by Thomas Monjalon
parent 453d8273d4
commit 7da29a644c
3 changed files with 376 additions and 0 deletions

doc/guides/dmadevs/dpaa.rst

@@ -48,6 +48,18 @@ Initialization

On EAL initialization, DPAA DMA devices will be detected on the DPAA bus and
will be probed and populated into their device list.

Features
--------

The DPAA DMA driver implements the following features in the dmadev API:

- Supports 1 virtual channel.
- Supports all 4 DMA transfer types: MEM_TO_MEM, MEM_TO_DEV,
  DEV_TO_MEM and DEV_TO_DEV.
- Supports DMA silent mode.
- Supports issuing DMA transfers of data within memory without hogging the
  CPU while the operation is in progress.
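A minimal usage sketch of these operations through the generic dmadev API
follows; it is illustrative only, and ``dev_id``/``vchan`` are placeholders
for identifiers obtained during device setup:

.. code-block:: c

   /* Illustrative dmadev flow (dev_id, vchan, src_iova, dst_iova and len
    * are placeholders); error handling omitted for brevity. */
   uint16_t last_idx;
   bool has_error = false;

   rte_dma_copy(dev_id, vchan, src_iova, dst_iova, len, 0);
   rte_dma_submit(dev_id, vchan);

   while (rte_dma_completed(dev_id, vchan, 1, &last_idx, &has_error) == 0)
           ;   /* poll until the single copy completes */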
Platform Requirement
--------------------

drivers/dma/dpaa/dpaa_qdma.c

@@ -15,12 +15,50 @@ qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
	ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
}

static inline u64
qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
{
	return ccdf->cfg8b_w1 & 0xff;
}

static inline int
qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
{
	return (rte_le_to_cpu_32(ccdf->cfg) & QDMA_CCDF_MASK)
		>> QDMA_CCDF_OFFSET;
}

static inline void
qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
{
	ccdf->cfg = rte_cpu_to_le_32(QDMA_CCDF_FOTMAT | offset);
}

static inline int
qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
{
	return (rte_le_to_cpu_32(ccdf->status) & QDMA_CCDF_MASK)
		>> QDMA_CCDF_STATUS;
}

static inline void
qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
{
	ccdf->status = rte_cpu_to_le_32(QDMA_CCDF_SER | status);
}

static inline void
qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
{
	csgf->cfg = rte_cpu_to_le_32(len & QDMA_SG_LEN_MASK);
}

static inline void
qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
{
	csgf->cfg = rte_cpu_to_le_32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
}
static inline int
ilog2(int x)
{
@@ -47,6 +85,18 @@ qdma_writel(u32 val, void *addr)
	QDMA_OUT(addr, val);
}
static u32
qdma_readl_be(void *addr)
{
	return QDMA_IN_BE(addr);
}

static void
qdma_writel_be(u32 val, void *addr)
{
	QDMA_OUT_BE(addr, val);
}
static void
*dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
{
@@ -104,6 +154,32 @@ finally:
	fsl_qdma->desc_allocated--;
}
static void
fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
			  dma_addr_t dst, dma_addr_t src, u32 len)
{
	struct fsl_qdma_format *csgf_src, *csgf_dest;

	/*
	 * Note: the command table (fsl_comp->virt_addr) is filled directly
	 * in the command descriptors of the queues while enqueuing the
	 * descriptor; see fsl_qdma_enqueue_desc. The frame list table
	 * (virt_addr + 1) and the source/destination descriptor tables
	 * (fsl_comp->desc_virt_addr and fsl_comp->desc_virt_addr + 1) are
	 * set up on the control path in
	 * fsl_qdma_pre_request_enqueue_comp_sd_desc.
	 */
	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;

	/* Status notification is enqueued to the status queue. */
	qdma_desc_addr_set64(csgf_src, src);
	qdma_csgf_set_len(csgf_src, len);
	qdma_desc_addr_set64(csgf_dest, dst);
	qdma_csgf_set_len(csgf_dest, len);
	/* This entry is the last entry. */
	qdma_csgf_set_f(csgf_dest, len);
}
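The +2/+3 pointer arithmetic above implies a small table of
struct fsl_qdma_format entries per job; the layout below is a sketch
inferred from this file's usage, not taken from hardware documentation:

	/*
	 * Inferred compound-frame layout (one struct fsl_qdma_format each):
	 *   fsl_comp->virt_addr + 0 : command descriptor (filled at enqueue)
	 *   fsl_comp->virt_addr + 1 : compound frame list entry
	 *   fsl_comp->virt_addr + 2 : source S/G entry      (csgf_src)
	 *   fsl_comp->virt_addr + 3 : destination S/G entry (csgf_dest,
	 *                             carries the QDMA_SG_FIN bit)
	 */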
/*
 * Pre-request command descriptor and compound S/G for enqueue.
 */
@@ -175,6 +251,26 @@ fail:
	return -ENOMEM;
}
/*
 * Request a command descriptor for enqueue.
 */
static struct fsl_qdma_comp *
fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
	struct fsl_qdma_queue *queue = fsl_chan->queue;
	struct fsl_qdma_comp *comp_temp;

	if (!list_empty(&queue->comp_free)) {
		comp_temp = list_first_entry(&queue->comp_free,
					     struct fsl_qdma_comp,
					     list);
		list_del(&comp_temp->list);
		return comp_temp;
	}

	return NULL;
}
static struct fsl_qdma_queue
*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma)
{
@@ -324,6 +420,54 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
	return 0;
}
static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
				 void *block, int id, const uint16_t nb_cpls,
				 uint16_t *last_idx,
				 enum rte_dma_status_code *status)
{
	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
	struct fsl_qdma_queue *temp_queue;
	struct fsl_qdma_format *status_addr;
	struct fsl_qdma_comp *fsl_comp = NULL;
	u32 reg, i;
	int count = 0;

	while (count < nb_cpls) {
		/* Stop as soon as the status queue reports empty. */
		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
		if (reg & FSL_QDMA_BSQSR_QE_BE)
			return count;

		status_addr = fsl_status->virt_head;

		/* Map the status entry back to its command queue. */
		i = qdma_ccdf_get_queue(status_addr) +
			id * fsl_qdma->n_queues;
		temp_queue = fsl_queue + i;
		fsl_comp = list_first_entry(&temp_queue->comp_used,
					    struct fsl_qdma_comp,
					    list);
		list_del(&fsl_comp->list);

		/* Consume the status entry and advance the head with wrap. */
		reg = qdma_readl_be(block + FSL_QDMA_BSQMR);
		reg |= FSL_QDMA_BSQMR_DI_BE;
		qdma_desc_addr_set64(status_addr, 0x0);

		fsl_status->virt_head++;
		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
			fsl_status->virt_head = fsl_status->cq;
		qdma_writel_be(reg, block + FSL_QDMA_BSQMR);
		*last_idx = fsl_comp->index;
		if (status != NULL)
			status[count] = RTE_DMA_STATUS_SUCCESSFUL;

		/* Recycle the completed descriptor. */
		list_add_tail(&fsl_comp->list, &temp_queue->comp_free);
		count++;
	}

	return count;
}
static int
fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
{
@@ -419,6 +563,66 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
	return 0;
}
static void *
fsl_qdma_prep_memcpy(void *fsl_chan, dma_addr_t dst,
		     dma_addr_t src, size_t len,
		     void *call_back,
		     void *param)
{
	struct fsl_qdma_comp *fsl_comp;

	fsl_comp =
	fsl_qdma_request_enqueue_desc((struct fsl_qdma_chan *)fsl_chan);
	if (!fsl_comp)
		return NULL;

	fsl_comp->qchan = fsl_chan;
	fsl_comp->call_back_func = call_back;
	fsl_comp->params = param;

	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
	return (void *)fsl_comp;
}
static int
fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
		      struct fsl_qdma_comp *fsl_comp,
		      uint64_t flags)
{
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	void *block = fsl_queue->block_base;
	struct fsl_qdma_format *ccdf;
	u32 reg;

	/*
	 * Read the queue status as a raw big-endian image so no byte
	 * swap is needed before testing the full/XOFF bits.
	 */
	reg = qdma_readl_be(block +
			    FSL_QDMA_BCQSR(fsl_queue->id));
	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE))
		return -1;

	/* Fill the descriptor command table. */
	ccdf = (struct fsl_qdma_format *)fsl_queue->virt_head;
	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(fsl_comp->virt_addr));
	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(fsl_comp->virt_addr));
	fsl_comp->index = fsl_queue->virt_head - fsl_queue->cq;
	fsl_queue->virt_head++;

	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
		fsl_queue->virt_head = fsl_queue->cq;

	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);

	if (flags == RTE_DMA_OP_FLAG_SUBMIT) {
		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
		reg |= FSL_QDMA_BCQMR_EI_BE;
		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
	} else {
		/*
		 * Deferred doorbell: count the op so dpaa_qdma_submit()
		 * can ring BCQMR once per pending descriptor (assumed
		 * pairing with the fsl_queue->pending loop below).
		 */
		fsl_queue->pending++;
	}
	return fsl_comp->index;
}
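The flags test above gives the driver both submission modes of the dmadev
API: with RTE_DMA_OP_FLAG_SUBMIT the doorbell is rung per operation,
otherwise operations accumulate until rte_dma_submit() is called. A sketch
of the deferred pattern from the application side (dev_id, vchan, src, dst
and len are placeholders):

	/* Illustrative only: queue n copies without the submit flag,
	 * then ring the doorbell once; rte_dma_submit() lands in
	 * dpaa_qdma_submit() below. */
	for (i = 0; i < n; i++)
		rte_dma_copy(dev_id, vchan, src[i], dst[i], len, 0);
	rte_dma_submit(dev_id, vchan);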
static int
fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan)
{
@@ -535,6 +739,132 @@ dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
	return dpaa_get_channel(fsl_qdma, vchan);
}
static int
dpaa_qdma_submit(void *dev_private, uint16_t vchan)
{
	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
	struct fsl_qdma_chan *fsl_chan =
		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	void *block = fsl_queue->block_base;
	u32 reg;

	/* Ring the enqueue-increment doorbell once per pending op. */
	while (fsl_queue->pending) {
		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
		reg |= FSL_QDMA_BCQMR_EI_BE;
		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
		fsl_queue->pending--;
	}

	return 0;
}
static int
dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
		  rte_iova_t src, rte_iova_t dst,
		  uint32_t length, uint64_t flags)
{
	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
	struct fsl_qdma_chan *fsl_chan =
		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
	int ret;
	void *fsl_comp = NULL;

	fsl_comp = fsl_qdma_prep_memcpy(fsl_chan,
			(dma_addr_t)dst, (dma_addr_t)src,
			length, NULL, NULL);
	if (!fsl_comp) {
		DPAA_QDMA_DP_DEBUG("fsl_comp is NULL\n");
		return -1;
	}
	ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags);

	return ret;
}
static uint16_t
dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
			 const uint16_t nb_cpls, uint16_t *last_idx,
			 enum rte_dma_status_code *st)
{
	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
	void *block;
	int intr;
	void *status = fsl_qdma->status_base;

	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
	if (intr) {
		/* Dump the error-capture registers, then clear them. */
		DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECBR);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
		qdma_writel(0xffffffff,
			    status + FSL_QDMA_DEDR);
		intr = qdma_readl(status + FSL_QDMA_DEDR);
	}

	block = fsl_qdma->block_base +
		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);

	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
						last_idx, st);

	return intr;
}
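This entry point backs rte_dma_completed_status(), which lets the
application retrieve a status code per reaped operation. A usage sketch
(dev_id and vchan are placeholders; handle_failure() is a hypothetical
callback, not part of this patch):

	enum rte_dma_status_code st[32];
	uint16_t last_idx, i, n;

	n = rte_dma_completed_status(dev_id, vchan, 32, &last_idx, st);
	for (i = 0; i < n; i++)
		if (st[i] != RTE_DMA_STATUS_SUCCESSFUL)
			handle_failure(i, st[i]);	/* hypothetical */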
static uint16_t
dpaa_qdma_dequeue(void *dev_private,
		  uint16_t vchan, const uint16_t nb_cpls,
		  uint16_t *last_idx, bool *has_error)
{
	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
	void *block;
	int intr;
	void *status = fsl_qdma->status_base;

	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
	if (intr) {
		DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
		intr = qdma_readl(status + FSL_QDMA_DECBR);
		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
		qdma_writel(0xffffffff,
			    status + FSL_QDMA_DEDR);
		intr = qdma_readl(status + FSL_QDMA_DEDR);
		*has_error = true;
	}

	block = fsl_qdma->block_base +
		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);

	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
						last_idx, NULL);

	return intr;
}
static struct rte_dma_dev_ops dpaa_qdma_ops = {
	.dev_info_get = dpaa_info_get,
	.dev_configure = dpaa_qdma_configure,
@@ -652,6 +982,10 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
	dmadev->dev_ops = &dpaa_qdma_ops;
	dmadev->device = &dpaa_dev->device;
	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
	dmadev->fp_obj->copy = dpaa_qdma_enqueue;
	dmadev->fp_obj->submit = dpaa_qdma_submit;
	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;

	/* Invoke PMD device initialization function */
	ret = dpaa_qdma_init(dmadev);
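With the fast-path table populated, the generic inline wrappers in
rte_dmadev.h dispatch straight into this PMD without going through dev_ops;
roughly (simplified from the dmadev library for orientation, not a verbatim
copy):

	/* rte_dma_copy(dev_id, ...) essentially expands to: */
	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
	return obj->copy(obj->dev_private, vchan, src, dst, length, flags);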

drivers/dma/dpaa/dpaa_qdma.h

@@ -7,6 +7,10 @@

#include <rte_io.h>

#ifndef BIT
#define BIT(nr)		(1UL << (nr))
#endif
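GENMASK() is used by the CCDF/SG masks below but is not defined in this
hunk; the code assumes a Linux-style definition in which GENMASK(h, l) sets
bits h down to l inclusive:

	/* Assumed semantics (defined elsewhere, not by this patch):
	 *   GENMASK(29, 0)  == 0x3fffffff   (QDMA_SG_LEN_MASK)
	 *   GENMASK(28, 20) == 0x1ff00000   (QDMA_CCDF_MASK)
	 */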
#define CORE_NUMBER	4
#define RETRIES		5
@@ -18,8 +22,16 @@

#define FSL_QDMA_DMR		0x0
#define FSL_QDMA_DSR		0x4
#define FSL_QDMA_DEDR 0xe04
#define FSL_QDMA_DECFDW0R 0xe10
#define FSL_QDMA_DECFDW1R 0xe14
#define FSL_QDMA_DECFDW2R 0xe18
#define FSL_QDMA_DECFDW3R 0xe1c
#define FSL_QDMA_DECFQIDR 0xe30
#define FSL_QDMA_DECBR 0xe34
#define FSL_QDMA_BCQMR(x)		(0xc0 + 0x100 * (x))
#define FSL_QDMA_BCQSR(x)		(0xc4 + 0x100 * (x))
#define FSL_QDMA_BCQEDPA_SADDR(x)	(0xc8 + 0x100 * (x))
#define FSL_QDMA_BCQDPA_SADDR(x)	(0xcc + 0x100 * (x))
#define FSL_QDMA_BCQEEPA_SADDR(x)	(0xd0 + 0x100 * (x))
@@ -32,6 +44,7 @@
#define FSL_QDMA_SQEEPAR	0x810
#define FSL_QDMA_SQEPAR		0x814
#define FSL_QDMA_BSQSR 0x804
#define FSL_QDMA_BSQICR		0x828
#define FSL_QDMA_CQIER		0xa10
#define FSL_QDMA_SQCCMR		0xa20
@@ -41,12 +54,18 @@
#define FSL_QDMA_QUEUE_MAX	8

#define FSL_QDMA_BCQMR_EN		0x80000000
#define FSL_QDMA_BCQMR_EI_BE		0x40
#define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
#define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)

#define FSL_QDMA_BCQSR_QF_XOFF_BE	0x1000100

#define FSL_QDMA_BSQMR_EN		0x80000000
#define FSL_QDMA_BSQMR_DI_BE		0x40
#define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)

#define FSL_QDMA_BSQSR_QE_BE	0x200

#define FSL_QDMA_DMR_DQD	0x40000000
#define FSL_QDMA_DSR_DB		0x80000000
@@ -62,6 +81,13 @@
#define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
#define FSL_QDMA_CMD_LWC_OFFSET		16
#define QDMA_CCDF_STATUS 20
#define QDMA_CCDF_OFFSET 20
#define QDMA_CCDF_MASK GENMASK(28, 20)
#define QDMA_CCDF_FOTMAT BIT(29)
#define QDMA_CCDF_SER BIT(30)
#define QDMA_SG_FIN BIT(30)
#define QDMA_SG_LEN_MASK	GENMASK(29, 0)

#define COMMAND_QUEUE_OVERFLOW	10
@@ -80,9 +106,13 @@
#ifdef QDMA_BIG_ENDIAN
#define QDMA_IN(addr)		be32_to_cpu(rte_read32(addr))
#define QDMA_OUT(addr, val)	rte_write32(be32_to_cpu(val), addr)
#define QDMA_IN_BE(addr) rte_read32(addr)
#define QDMA_OUT_BE(addr, val) rte_write32(val, addr)
#else
#define QDMA_IN(addr)		rte_read32(addr)
#define QDMA_OUT(addr, val)	rte_write32(val, addr)
#define QDMA_IN_BE(addr)	be32_to_cpu(rte_read32(addr))
#define QDMA_OUT_BE(addr, val)	rte_write32(be32_to_cpu(val), addr)
#endif

#define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x) \