dma/hisilicon: add control path

This patch adds control path functions for Kunpeng DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Author: Chengwen Feng
Date: 2021-11-02 20:37:40 +08:00
Committed by: Thomas Monjalon
parent 9e16317a38
commit 3c5f5f03a0
3 changed files with 494 additions and 0 deletions

File: doc/guides/dmadevs/hisilicon.rst

@@ -31,3 +31,13 @@ The name of the ``dmadev`` created is like "B:D.F-chX", e.g. DMA 0000:7b:00.0
will create four ``dmadev``,
the 1st ``dmadev`` name is "7b:00.0-ch0",
and the 2nd ``dmadev`` name is "7b:00.0-ch1".
Device Configuration
~~~~~~~~~~~~~~~~~~~~~
Kunpeng DMA configuration requirements (see the configuration sketch after this list):
* ``ring_size`` must be a power of two, between 32 and 8192.
* Only one ``vchan`` is supported per ``dmadev``.
* Silent mode is not supported.
* The transfer direction must be set to ``RTE_DMA_DIR_MEM_TO_MEM``.
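Taken together, these requirements map directly onto the generic ``dmadev`` API. Below is a minimal, illustrative sketch (not part of this patch) of how an application might configure and start such a device; the ``dev_id`` value and the descriptor count of 1024 are assumptions for the example.

#include <rte_dmadev.h>

/* Illustrative only: configure and start a Kunpeng dmadev identified by dev_id. */
static int
setup_kunpeng_dmadev(int16_t dev_id)
{
	struct rte_dma_conf dev_conf = {
		.nb_vchans = 1,		/* only one vchan per dmadev */
		.enable_silent = false,	/* silent mode is not supported */
	};
	struct rte_dma_vchan_conf vchan_conf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 1024,	/* power of two in [32, 8192] */
	};
	int ret;

	ret = rte_dma_configure(dev_id, &dev_conf);
	if (ret != 0)
		return ret;
	ret = rte_dma_vchan_setup(dev_id, 0, &vchan_conf);
	if (ret != 0)
		return ret;
	return rte_dma_start(dev_id);
}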

File: drivers/dma/hisilicon/hisi_dmadev.c

@@ -10,6 +10,8 @@
#include <rte_eal.h>
#include <rte_io.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_pci.h>
#include <rte_dmadev_pmd.h>
@@ -41,6 +43,14 @@ hisi_dma_queue_base(struct hisi_dma_dev *hw)
return 0;
}
static volatile void *
hisi_dma_queue_regaddr(struct hisi_dma_dev *hw, uint32_t qoff)
{
uint32_t off = hisi_dma_queue_base(hw) +
hw->queue_id * HISI_DMA_QUEUE_REGION_SIZE + qoff;
return (volatile void *)((char *)hw->io_base + off);
}
static void
hisi_dma_write_reg(void *base, uint32_t off, uint32_t val)
{
@@ -103,6 +113,15 @@ hisi_dma_update_queue_bit(struct hisi_dma_dev *hw, uint32_t qoff, uint32_t pos,
hisi_dma_write_queue(hw, qoff, tmp);
}
static void
hisi_dma_update_queue_mbit(struct hisi_dma_dev *hw, uint32_t qoff,
uint32_t mask, bool set)
{
uint32_t tmp = hisi_dma_read_queue(hw, qoff);
tmp = set ? tmp | mask : tmp & ~mask;
hisi_dma_write_queue(hw, qoff, tmp);
}
#define hisi_dma_poll_hw_state(hw, val, cond, sleep_us, timeout_us) ({ \
uint32_t timeout = 0; \
while (timeout++ <= (timeout_us)) { \
@@ -154,6 +173,45 @@ hisi_dma_reset_hw(struct hisi_dma_dev *hw)
return 0;
}
static void
hisi_dma_init_hw(struct hisi_dma_dev *hw)
{
hisi_dma_write_queue(hw, HISI_DMA_QUEUE_SQ_BASE_L_REG,
lower_32_bits(hw->sqe_iova));
hisi_dma_write_queue(hw, HISI_DMA_QUEUE_SQ_BASE_H_REG,
upper_32_bits(hw->sqe_iova));
hisi_dma_write_queue(hw, HISI_DMA_QUEUE_CQ_BASE_L_REG,
lower_32_bits(hw->cqe_iova));
hisi_dma_write_queue(hw, HISI_DMA_QUEUE_CQ_BASE_H_REG,
upper_32_bits(hw->cqe_iova));
hisi_dma_write_queue(hw, HISI_DMA_QUEUE_SQ_DEPTH_REG,
hw->sq_depth_mask);
hisi_dma_write_queue(hw, HISI_DMA_QUEUE_CQ_DEPTH_REG, hw->cq_depth - 1);
hisi_dma_write_queue(hw, HISI_DMA_QUEUE_SQ_TAIL_REG, 0);
hisi_dma_write_queue(hw, HISI_DMA_QUEUE_CQ_HEAD_REG, 0);
hisi_dma_write_queue(hw, HISI_DMA_QUEUE_ERR_INT_NUM0_REG, 0);
hisi_dma_write_queue(hw, HISI_DMA_QUEUE_ERR_INT_NUM1_REG, 0);
hisi_dma_write_queue(hw, HISI_DMA_QUEUE_ERR_INT_NUM2_REG, 0);
if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
hisi_dma_write_queue(hw, HISI_DMA_HIP08_QUEUE_ERR_INT_NUM3_REG,
0);
hisi_dma_write_queue(hw, HISI_DMA_HIP08_QUEUE_ERR_INT_NUM4_REG,
0);
hisi_dma_write_queue(hw, HISI_DMA_HIP08_QUEUE_ERR_INT_NUM5_REG,
0);
hisi_dma_write_queue(hw, HISI_DMA_HIP08_QUEUE_ERR_INT_NUM6_REG,
0);
hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL0_REG,
HISI_DMA_HIP08_QUEUE_CTRL0_ERR_ABORT_B, false);
hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_STATUS_REG,
HISI_DMA_HIP08_QUEUE_INT_MASK_M, true);
hisi_dma_update_queue_mbit(hw,
HISI_DMA_HIP08_QUEUE_INT_MASK_REG,
HISI_DMA_HIP08_QUEUE_INT_MASK_M, true);
}
}
static void
hisi_dma_init_gbl(void *pci_bar, uint8_t revision)
{
@@ -176,6 +234,301 @@ hisi_dma_reg_layout(uint8_t revision)
return HISI_DMA_REG_LAYOUT_INVALID;
}
static void
hisi_dma_zero_iomem(struct hisi_dma_dev *hw)
{
memset(hw->iomz->addr, 0, hw->iomz_sz);
}
static int
hisi_dma_alloc_iomem(struct hisi_dma_dev *hw, uint16_t ring_size,
const char *dev_name)
{
uint32_t sq_size = sizeof(struct hisi_dma_sqe) * ring_size;
uint32_t cq_size = sizeof(struct hisi_dma_cqe) *
(ring_size + HISI_DMA_CQ_RESERVED);
uint32_t status_size = sizeof(uint16_t) * ring_size;
char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *iomz;
uint32_t total_size;
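/*
 * The SQ ring, CQ ring and completion status array live back to back in a
 * single IOVA-contiguous memzone; each region is rounded up to a cache line.
 */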
sq_size = RTE_CACHE_LINE_ROUNDUP(sq_size);
cq_size = RTE_CACHE_LINE_ROUNDUP(cq_size);
status_size = RTE_CACHE_LINE_ROUNDUP(status_size);
total_size = sq_size + cq_size + status_size;
(void)snprintf(mz_name, sizeof(mz_name), "hisi_dma:%s", dev_name);
iomz = rte_memzone_reserve(mz_name, total_size, hw->data->numa_node,
RTE_MEMZONE_IOVA_CONTIG);
if (iomz == NULL) {
HISI_DMA_ERR(hw, "malloc %s iomem fail!", mz_name);
return -ENOMEM;
}
hw->iomz = iomz;
hw->iomz_sz = total_size;
hw->sqe = iomz->addr;
hw->cqe = (void *)((char *)iomz->addr + sq_size);
hw->status = (void *)((char *)iomz->addr + sq_size + cq_size);
hw->sqe_iova = iomz->iova;
hw->cqe_iova = iomz->iova + sq_size;
hw->sq_depth_mask = ring_size - 1;
hw->cq_depth = ring_size + HISI_DMA_CQ_RESERVED;
hisi_dma_zero_iomem(hw);
return 0;
}
static void
hisi_dma_free_iomem(struct hisi_dma_dev *hw)
{
if (hw->iomz != NULL)
rte_memzone_free(hw->iomz);
hw->iomz = NULL;
hw->sqe = NULL;
hw->cqe = NULL;
hw->status = NULL;
hw->sqe_iova = 0;
hw->cqe_iova = 0;
hw->sq_depth_mask = 0;
hw->cq_depth = 0;
}
static int
hisi_dma_info_get(const struct rte_dma_dev *dev,
struct rte_dma_info *dev_info,
uint32_t info_sz)
{
RTE_SET_USED(dev);
RTE_SET_USED(info_sz);
dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
RTE_DMA_CAPA_OPS_COPY;
dev_info->max_vchans = 1;
dev_info->max_desc = HISI_DMA_MAX_DESC_NUM;
dev_info->min_desc = HISI_DMA_MIN_DESC_NUM;
return 0;
}
static int
hisi_dma_configure(struct rte_dma_dev *dev,
const struct rte_dma_conf *conf,
uint32_t conf_sz)
{
RTE_SET_USED(dev);
RTE_SET_USED(conf);
RTE_SET_USED(conf_sz);
return 0;
}
static int
hisi_dma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
const struct rte_dma_vchan_conf *conf,
uint32_t conf_sz)
{
struct hisi_dma_dev *hw = dev->data->dev_private;
int ret;
RTE_SET_USED(vchan);
RTE_SET_USED(conf_sz);
if (!rte_is_power_of_2(conf->nb_desc)) {
HISI_DMA_ERR(hw, "Number of desc must be power of 2!");
return -EINVAL;
}
hisi_dma_free_iomem(hw);
ret = hisi_dma_alloc_iomem(hw, conf->nb_desc, dev->data->dev_name);
if (ret)
return ret;
return 0;
}
static int
hisi_dma_start(struct rte_dma_dev *dev)
{
struct hisi_dma_dev *hw = dev->data->dev_private;
if (hw->iomz == NULL) {
HISI_DMA_ERR(hw, "Vchan was not setup, start fail!\n");
return -EINVAL;
}
/* Reset the dmadev to a known state, including:
 * 1) zeroing the iomem, which also covers the status fields.
 * 2) initializing the hardware registers.
 * 3) initializing the index values to zero.
 * 4) initializing the running statistics.
 */
hisi_dma_zero_iomem(hw);
hisi_dma_init_hw(hw);
hw->ridx = 0;
hw->cridx = 0;
hw->sq_head = 0;
hw->sq_tail = 0;
hw->cq_sq_head = 0;
hw->cq_head = 0;
hw->cqs_completed = 0;
hw->cqe_vld = 1;
hw->submitted = 0;
hw->completed = 0;
hw->errors = 0;
hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL0_REG,
HISI_DMA_QUEUE_CTRL0_EN_B, true);
return 0;
}
static int
hisi_dma_stop(struct rte_dma_dev *dev)
{
return hisi_dma_reset_hw(dev->data->dev_private);
}
static int
hisi_dma_close(struct rte_dma_dev *dev)
{
/* The dmadev is already stopped. */
hisi_dma_free_iomem(dev->data->dev_private);
return 0;
}
static int
hisi_dma_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
struct rte_dma_stats *stats,
uint32_t stats_sz)
{
struct hisi_dma_dev *hw = dev->data->dev_private;
RTE_SET_USED(vchan);
RTE_SET_USED(stats_sz);
stats->submitted = hw->submitted;
stats->completed = hw->completed;
stats->errors = hw->errors;
return 0;
}
static int
hisi_dma_stats_reset(struct rte_dma_dev *dev, uint16_t vchan)
{
struct hisi_dma_dev *hw = dev->data->dev_private;
RTE_SET_USED(vchan);
hw->submitted = 0;
hw->completed = 0;
hw->errors = 0;
return 0;
}
static void
hisi_dma_get_dump_range(struct hisi_dma_dev *hw, uint32_t *start, uint32_t *end)
{
if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
*start = HISI_DMA_HIP08_DUMP_START_REG;
*end = HISI_DMA_HIP08_DUMP_END_REG;
} else {
*start = 0;
*end = 0;
}
}
static void
hisi_dma_dump_common(struct hisi_dma_dev *hw, FILE *f)
{
#define DUMP_REGNUM_PER_LINE 4
uint32_t start, end;
uint32_t cnt, i;
hisi_dma_get_dump_range(hw, &start, &end);
(void)fprintf(f, " common-register:\n");
cnt = 0;
for (i = start; i <= end; i += sizeof(uint32_t)) {
if (cnt % DUMP_REGNUM_PER_LINE == 0)
(void)fprintf(f, " [%4x]:", i);
(void)fprintf(f, " 0x%08x", hisi_dma_read_dev(hw, i));
cnt++;
if (cnt % DUMP_REGNUM_PER_LINE == 0)
(void)fprintf(f, "\n");
}
if (cnt % DUMP_REGNUM_PER_LINE)
(void)fprintf(f, "\n");
}
static void
hisi_dma_dump_read_queue(struct hisi_dma_dev *hw, uint32_t qoff,
char *buffer, int max_sz)
{
memset(buffer, 0, max_sz);
/* Address-related registers are not printed for security reasons. */
if (qoff == HISI_DMA_QUEUE_SQ_BASE_L_REG ||
qoff == HISI_DMA_QUEUE_SQ_BASE_H_REG ||
qoff == HISI_DMA_QUEUE_CQ_BASE_L_REG ||
qoff == HISI_DMA_QUEUE_CQ_BASE_H_REG) {
(void)snprintf(buffer, max_sz, "**********");
return;
}
(void)snprintf(buffer, max_sz, "0x%08x", hisi_dma_read_queue(hw, qoff));
}
static void
hisi_dma_dump_queue(struct hisi_dma_dev *hw, FILE *f)
{
#define REG_FMT_LEN 32
char buf[REG_FMT_LEN] = { 0 };
uint32_t i;
(void)fprintf(f, " queue-register:\n");
for (i = 0; i < HISI_DMA_QUEUE_REGION_SIZE; ) {
hisi_dma_dump_read_queue(hw, i, buf, sizeof(buf));
(void)fprintf(f, " [%2x]: %s", i, buf);
i += sizeof(uint32_t);
hisi_dma_dump_read_queue(hw, i, buf, sizeof(buf));
(void)fprintf(f, " %s", buf);
i += sizeof(uint32_t);
hisi_dma_dump_read_queue(hw, i, buf, sizeof(buf));
(void)fprintf(f, " %s", buf);
i += sizeof(uint32_t);
hisi_dma_dump_read_queue(hw, i, buf, sizeof(buf));
(void)fprintf(f, " %s\n", buf);
i += sizeof(uint32_t);
}
}
static int
hisi_dma_dump(const struct rte_dma_dev *dev, FILE *f)
{
struct hisi_dma_dev *hw = dev->data->dev_private;
(void)fprintf(f,
" revision: 0x%x queue_id: %u ring_size: %u\n"
" ridx: %u cridx: %u\n"
" sq_head: %u sq_tail: %u cq_sq_head: %u\n"
" cq_head: %u cqs_completed: %u cqe_vld: %u\n"
" submitted: %" PRIu64 " completed: %" PRIu64 " errors %"
PRIu64"\n",
hw->revision, hw->queue_id,
hw->sq_depth_mask > 0 ? hw->sq_depth_mask + 1 : 0,
hw->ridx, hw->cridx,
hw->sq_head, hw->sq_tail, hw->cq_sq_head,
hw->cq_head, hw->cqs_completed, hw->cqe_vld,
hw->submitted, hw->completed, hw->errors);
hisi_dma_dump_queue(hw, f);
hisi_dma_dump_common(hw, f);
return 0;
}
static void
hisi_dma_gen_pci_device_name(const struct rte_pci_device *pci_dev,
char *name, size_t size)
@@ -196,6 +549,33 @@ hisi_dma_gen_dev_name(const struct rte_pci_device *pci_dev,
pci_dev->addr.function, queue_id);
}
/**
 * Hardware queue state machine:
 *
 *   -----------  dmadev_create   ------------------
 *   | Unknown | ---------------> |      IDLE      |
 *   -----------                  ------------------
 *                                   ^          |
 *                                   |          |dev_start
 *                           dev_stop|          |
 *                                   |          v
 *                                ------------------
 *                                |      RUN       |
 *                                ------------------
 *
 */
static const struct rte_dma_dev_ops hisi_dmadev_ops = {
.dev_info_get = hisi_dma_info_get,
.dev_configure = hisi_dma_configure,
.dev_start = hisi_dma_start,
.dev_stop = hisi_dma_stop,
.dev_close = hisi_dma_close,
.vchan_setup = hisi_dma_vchan_setup,
.stats_get = hisi_dma_stats_get,
.stats_reset = hisi_dma_stats_reset,
.dev_dump = hisi_dma_dump,
};
static int
hisi_dma_create(struct rte_pci_device *pci_dev, uint8_t queue_id,
uint8_t revision)
@@ -216,6 +596,7 @@ hisi_dma_create(struct rte_pci_device *pci_dev, uint8_t queue_id,
}
dev->device = &pci_dev->device;
dev->dev_ops = &hisi_dmadev_ops;
hw = dev->data->dev_private;
hw->data = dev->data;
@@ -223,6 +604,10 @@ hisi_dma_create(struct rte_pci_device *pci_dev, uint8_t queue_id,
hw->reg_layout = hisi_dma_reg_layout(revision);
hw->io_base = pci_dev->mem_resource[REG_PCI_BAR_INDEX].addr;
hw->queue_id = queue_id;
hw->sq_tail_reg = hisi_dma_queue_regaddr(hw,
HISI_DMA_QUEUE_SQ_TAIL_REG);
hw->cq_head_reg = hisi_dma_queue_regaddr(hw,
HISI_DMA_QUEUE_CQ_HEAD_REG);
ret = hisi_dma_reset_hw(hw);
if (ret) {

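For orientation, the callbacks registered above in ``hisi_dmadev_ops`` are invoked through the generic ``rte_dma_*`` API in the order implied by the state-machine comment. The following is a hedged sketch of the full control-path lifecycle from the application side (return-value checks omitted for brevity; ``dev_id`` is assumed to refer to one of the probed channels):

#include <stdio.h>
#include <rte_dmadev.h>

/* Illustrative walk-through of the control path; not part of this patch. */
static void
exercise_control_path(int16_t dev_id)
{
	struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
	struct rte_dma_vchan_conf vchan_conf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 1024,
	};
	struct rte_dma_info info;
	struct rte_dma_stats stats;

	rte_dma_info_get(dev_id, &info);		/* -> hisi_dma_info_get */
	rte_dma_configure(dev_id, &dev_conf);		/* -> hisi_dma_configure */
	rte_dma_vchan_setup(dev_id, 0, &vchan_conf);	/* -> hisi_dma_vchan_setup */
	rte_dma_start(dev_id);				/* IDLE -> RUN */
	rte_dma_stats_get(dev_id, 0, &stats);		/* -> hisi_dma_stats_get */
	rte_dma_dump(dev_id, stdout);			/* -> hisi_dma_dump */
	rte_dma_stats_reset(dev_id, 0);			/* -> hisi_dma_stats_reset */
	rte_dma_stop(dev_id);				/* RUN -> IDLE */
	rte_dma_close(dev_id);				/* -> hisi_dma_close */
}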
File: drivers/dma/hisilicon/hisi_dmadev.h

@@ -16,12 +16,17 @@
#define FIELD_GET(mask, reg) \
((typeof(mask))(((reg) & (mask)) >> BF_SHF(mask)))
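/*
 * Split a 64-bit value into its 32-bit halves; upper_32_bits() shifts twice
 * by 16 so the expression stays well-defined even for 32-bit arguments.
 */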
#define lower_32_bits(x) ((uint32_t)(x))
#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
#define PCI_VENDOR_ID_HUAWEI 0x19e5
#define HISI_DMA_DEVICE_ID 0xA122
#define HISI_DMA_PCI_REVISION_ID_REG 0x08
#define HISI_DMA_REVISION_HIP08B 0x21
#define HISI_DMA_MAX_HW_QUEUES 4
#define HISI_DMA_MAX_DESC_NUM 8192
#define HISI_DMA_MIN_DESC_NUM 32
/**
* The HIP08B (HiSilicon IP08) and later chips (e.g. HiSilicon IP09) are DMA iEPs,
@@ -110,12 +115,106 @@ enum {
HISI_DMA_STATE_RUN,
};
/**
* After scanning the CQ array, the CQ head register needs to be updated.
* Updating the register involves a write memory barrier (WMB) operation.
* The following method is used to reduce the number of WMB operations:
* a) allocate extra CQEs, corresponding to the macro HISI_DMA_CQ_RESERVED.
* b) update the CQ head register only after the accumulated number of
*    completed CQEs is greater than or equal to HISI_DMA_CQ_RESERVED.
*/
#define HISI_DMA_CQ_RESERVED 64
struct hisi_dma_sqe {
uint32_t dw0;
#define SQE_FENCE_FLAG BIT(10)
#define SQE_OPCODE_M2M 0x4
uint32_t dw1;
uint32_t dw2;
uint32_t length;
uint64_t src_addr;
uint64_t dst_addr;
};
struct hisi_dma_cqe {
uint64_t rsv;
uint64_t misc;
#define CQE_SQ_HEAD_MASK GENMASK(15, 0)
#define CQE_VALID_B BIT(48)
#define CQE_STATUS_MASK GENMASK(63, 49)
};
struct hisi_dma_dev {
struct hisi_dma_sqe *sqe;
volatile struct hisi_dma_cqe *cqe;
uint16_t *status; /* the completion status array of SQEs. */
volatile void *sq_tail_reg; /**< register address for doorbell. */
volatile void *cq_head_reg; /**< register address for answer CQ. */
uint16_t sq_depth_mask; /**< SQ depth - 1; the SQ depth is a power of 2. */
uint16_t cq_depth; /* CQ depth */
uint16_t ridx; /**< ring index that will be assigned to the next request. */
/** ring index returned by the hisi_dmadev_completed* APIs. */
uint16_t cridx;
/**
 * SQE array management fields:
 *
 *  -----------------------------------------------------
 *  | SQE0 | SQE1 | SQE2 |  ...  | SQEx | ... | SQEn-1 |
 *  -----------------------------------------------------
 *     ^              ^               ^
 *     |              |               |
 *  sq_head      cq_sq_head        sq_tail
 *
 * sq_head: index of the oldest completed request; this field is
 *          updated by the hisi_dmadev_completed* APIs.
 * sq_tail: index of the next new request; this field is updated by
 *          the hisi_dmadev_copy API.
 * cq_sq_head: one past the last index that hardware has completed;
 *             this field is updated by the hisi_dmadev_completed* APIs.
 *
 * [sq_head, cq_sq_head): the SQEs that hardware has already completed.
 * [cq_sq_head, sq_tail): the SQEs that hardware is still processing.
 */
uint16_t sq_head;
uint16_t sq_tail;
uint16_t cq_sq_head;
/**
* The driver scans the CQE array; if the valid bit toggles, the CQE is
* considered valid.
* Note: one CQE corresponds to one or more SQEs. For example, if the
* application submits two copy requests, the hardware processes both
* SQEs but may write back only one CQE, whose sq_head field indicates
* the index of the second copy request in the SQE array.
*/
uint16_t cq_head; /**< CQ index for the next scan. */
/** accumulated number of completed CQs
* @see HISI_DMA_CQ_RESERVED
*/
uint16_t cqs_completed;
uint8_t cqe_vld; /**< valid bit for CQE; toggles on every pass through the CQ ring. */
uint64_t submitted;
uint64_t completed;
uint64_t errors;
/**
* The following fields are not accessed in the I/O path, so they are
* placed at the end.
*/
struct rte_dma_dev_data *data;
uint8_t revision; /**< PCI revision. */
uint8_t reg_layout; /**< hardware register layout. */
void *io_base;
uint8_t queue_id; /**< hardware DMA queue index. */
const struct rte_memzone *iomz;
uint32_t iomz_sz;
rte_iova_t sqe_iova;
rte_iova_t cqe_iova;
};
#endif /* HISI_DMADEV_H */
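To make the deferred CQ-head update described by the HISI_DMA_CQ_RESERVED comment more concrete, the following is a rough sketch of a completion scan that batches the doorbell write. It is a hypothetical helper, not part of this patch (the real data-path functions arrive in a follow-up change), and it omits error-status handling.

/* Hypothetical sketch: scan the CQ ring and defer the CQ head doorbell. */
static void
hisi_dma_scan_cq_sketch(struct hisi_dma_dev *hw)
{
	volatile struct hisi_dma_cqe *cqe;
	uint16_t count = 0;

	for (;;) {
		cqe = &hw->cqe[hw->cq_head];
		/* A CQE is valid when its valid bit matches the expected phase. */
		if (FIELD_GET(CQE_VALID_B, cqe->misc) != hw->cqe_vld)
			break;
		hw->cq_sq_head = (FIELD_GET(CQE_SQ_HEAD_MASK, cqe->misc) + 1) &
				 hw->sq_depth_mask;
		if (++hw->cq_head == hw->cq_depth) {
			hw->cq_head = 0;
			hw->cqe_vld = !hw->cqe_vld; /* the phase flips on wrap */
		}
		count++;
	}

	/* Only ring the CQ head doorbell once enough CQEs have accumulated. */
	hw->cqs_completed += count;
	if (hw->cqs_completed >= HISI_DMA_CQ_RESERVED) {
		rte_write32(hw->cq_head, hw->cq_head_reg);
		hw->cqs_completed = 0;
	}
}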