dma/idxd: add configure and info
Add functions for device configuration. The info_get function is included
here since it can be useful for checking successful configuration.
Documentation is also updated to add device configuration usage info.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
Reviewed-by: Chengwen Feng <fengchengwen@huawei.com>
@@ -120,3 +120,18 @@ use a subset of configured queues.
Once probed successfully, irrespective of kernel driver, the device will appear as a ``dmadev``,
that is a "DMA device type" inside DPDK, and can be accessed using APIs from the
``rte_dmadev`` library.

Using IDXD DMAdev Devices
--------------------------

To use the devices from an application, the dmadev API can be used.

Device Configuration
~~~~~~~~~~~~~~~~~~~~~

IDXD configuration requirements:

* ``ring_size`` must be a power of two, between 64 and 4096.
* Only one ``vchan`` is supported per device (work queue).
* IDXD devices do not support silent mode.
* The transfer direction must be set to ``RTE_DMA_DIR_MEM_TO_MEM`` to copy from memory to memory.
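As a rough illustration of these requirements, the following minimal sketch (not part of this patch; the ``dev_id`` argument, the ring size of 1024, and the error handling are assumptions) configures a probed idxd ``dmadev`` through the generic dmadev API:

#include <rte_dmadev.h>

/* Sketch only: assumes "dev_id" identifies an already-probed idxd dmadev. */
static int
setup_idxd_dmadev(int16_t dev_id)
{
	/* idxd exposes exactly one vchan (work queue) per device */
	struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
	/* ring size must be a power of two between 64 and 4096 */
	struct rte_dma_vchan_conf vchan_conf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 1024,
	};

	if (rte_dma_configure(dev_id, &dev_conf) != 0)
		return -1;
	if (rte_dma_vchan_setup(dev_id, 0, &vchan_conf) != 0)
		return -1;
	return rte_dma_start(dev_id);
}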
@@ -97,6 +97,9 @@ idxd_dev_close(struct rte_dma_dev *dev)
static const struct rte_dma_dev_ops idxd_bus_ops = {
	.dev_close = idxd_dev_close,
	.dev_dump = idxd_dump,
	.dev_configure = idxd_configure,
	.vchan_setup = idxd_vchan_setup,
	.dev_info_get = idxd_info_get,
};

static void *
@@ -39,6 +39,77 @@ idxd_dump(const struct rte_dma_dev *dev, FILE *f)
	return 0;
}

int
idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t size)
{
	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;

	if (size < sizeof(*info))
		return -EINVAL;

	*info = (struct rte_dma_info) {
			.dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_HANDLES_ERRORS |
				RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_FILL,
			.max_vchans = 1,
			.max_desc = 4096,
			.min_desc = 64,
	};
	if (idxd->sva_support)
		info->dev_capa |= RTE_DMA_CAPA_SVA;
	return 0;
}

int
idxd_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
		uint32_t conf_sz)
{
	if (sizeof(struct rte_dma_conf) != conf_sz)
		return -EINVAL;

	if (dev_conf->nb_vchans != 1)
		return -EINVAL;
	return 0;
}

int
idxd_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
		const struct rte_dma_vchan_conf *qconf, uint32_t qconf_sz)
{
	struct idxd_dmadev *idxd = dev->fp_obj->dev_private;
	uint16_t max_desc = qconf->nb_desc;

	if (sizeof(struct rte_dma_vchan_conf) != qconf_sz)
		return -EINVAL;

	idxd->qcfg = *qconf;

	if (!rte_is_power_of_2(max_desc))
		max_desc = rte_align32pow2(max_desc);
	IDXD_PMD_DEBUG("DMA dev %u using %u descriptors", dev->data->dev_id, max_desc);
	idxd->desc_ring_mask = max_desc - 1;
	idxd->qcfg.nb_desc = max_desc;

	/* in case we are reconfiguring a device, free any existing memory */
	rte_free(idxd->desc_ring);

	/* allocate the descriptor ring at 2x size as batches can't wrap */
	idxd->desc_ring = rte_zmalloc(NULL, sizeof(*idxd->desc_ring) * max_desc * 2, 0);
	if (idxd->desc_ring == NULL)
		return -ENOMEM;
	idxd->desc_iova = rte_mem_virt2iova(idxd->desc_ring);

	idxd->batch_idx_read = 0;
	idxd->batch_idx_write = 0;
	idxd->batch_start = 0;
	idxd->batch_size = 0;
	idxd->ids_returned = 0;
	idxd->ids_avail = 0;

	memset(idxd->batch_comp_ring, 0, sizeof(*idxd->batch_comp_ring) *
			(idxd->max_batches + 1));
	return 0;
}

int
idxd_dmadev_create(const char *name, struct rte_device *dev,
		const struct idxd_dmadev *base_idxd,
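As the commit message notes, the info_get callback above is useful for checking that configuration succeeded. A minimal, hypothetical sketch of an application reading those limits back through the generic dmadev API (not part of the patch; the function name, ``dev_id`` argument, and output format are assumptions):

#include <stdio.h>
#include <rte_dmadev.h>

/* Sketch only: "dev_id" is assumed to be a valid idxd dmadev. */
static void
print_idxd_limits(int16_t dev_id)
{
	struct rte_dma_info info;

	if (rte_dma_info_get(dev_id, &info) != 0)
		return;

	/* idxd reports one vchan and a 64..4096 descriptor range */
	printf("vchans=%u desc=[%u..%u] sva=%s\n",
			info.max_vchans, info.min_desc, info.max_desc,
			(info.dev_capa & RTE_DMA_CAPA_SVA) ? "yes" : "no");
}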
@@ -81,5 +81,11 @@ struct idxd_dmadev {
int idxd_dmadev_create(const char *name, struct rte_device *dev,
		const struct idxd_dmadev *base_idxd, const struct rte_dma_dev_ops *ops);
int idxd_dump(const struct rte_dma_dev *dev, FILE *f);
int idxd_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *dev_conf,
		uint32_t conf_sz);
int idxd_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
		const struct rte_dma_vchan_conf *qconf, uint32_t qconf_sz);
int idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
		uint32_t size);

#endif /* _IDXD_INTERNAL_H_ */
@@ -85,6 +85,9 @@ idxd_pci_dev_close(struct rte_dma_dev *dev)
static const struct rte_dma_dev_ops idxd_pci_ops = {
	.dev_close = idxd_pci_dev_close,
	.dev_dump = idxd_dump,
	.dev_configure = idxd_configure,
	.vchan_setup = idxd_vchan_setup,
	.dev_info_get = idxd_info_get,
};

/* each portal uses 4 x 4k pages */