dma/idxd: add burst capacity

Add support for the burst capacity API. This API will provide the calling
application with the remaining capacity of the current burst (limited by
max HW batch size).

Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
Reviewed-by: Bruce Richardson <bruce.richardson@intel.com>
Reviewed-by: Chengwen Feng <fengchengwen@huawei.com>
This commit is contained in:
Kevin Laatz 2021-10-20 16:30:10 +00:00 committed by Thomas Monjalon
parent 5a23df349a
commit 9459de4edc
3 changed files with 23 additions and 0 deletions

View File

@@ -468,6 +468,26 @@ idxd_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t
	return 0;
}
/* Report how many more operations can be enqueued in the current burst:
 * free space in the descriptor ring, capped by the max HW batch size.
 * Returns 0 when the batch-index ring itself has no free slot.
 */
uint16_t
idxd_burst_capacity(const void *dev_private, uint16_t vchan __rte_unused)
{
	const struct idxd_dmadev *idxd = dev_private;
	uint16_t write_idx = idxd->batch_start + idxd->batch_size;
	uint16_t used_space, free_space;

	/* Check for space in the batch ring: it is full either when write has
	 * wrapped to the sentinel position while read is at 0, or when write
	 * is immediately behind read.
	 */
	if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == idxd->max_batches) ||
			idxd->batch_idx_write + 1 == idxd->batch_idx_read)
		return 0;

	/* For descriptors, check for wrap-around on write but not read */
	if (idxd->ids_returned > write_idx)
		write_idx += idxd->desc_ring_mask + 1;
	used_space = write_idx - idxd->ids_returned;

	/* Reserve two descriptor slots out of the free space: one for the
	 * batch descriptor that submits the burst, and one for the NULL
	 * descriptor the driver must add when a batch would hold only a
	 * single operation (HW requires batches of >= 2 descriptors).
	 * Without this reservation the function over-reports capacity and
	 * the final enqueues of a burst can fail.  Guard the subtraction so
	 * a nearly-full ring cannot underflow to a huge bogus value.
	 */
	free_space = idxd->desc_ring_mask - used_space;
	if (free_space < 2)
		return 0;
	return RTE_MIN((uint16_t)(free_space - 2), idxd->max_batch_size);
}
int
idxd_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
		uint32_t conf_sz)
@@ -553,6 +573,7 @@ idxd_dmadev_create(const char *name, struct rte_device *dev,
	dmadev->fp_obj->submit = idxd_submit;
	dmadev->fp_obj->completed = idxd_completed;
	dmadev->fp_obj->completed_status = idxd_completed_status;
	dmadev->fp_obj->burst_capacity = idxd_burst_capacity;
dmadev->fp_obj->burst_capacity = idxd_burst_capacity;
	idxd = dmadev->data->dev_private;
	*idxd = *base_idxd; /* copy over the main fields already passed in */

View File

@@ -104,5 +104,6 @@ int idxd_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
int idxd_stats_reset(struct rte_dma_dev *dev, uint16_t vchan);
int idxd_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan,
		enum rte_dma_vchan_status *status);
uint16_t idxd_burst_capacity(const void *dev_private, uint16_t vchan);
#endif /* _IDXD_INTERNAL_H_ */

View File

@@ -258,6 +258,7 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd,
	idxd->u.pci = pci;
	idxd->max_batches = wq_size;
idxd->max_batch_size = 1 << lg2_max_batch;
	/* enable the device itself */
	err_code = idxd_pci_dev_command(idxd, idxd_enable_dev);