raw/ioat: configure idxd devices
Add configure function for idxd devices, taking the same parameters as the existing configure function for ioat. The ring_size parameter is used to compute the maximum number of bursts to be supported by the driver, given that the hardware works on individual bursts of descriptors at a time.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Acked-by: Radu Nicolau <radu.nicolau@intel.com>
commit 69c4162643 (parent 389d519785)
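For context, a minimal sketch of how an application might exercise this new configure path through the generic rawdev API. The helper name, device id, ring size, and error handling are illustrative assumptions, not part of this patch; rte_ioat_rawdev_config and rte_rawdev_configure() are the existing public interfaces the new callback plugs into:

#include <rte_rawdev.h>
#include <rte_ioat_rawdev.h>

/* hypothetical helper; a real application would look up dev_id by name */
static int
setup_idxd_dev(uint16_t dev_id)
{
	struct rte_ioat_rawdev_config cfg = {
		.ring_size = 512,      /* the driver may clamp/round this, see below */
		.hdls_disable = false, /* keep completion-handle tracking enabled */
	};
	struct rte_rawdev_info info = { .dev_private = &cfg };

	/* dispatches to idxd_dev_configure() via the ops tables below */
	return rte_rawdev_configure(dev_id, &info, sizeof(cfg));
}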
@@ -55,6 +55,7 @@ static const struct rte_rawdev_ops idxd_pci_ops = {
 		.dev_close = idxd_rawdev_close,
 		.dev_selftest = idxd_rawdev_test,
 		.dump = idxd_dev_dump,
+		.dev_configure = idxd_dev_configure,
 };
 
 /* each portal uses 4 x 4k pages */
@@ -34,6 +34,7 @@ static const struct rte_rawdev_ops idxd_vdev_ops = {
 		.dev_close = idxd_rawdev_close,
 		.dev_selftest = idxd_rawdev_test,
 		.dump = idxd_dev_dump,
+		.dev_configure = idxd_dev_configure,
 };
 
 static void *
@@ -44,6 +44,70 @@ idxd_dev_dump(struct rte_rawdev *dev, FILE *f)
 	return 0;
 }
 
+int
+idxd_dev_configure(const struct rte_rawdev *dev,
+		rte_rawdev_obj_t config, size_t config_size)
+{
+	struct idxd_rawdev *idxd = dev->dev_private;
+	struct rte_idxd_rawdev *rte_idxd = &idxd->public;
+	struct rte_ioat_rawdev_config *cfg = config;
+	uint16_t max_desc = cfg->ring_size;
+	uint16_t max_batches = max_desc / BATCH_SIZE;
+	uint16_t i;
+
+	if (config_size != sizeof(*cfg))
+		return -EINVAL;
+
+	if (dev->started) {
+		IOAT_PMD_ERR("%s: Error, device is started.", __func__);
+		return -EAGAIN;
+	}
+
+	rte_idxd->hdls_disable = cfg->hdls_disable;
+
+	/* limit the batches to what can be stored in hardware */
+	if (max_batches > idxd->max_batches) {
+		IOAT_PMD_DEBUG("Ring size of %u is too large for this device, need to limit to %u batches of %u",
+				max_desc, idxd->max_batches, BATCH_SIZE);
+		max_batches = idxd->max_batches;
+		max_desc = max_batches * BATCH_SIZE;
+	}
+	if (!rte_is_power_of_2(max_desc))
+		max_desc = rte_align32pow2(max_desc);
+	IOAT_PMD_DEBUG("Rawdev %u using %u descriptors in %u batches",
+			dev->dev_id, max_desc, max_batches);
+
+	/* in case we are reconfiguring a device, free any existing memory */
+	rte_free(rte_idxd->batch_ring);
+	rte_free(rte_idxd->hdl_ring);
+
+	rte_idxd->batch_ring = rte_zmalloc(NULL,
+			sizeof(*rte_idxd->batch_ring) * max_batches, 0);
+	if (rte_idxd->batch_ring == NULL)
+		return -ENOMEM;
+
+	rte_idxd->hdl_ring = rte_zmalloc(NULL,
+			sizeof(*rte_idxd->hdl_ring) * max_desc, 0);
+	if (rte_idxd->hdl_ring == NULL) {
+		rte_free(rte_idxd->batch_ring);
+		rte_idxd->batch_ring = NULL;
+		return -ENOMEM;
+	}
+	rte_idxd->batch_ring_sz = max_batches;
+	rte_idxd->hdl_ring_sz = max_desc;
+
+	for (i = 0; i < rte_idxd->batch_ring_sz; i++) {
+		struct rte_idxd_desc_batch *b = &rte_idxd->batch_ring[i];
+		b->batch_desc.completion = rte_mem_virt2iova(&b->comp);
+		b->batch_desc.desc_addr = rte_mem_virt2iova(&b->null_desc);
+		b->batch_desc.op_flags = (idxd_op_batch << IDXD_CMD_OP_SHIFT) |
+				IDXD_FLAG_COMPLETION_ADDR_VALID |
+				IDXD_FLAG_REQUEST_COMPLETION;
+	}
+
+	return 0;
+}
+
 int
 idxd_rawdev_create(const char *name, struct rte_device *dev,
 		const struct idxd_rawdev *base_idxd,
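To make the ring-sizing arithmetic in idxd_dev_configure() concrete, here is a standalone sketch of the same computation outside the driver. The BATCH_SIZE of 64 and the device limit of 32 batches are assumed example values, and align32pow2() simply mirrors the behaviour of rte_align32pow2():

#include <stdint.h>
#include <stdio.h>

#define BATCH_SIZE      64 /* assumed value for illustration */
#define DEV_MAX_BATCHES 32 /* assumed hardware limit for illustration */

/* round up to the next power of two, like rte_align32pow2() */
static uint32_t
align32pow2(uint32_t x)
{
	x--;
	x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
	x |= x >> 8;  x |= x >> 16;
	return x + 1;
}

int
main(void)
{
	uint16_t max_desc = 4096;                     /* requested ring_size */
	uint16_t max_batches = max_desc / BATCH_SIZE; /* 4096 / 64 = 64 batches */

	if (max_batches > DEV_MAX_BATCHES) {
		/* clamp to what the hardware can store */
		max_batches = DEV_MAX_BATCHES;        /* 32 */
		max_desc = max_batches * BATCH_SIZE;  /* 32 * 64 = 2048 */
	}
	if (max_desc & (max_desc - 1))                /* not a power of two? */
		max_desc = (uint16_t)align32pow2(max_desc);

	printf("using %u descriptors in %u batches\n", max_desc, max_batches);
	return 0; /* prints: using 2048 descriptors in 32 batches */
}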
@@ -59,6 +59,9 @@ extern int idxd_rawdev_create(const char *name, struct rte_device *dev,
 
 extern int idxd_rawdev_close(struct rte_rawdev *dev);
 
+extern int idxd_dev_configure(const struct rte_rawdev *dev,
+		rte_rawdev_obj_t config, size_t config_size);
+
 extern int idxd_rawdev_test(uint16_t dev_id);
 
 extern int idxd_dev_dump(struct rte_rawdev *dev, FILE *f);
@@ -187,6 +187,7 @@ struct rte_idxd_rawdev {
 	uint16_t next_ret_hdl;       /* the next user hdl to return */
 	uint16_t last_completed_hdl; /* the last user hdl that has completed */
 	uint16_t next_free_hdl;      /* where the handle for next op will go */
+	uint16_t hdls_disable;       /* disable tracking completion handles */
 
 	struct rte_idxd_user_hdl *hdl_ring;
 	struct rte_idxd_desc_batch *batch_ring;