bus/fslmc: keep Tx queues information for DPCI devices

The DPCI devices have both Tx and Rx queues. Event devices use only the
DPCI Rx queues, but CMDIF (AIOP) uses both the Tx and the Rx queues.
This patch also enables configuration of the Tx queues.

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Acked-by: Shreyansh Jain <shreyansh.jain@nxp.com>
Author:    Nipun Gupta <nipun.gupta@nxp.com>
Date:      2018-05-04 15:41:25 +05:30
Commit:    91e96999ef (parent: 3980bed77e), committed by Thomas Monjalon
3 changed files with 70 additions and 29 deletions
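Before the diffs, a brief standalone sketch of why keeping the Tx FQIDs matters: an eventdev only ever targets the DPCI Rx queues, while a CMDIF/AIOP-style consumer also needs the Tx side, which this patch starts caching. This is not part of the patch; the types are simplified stand-ins for the real ones in dpaa2_hw_pvt.h, and the cmdif_tx_fqid() helper is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define DPAA2_DPCI_MAX_QUEUES 2

/* Simplified stand-ins for struct dpaa2_queue / struct dpaa2_dpci_dev. */
struct dpaa2_queue {
	uint32_t fqid;	/* frame queue ID used for enqueue/dequeue */
};

struct dpaa2_dpci_dev {
	uint32_t dpci_id;
	/* After this patch: one array per direction instead of queue[]. */
	struct dpaa2_queue rx_queue[DPAA2_DPCI_MAX_QUEUES];
	struct dpaa2_queue tx_queue[DPAA2_DPCI_MAX_QUEUES];
};

/* Hypothetical CMDIF-side helper: with tx_queue[].fqid cached at probe
 * time, the send path needs no dpci_get_tx_queue() call into MC firmware.
 */
static uint32_t cmdif_tx_fqid(const struct dpaa2_dpci_dev *dpci, int q)
{
	return dpci->tx_queue[q].fqid;
}

int main(void)
{
	struct dpaa2_dpci_dev dev = {
		.rx_queue = { { .fqid = 0x100 }, { .fqid = 0x101 } },
		.tx_queue = { { .fqid = 0x200 }, { .fqid = 0x201 } },
	};

	printf("CMDIF would enqueue commands to FQID 0x%x\n",
	       cmdif_tx_fqid(&dev, 0));
	return 0;
}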

drivers/bus/fslmc/portal/dpaa2_hw_dpci.c

@@ -39,13 +39,14 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 	struct dpci_attr attr;
 	struct dpci_rx_queue_cfg rx_queue_cfg;
 	struct dpci_rx_queue_attr rx_attr;
+	struct dpci_tx_queue_attr tx_attr;
 	int ret, i;

 	/* Allocate DPAA2 dpci handle */
 	dpci_node = rte_malloc(NULL, sizeof(struct dpaa2_dpci_dev), 0);
 	if (!dpci_node) {
 		DPAA2_BUS_ERR("Memory allocation failed for DPCI Device");
-		return -1;
+		return -ENOMEM;
 	}

 	/* Open the dpci object */
@@ -54,8 +55,7 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 			CMD_PRI_LOW, dpci_id, &dpci_node->token);
 	if (ret) {
 		DPAA2_BUS_ERR("Resource alloc failure with err code: %d", ret);
-		rte_free(dpci_node);
-		return -1;
+		goto err;
 	}

 	/* Get the device attributes */
@@ -63,21 +63,40 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 			CMD_PRI_LOW, dpci_node->token, &attr);
 	if (ret != 0) {
 		DPAA2_BUS_ERR("Reading device failed with err code: %d", ret);
-		rte_free(dpci_node);
-		return -1;
+		goto err;
 	}

 	/* Set up the Rx Queue */
-	memset(&rx_queue_cfg, 0, sizeof(struct dpci_rx_queue_cfg));
-	ret = dpci_set_rx_queue(&dpci_node->dpci,
-				CMD_PRI_LOW,
-				dpci_node->token,
-				0, &rx_queue_cfg);
-	if (ret) {
-		DPAA2_BUS_ERR("Setting Rx queue failed with err code: %d",
-			      ret);
-		rte_free(dpci_node);
-		return -1;
+	for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
+		struct dpaa2_queue *rxq;
+		memset(&rx_queue_cfg, 0, sizeof(struct dpci_rx_queue_cfg));
+		ret = dpci_set_rx_queue(&dpci_node->dpci,
+					CMD_PRI_LOW,
+					dpci_node->token,
+					i, &rx_queue_cfg);
+		if (ret) {
+			DPAA2_BUS_ERR("Setting Rx queue failed with err code: %d",
+				      ret);
+			goto err;
+		}
+
+		/* Allocate DQ storage for the DPCI Rx queues */
+		rxq = &(dpci_node->rx_queue[i]);
+		rxq->q_storage = rte_malloc("dq_storage",
+					    sizeof(struct queue_storage_info_t),
+					    RTE_CACHE_LINE_SIZE);
+		if (!rxq->q_storage) {
+			DPAA2_BUS_ERR("q_storage allocation failed\n");
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+		if (ret) {
+			DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed\n");
+			goto err;
+		}
 	}

 	/* Enable the device */
@@ -85,8 +104,7 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 			CMD_PRI_LOW, dpci_node->token);
 	if (ret != 0) {
 		DPAA2_BUS_ERR("Enabling device failed with err code: %d", ret);
-		rte_free(dpci_node);
-		return -1;
+		goto err;
 	}

 	for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
@@ -96,13 +114,22 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 					dpci_node->token, i,
 					&rx_attr);
 		if (ret != 0) {
-			DPAA2_BUS_ERR("Rx queue fetch failed with err code:"
-				      " %d", ret);
-			rte_free(dpci_node);
-			return -1;
+			DPAA2_BUS_ERR("Rx queue fetch failed with err code: %d",
+				      ret);
+			goto err;
 		}
-		dpci_node->queue[i].fqid = rx_attr.fqid;
+		dpci_node->rx_queue[i].fqid = rx_attr.fqid;
+
+		ret = dpci_get_tx_queue(&dpci_node->dpci,
+					CMD_PRI_LOW,
+					dpci_node->token, i,
+					&tx_attr);
+		if (ret != 0) {
+			DPAA2_BUS_ERR("Reading device failed with err code: %d",
+				      ret);
+			goto err;
+		}
+		dpci_node->tx_queue[i].fqid = tx_attr.fqid;
 	}

 	dpci_node->dpci_id = dpci_id;
@@ -111,6 +138,19 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 	TAILQ_INSERT_TAIL(&dpci_dev_list, dpci_node, next);

 	return 0;
+
+err:
+	for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
+		struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]);
+
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+		}
+	}
+	rte_free(dpci_node);
+
+	return ret;
 }

 struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void)

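A note on the new error path above: q_storage is allocated queue by queue, but err: unwinds all DPAA2_DPCI_MAX_QUEUES slots, relying on the NULL check to skip slots that were never reached (this assumes the rx_queue array starts out zeroed). A standalone sketch of the same pattern, with libc allocation standing in for rte_malloc()/dpaa2_alloc_dq_storage():

#include <errno.h>
#include <stdlib.h>

#define MAX_QUEUES 2

struct queue {
	void *storage;	/* stand-in for q_storage */
};

/* q[] must be zero-initialized by the caller, mirroring how the DPCI
 * node's queue array must start out NULL for the unwind to be safe.
 */
static int setup_queues(struct queue q[MAX_QUEUES])
{
	int i;

	for (i = 0; i < MAX_QUEUES; i++) {
		q[i].storage = calloc(1, 64);
		if (q[i].storage == NULL)
			goto err;
	}
	return 0;

err:
	/* Walk every slot, not just 0..i: untouched slots are still NULL,
	 * so the check below makes the unwind safe after partial setup.
	 */
	for (i = 0; i < MAX_QUEUES; i++) {
		if (q[i].storage != NULL) {
			free(q[i].storage);
			q[i].storage = NULL;
		}
	}
	return -ENOMEM;
}

int main(void)
{
	struct queue q[MAX_QUEUES] = { 0 };	/* zeroed, as required */

	if (setup_queues(q) != 0)
		return 1;
	/* ... use the queues; a real caller frees them on teardown ... */
	return 0;
}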
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h

@@ -142,7 +142,8 @@ struct dpaa2_dpci_dev {
 	uint16_t token;
 	rte_atomic16_t in_use;
 	uint32_t dpci_id; /*HW ID for DPCI object */
-	struct dpaa2_queue queue[DPAA2_DPCI_MAX_QUEUES];
+	struct dpaa2_queue rx_queue[DPAA2_DPCI_MAX_QUEUES];
+	struct dpaa2_queue tx_queue[DPAA2_DPCI_MAX_QUEUES];
 };

 /*! Global MCP list */

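The field split above lives in a header private to the fslmc bus, so growing from one queue array to two is a purely internal cost. A stand-in sketch making the size change visible (sizes are illustrative only; the real struct dpaa2_queue is much larger):

#include <stdint.h>
#include <stdio.h>

#define DPAA2_DPCI_MAX_QUEUES 2

struct dpaa2_queue { uint32_t fqid; };	/* stand-in; the real one is bigger */

struct dpci_dev_before {	/* layout before this patch */
	struct dpaa2_queue queue[DPAA2_DPCI_MAX_QUEUES];
};

struct dpci_dev_after {		/* layout after this patch */
	struct dpaa2_queue rx_queue[DPAA2_DPCI_MAX_QUEUES];
	struct dpaa2_queue tx_queue[DPAA2_DPCI_MAX_QUEUES];
};

int main(void)
{
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct dpci_dev_before), sizeof(struct dpci_dev_after));
	return 0;
}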
drivers/event/dpaa2/dpaa2_eventdev.c

@@ -87,10 +87,10 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 		const struct rte_event *event = &ev[num_tx + loop];

 		if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
-			fqid = evq_info->dpci->queue[
+			fqid = evq_info->dpci->rx_queue[
 				DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
 		else
-			fqid = evq_info->dpci->queue[
+			fqid = evq_info->dpci->rx_queue[
 				DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

 		/* Prepare enqueue descriptor */
@@ -733,13 +733,13 @@ dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
 	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
 	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

-	dpci_dev->queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
+	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
 		dpaa2_eventdev_process_parallel;
-	dpci_dev->queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
+	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
 		dpaa2_eventdev_process_atomic;

 	for (i = 0 ; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
-		rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->queue[i]);
+		rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
 		ret = dpci_set_rx_queue(&dpci_dev->dpci,
 					CMD_PRI_LOW,
 					dpci_dev->token, i,
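The last hunk stores a pointer to each dpaa2_queue in the DPCI Rx queue's user_ctx; on dequeue the hardware returns that context word verbatim, and the driver casts it back to reach the per-queue callback (dpaa2_eventdev_process_parallel/atomic). A standalone sketch of that round-trip, with stand-in types:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct dpaa2_queue;	/* stand-in, not the real bus structure */
typedef void (*dpaa2_queue_cb_t)(const struct dpaa2_queue *q);

struct dpaa2_queue {
	uint32_t fqid;
	dpaa2_queue_cb_t cb;	/* e.g. dpaa2_eventdev_process_parallel */
};

static void process_parallel(const struct dpaa2_queue *q)
{
	printf("parallel event on FQID 0x%" PRIx32 "\n", q->fqid);
}

int main(void)
{
	struct dpaa2_queue q = { .fqid = 0x100, .cb = process_parallel };

	/* Configure side: squeeze the queue pointer into a 64-bit
	 * context word, as rx_queue_cfg.user_ctx does above.
	 */
	uint64_t user_ctx = (uint64_t)(uintptr_t)&q;

	/* Dequeue side: the context comes back unchanged; the driver
	 * recovers the queue and dispatches its callback.
	 */
	struct dpaa2_queue *rxq = (struct dpaa2_queue *)(uintptr_t)user_ctx;

	rxq->cb(rxq);
	return 0;
}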