/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_telemetry.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
#include "rte_eventdev_trace.h"

static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = rte_event_devices;

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs = 0
};

/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return eventdev_globals.nb_devs;
}

int
rte_event_dev_get_dev_id(const char *name)
{
	int i;
	uint8_t cmp;

	if (!name)
		return -EINVAL;

	for (i = 0; i < eventdev_globals.nb_devs; i++) {
		cmp = (strncmp(rte_event_devices[i].data->name, name,
			RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
			(rte_event_devices[i].dev ? (strncmp(
				rte_event_devices[i].dev->driver->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
		if (cmp && (rte_event_devices[i].attached ==
					RTE_EVENTDEV_ATTACHED))
			return i;
	}
	return -ENODEV;
}

int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;
	return 0;
}

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->eth_rx_adapter_caps_get ?
			(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
						&rte_eth_devices[eth_port_id],
						caps)
			: 0;
}

int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
{
	struct rte_eventdev *dev;
	const struct rte_event_timer_adapter_ops *ops;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->timer_adapter_caps_get ?
			(*dev->dev_ops->timer_adapter_caps_get)(dev,
								0,
								caps,
								&ops)
			: 0;
}

int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
				  uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_cryptodev *cdev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (!rte_cryptodev_pmd_is_valid_dev(cdev_id))
		return -EINVAL;

	dev = &rte_eventdevs[dev_id];
	cdev = rte_cryptodev_pmd_get_dev(cdev_id);

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->crypto_adapter_caps_get ?
		(*dev->dev_ops->crypto_adapter_caps_get)
		(dev, cdev, caps) : -ENOTSUP;
}

int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;
	struct rte_eth_dev *eth_dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];
	eth_dev = &rte_eth_devices[eth_port_id];

	if (caps == NULL)
		return -EINVAL;

	*caps = 0;

	return dev->dev_ops->eth_tx_adapter_caps_get ?
			(*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
								eth_dev,
								caps)
			: 0;
}
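
/*
 * Illustrative usage sketch (editor's addition, not part of the library):
 * how an application might query the Rx adapter capabilities exposed
 * above. "dev_id" and "eth_port_id" are assumed to identify an already
 * probed event device and ethdev port.
 *
 *	uint32_t caps;
 *
 *	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps))
 *		rte_panic("caps query failed\n");
 *	if (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)
 *		printf("HW can inject mbufs as events directly\n");
 */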

static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
		/* Allocate memory to store queue configuration */
		dev->data->queues_cfg = rte_zmalloc_socket(
				"eventdev->data->queues_cfg",
				sizeof(dev->data->queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->queues_cfg == NULL) {
			dev->data->nb_queues = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
					" nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
	/* Re-configure */
	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		/* Re allocate memory to store queue configuration */
		queues_cfg = dev->data->queues_cfg;
		queues_cfg = rte_realloc(queues_cfg,
				sizeof(queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (queues_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
						" nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
		dev->data->queues_cfg = queues_cfg;

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}

#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)

static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	struct rte_event_port_conf *ports_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->ports == NULL && nb_ports != 0) {
		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
				sizeof(dev->data->ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store port configurations */
		dev->data->ports_cfg =
			rte_zmalloc_socket("eventdev->ports_cfg",
			sizeof(dev->data->ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports_cfg == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store queue to port link connection */
		dev->data->links_map =
			rte_zmalloc_socket("eventdev->links_map",
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}
		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
			dev->data->links_map[i] =
				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		ports_cfg = dev->data->ports_cfg;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		/* Realloc memory for ports */
		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE);
		if (ports == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory for ports_cfg */
		ports_cfg = rte_realloc(ports_cfg,
			sizeof(ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE);
		if (ports_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory to store queue to port link connection */
		links_map = rte_realloc(links_map,
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE);
		if (links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
					" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_cfg + old_nb_ports, 0,
				sizeof(ports_cfg[0]) * new_ps);
			for (i = old_links_map_end; i < links_map_end; i++)
				links_map[i] =
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
		}

		dev->data->ports = ports;
		dev->data->ports_cfg = ports_cfg;
		dev->data->links_map = links_map;
	} else if (dev->data->ports != NULL && nb_ports == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}

int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_dev_info info;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_queues,
				 info.max_event_queues,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues -
			dev_conf->nb_single_link_event_port_queues >
			info.max_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
				 dev_id, dev_conf->nb_event_queues,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_queues);
		return -EINVAL;
	}
	if (dev_conf->nb_single_link_event_port_queues >
			dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
				 dev_id,
				 dev_conf->nb_single_link_event_port_queues,
				 dev_conf->nb_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports +
			info.max_single_link_event_port_queue_pairs) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
				 dev_id, dev_conf->nb_event_ports,
				 info.max_event_ports,
				 info.max_single_link_event_port_queue_pairs);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports -
			dev_conf->nb_single_link_event_port_queues
			> info.max_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
				 dev_id, dev_conf->nb_event_ports,
				 dev_conf->nb_single_link_event_port_queues,
				 info.max_event_ports);
		return -EINVAL;
	}

	if (dev_conf->nb_single_link_event_port_queues >
	    dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR(
				 "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
				 dev_id,
				 dev_conf->nb_single_link_event_port_queues,
				 dev_conf->nb_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}

	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_dequeue_depth >
			info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}

	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			info.max_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		rte_event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		rte_event_dev_queue_config(dev, 0);
		rte_event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	rte_eventdev_trace_configure(dev_id, dev_conf, diag);
	return diag;
}
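
/*
 * Illustrative configuration sketch (editor's addition, not part of the
 * library): a minimal flow through the checks above, deriving the config
 * from rte_event_dev_info_get(). The queue/port counts are assumptions
 * for illustration.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.nb_event_queues = 2;
 *	cfg.nb_event_ports = 4;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *		rte_panic("configure failed\n");
 */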

static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}

static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ATOMIC)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ORDERED)
		))
		return 1;
	else
		return 0;
}

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
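
/*
 * Illustrative queue setup sketch (editor's addition, not part of the
 * library): configuring one atomic queue after the device has been
 * configured. The flow count is an assumption for illustration.
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(dev_id, 0, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	qconf.nb_atomic_flows = 1024;
 *	if (rte_event_queue_setup(dev_id, 0, &qconf) < 0)
 *		rte_panic("queue setup failed\n");
 */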

static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				 dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (port_conf &&
	    (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
	    !(dev->data->event_dev_cap &
	      RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Implicit release disable not supported",
			dev_id, port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port(default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
	if (diag < 0)
		return diag;

	return 0;
}
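
/*
 * Illustrative port setup sketch (editor's addition, not part of the
 * library): setting up one port with the PMD's defaults, which also
 * leaves the port unlinked as noted above.
 *
 *	struct rte_event_port_conf pconf;
 *
 *	rte_event_port_default_conf_get(dev_id, 0, &pconf);
 *	if (rte_event_port_setup(dev_id, 0, &pconf) < 0)
 *		rte_panic("port setup failed\n");
 */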

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
	{
		uint32_t config;

		config = dev->data->ports_cfg[port_id].event_port_cfg;
		*attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
			return -EOVERFLOW;

		*attr_value = conf->schedule_type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
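
/*
 * Illustrative attribute query sketch (editor's addition, not part of
 * the library): reading back a queue's schedule type; -EOVERFLOW is
 * returned above for RTE_EVENT_QUEUE_CFG_ALL_TYPES queues.
 *
 *	uint32_t sched_type;
 *
 *	if (rte_event_queue_attr_get(dev_id, 0,
 *			RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE, &sched_type) == 0)
 *		printf("queue 0 schedule type: %u\n", sched_type);
 */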

int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_link == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
						queues, priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
	return diag;
}
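
/*
 * Illustrative link sketch (editor's addition, not part of the library):
 * linking port 0 to queue 0 at normal priority; passing NULL queue and
 * priority arrays instead would link all queues, as handled above.
 *
 *	uint8_t queue = 0;
 *	uint8_t prio = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *
 *	if (rte_event_port_link(dev_id, 0, &queue, &prio, 1) != 1)
 *		rte_panic("link failed: %d\n", rte_errno);
 */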

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int i, diag, j;
	uint16_t *links_map;

	RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
	dev = &rte_eventdevs[dev_id];

	if (*dev->dev_ops->port_unlink == NULL) {
		RTE_EDEV_LOG_ERR("Function not supported");
		rte_errno = ENOTSUP;
		return 0;
	}

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		rte_errno = EINVAL;
		return 0;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);

	if (queues == NULL) {
		j = 0;
		for (i = 0; i < dev->data->nb_queues; i++) {
			if (links_map[i] !=
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
				all_queues[j] = i;
				j++;
			}
		}
		queues = all_queues;
	} else {
		for (j = 0; j < nb_unlinks; j++) {
			if (links_map[queues[j]] ==
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
				break;
		}
	}

	nb_unlinks = j;
	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues) {
			rte_errno = EINVAL;
			return 0;
		}

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					queues, nb_unlinks);

	if (diag < 0)
		return diag;

	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
	return diag;
}

int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Return 0 if the PMD does not implement unlinks in progress.
	 * This allows PMDs which handle unlink synchronously to not implement
	 * this function at all.
	 */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);

	return (*dev->dev_ops->port_unlinks_in_progress)(dev,
			dev->data->ports[port_id]);
}

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}
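
/*
 * Illustrative conversion sketch (editor's addition, not part of the
 * library): converting 100 us to the device-specific timeout ticks that
 * rte_event_dequeue_burst() expects.
 *
 *	uint64_t ticks;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
 *		ticks = 0;
 */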

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (service_id == NULL)
		return -EINVAL;

	if (dev->data->service_inited)
		*service_id = dev->data->service_id;

	return dev->data->service_inited ? 0 : -ESRCH;
}
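
/*
 * Illustrative service sketch (editor's addition, not part of the
 * library): if the PMD uses a software scheduler service, it must be
 * mapped to a service core before the device makes progress. "slcore"
 * is an assumed service lcore id.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_lcore_add(slcore);
 *		rte_service_map_lcore_set(service_id, slcore, 1);
 *		rte_service_runstate_set(service_id, 1);
 *		rte_service_lcore_start(slcore);
 *	}
 */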

int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}

static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							queue_port_id,
							NULL, NULL, 0);
	return 0;
}

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
			queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}

/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
				ids, values, n);
	return -ENOTSUP;
}

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		unsigned int *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	unsigned int temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}

int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
							ids, nb_ids);
	return -ENOTSUP;
}
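
/*
 * Illustrative xstats sketch (editor's addition, not part of the
 * library): the two-call pattern implied above, sizing first with a
 * NULL buffer and then fetching device-level names and values
 * (error handling and free() omitted for brevity).
 *
 *	int n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	if (n > 0) {
 *		struct rte_event_dev_xstats_name *names =
 *			malloc(n * sizeof(*names));
 *		unsigned int *ids = malloc(n * sizeof(*ids));
 *		uint64_t *vals = malloc(n * sizeof(*vals));
 *
 *		rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, names, ids, n);
 *		rte_event_dev_xstats_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, ids, vals, n);
 *	}
 */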

int rte_event_dev_selftest(uint8_t dev_id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->dev_selftest != NULL)
		return (*dev->dev_ops->dev_selftest)();
	return -ENOTSUP;
}

int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	rte_eventdev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		eventdev_stop_flush_t callback, void *userdata)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	dev->dev_ops->dev_stop_flush = callback;
	dev->data->dev_stop_flush_arg = userdata;

	return 0;
}
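
/*
 * Illustrative stop-flush sketch (editor's addition, not part of the
 * library): a hypothetical callback that counts and frees events still
 * inside the device when rte_event_dev_stop() drains it. Assumes the
 * flushed events carry mbufs (rte_mbuf.h).
 *
 *	static void
 *	flush_cb(uint8_t id, struct rte_event ev, void *arg)
 *	{
 *		uint64_t *dropped = arg;
 *
 *		(*dropped)++;
 *		rte_pktmbuf_free(ev.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, flush_cb,
 *						    &dropped_count);
 */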

void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
	rte_eventdev_trace_stop(dev_id);
}

int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	rte_eventdev_trace_close(dev_id);
	return (*dev->dev_ops->dev_close)(dev);
}
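
/*
 * Illustrative teardown sketch (editor's addition, not part of the
 * library): the ordering the checks above enforce, i.e. a device must
 * be stopped before it can be closed.
 *
 *	if (rte_event_dev_start(dev_id) < 0)
 *		rte_panic("start failed\n");
 *	... enqueue/dequeue on worker lcores ...
 *	rte_event_dev_stop(dev_id);
 *	rte_event_dev_close(dev_id);
 */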

static inline int
rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_eventdev_data));

	return 0;
}

static inline uint8_t
rte_eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}
|
|
|
|
|
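/*
 * Default Tx adapter enqueue stub, installed for PMDs that do not provide
 * their own implementation: it accepts no events and reports ENOTSUP.
 */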
static uint16_t
rte_event_tx_adapter_enqueue(__rte_unused void *port,
			__rte_unused struct rte_event ev[],
			__rte_unused uint16_t nb_events)
{
	rte_errno = ENOTSUP;
	return 0;
}

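/*
 * Allocate a device slot for a new PMD instance: reject duplicate names,
 * find a free index, install the default Tx adapter stubs and, on first
 * allocation, set up and initialize the shared data area.
 */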
struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already allocated!",
				name);
		return NULL;
	}

	dev_id = rte_eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
	eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue;

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
				socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(eventdev->data->name, name,
				RTE_EVENTDEV_NAME_MAX_LEN);

			eventdev->data->dev_id = dev_id;
			eventdev->data->socket_id = socket_id;
			eventdev->data->dev_started = 0;
		}

		eventdev->attached = RTE_EVENTDEV_ATTACHED;
		eventdev_globals.nb_devs++;
	}

	return eventdev;
}

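/*
 * Release a device slot. Only the primary process frees the private data
 * and the backing memzone; both process types detach the slot and drop
 * their local data pointer.
 */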
int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}

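/*
 * Telemetry callbacks. Each handler parses its arguments from the
 * comma-separated parameter string and fills the rte_tel_data reply that
 * the telemetry library serializes to JSON. A sample session through
 * usertools/dpdk-telemetry.py might look like this (output illustrative
 * only, assuming one attached eventdev with four ports):
 *
 *	--> /eventdev/dev_list
 *	{"/eventdev/dev_list": [0]}
 *	--> /eventdev/port_list,0
 *	{"/eventdev/port_list": [0, 1, 2, 3]}
 */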
static int
handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	uint8_t dev_id;
	int ndev = rte_event_dev_count();

	if (ndev < 1)
		return -1;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_ATTACHED)
			rte_tel_data_add_array_int(d, dev_id);
	}

	return 0;
}

static int
handle_port_list(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_ports; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}

static int
handle_queue_list(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	int i;
	uint8_t dev_id;
	struct rte_eventdev *dev;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (i = 0; i < dev->data->nb_queues; i++)
		rte_tel_data_add_array_int(d, i);

	return 0;
}

static int
handle_queue_links(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	int i, ret, port_id = 0;
	char *end_param;
	uint8_t dev_id;
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_id = strtoul(p_param, &end_param, 10);
	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	for (i = 0; i < ret; i++) {
		char qid_name[32];

		snprintf(qid_name, 31, "qid_%u", queues[i]);
		rte_tel_data_add_dict_u64(d, qid_name, priorities[i]);
	}

	return 0;
}

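/*
 * Shared worker for the xstats handlers: query the number of xstats for
 * the given mode and port/queue ID, fetch names, IDs and values into
 * three heap buffers, then emit a name -> value dictionary into the reply.
 */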
static int
eventdev_build_telemetry_data(int dev_id,
		enum rte_event_dev_xstats_mode mode,
		int port_queue_id,
		struct rte_tel_data *d)
{
	struct rte_event_dev_xstats_name *xstat_names;
	unsigned int *ids;
	uint64_t *values;
	int i, ret, num_xstats;

	num_xstats = rte_event_dev_xstats_names_get(dev_id,
			mode,
			port_queue_id,
			NULL,
			NULL,
			0);

	if (num_xstats < 0)
		return -1;

	/* use one malloc for names */
	xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
			* num_xstats);
	if (xstat_names == NULL)
		return -1;

	ids = malloc((sizeof(unsigned int)) * num_xstats);
	if (ids == NULL) {
		free(xstat_names);
		return -1;
	}

	values = malloc((sizeof(uint64_t)) * num_xstats);
	if (values == NULL) {
		free(xstat_names);
		free(ids);
		return -1;
	}

	ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
			xstat_names, ids, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
			ids, values, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(xstat_names);
		free(ids);
		free(values);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
				values[i]);

	free(xstat_names);
	free(ids);
	free(values);
	return 0;
}

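/*
 * The three xstats handlers below differ only in the xstats mode they
 * select and in whether a port or queue ID is parsed after the device ID.
 */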
static int
handle_dev_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	int dev_id;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	if (*end_param != '\0')
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	mode = RTE_EVENT_DEV_XSTATS_DEVICE;
	return eventdev_build_telemetry_data(dev_id, mode, 0, d);
}

static int
handle_port_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_PORT;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}

static int
handle_queue_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	int dev_id;
	int port_queue_id = 0;
	enum rte_event_dev_xstats_mode mode;
	char *end_param;
	const char *p_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	/* Get dev ID from parameter string */
	dev_id = strtoul(params, &end_param, 10);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);

	p_param = strtok(end_param, ",");
	mode = RTE_EVENT_DEV_XSTATS_QUEUE;

	if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
		return -1;

	port_queue_id = strtoul(p_param, &end_param, 10);

	p_param = strtok(NULL, "\0");
	if (p_param != NULL)
		RTE_EDEV_LOG_DEBUG(
			"Extra parameters passed to eventdev telemetry command, ignoring");

	return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
}

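/*
 * Register the telemetry commands at startup. RTE_INIT runs this as a
 * constructor, so the commands are available as soon as an application
 * linking the eventdev library starts.
 */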
RTE_INIT(eventdev_init_telemetry)
{
	rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
			"Returns list of available eventdevs. Takes no parameters");
	rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
			"Returns list of available ports. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
			"Returns list of available queues. Parameter: DevID");

	rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
			"Returns stats for an eventdev. Parameter: DevID");
	rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
			"Returns stats for an eventdev port. Params: DevID,PortID");
	rte_telemetry_register_cmd("/eventdev/queue_xstats",
			handle_queue_xstats,
			"Returns stats for an eventdev queue. Params: DevID,QueueID");
	rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
			"Returns links for an eventdev port. Params: DevID,PortID");
}