event/cnxk: add common configuration validation

Add device configuration validation and the default port and queue
configuration functions.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Author: Pavan Nikhilesh
Date:   2021-05-04 05:56:56 +05:30
Committed-by: Jerin Jacob
Parent: 95f42ef92c
Commit: 5512c7de85
2 changed files with 76 additions and 0 deletions
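The new cnxk_sso_dev_validate() hook (first file below) rejects any
configuration outside the limits the driver advertises through info_get.
As a minimal application-side sketch of a configuration that would pass
these checks (the helper name configure_sso and the choice of the
advertised maxima are illustrative assumptions, not part of this patch):

#include <rte_eventdev.h>

static int
configure_sso(uint8_t dev_id)
{
	struct rte_event_dev_config conf = {0};
	struct rte_event_dev_info info;
	int ret;

	ret = rte_event_dev_info_get(dev_id, &info);
	if (ret < 0)
		return ret;

	/* A timeout of 0 is replaced by the driver minimum; anything
	 * outside [min, max] is rejected with -EINVAL.
	 */
	conf.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
	conf.nb_event_queues = info.max_event_queues;
	conf.nb_event_ports = info.max_event_ports;
	conf.nb_events_limit = info.max_num_events;
	conf.nb_event_queue_flows = info.max_event_queue_flows;
	/* The cnxk driver only accepts per-port depths of 1. */
	conf.nb_event_port_dequeue_depth = 1;
	conf.nb_event_port_enqueue_depth = 1;

	return rte_event_dev_configure(dev_id, &conf);
}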

drivers/event/cnxk/cnxk_eventdev.c

@@ -28,6 +28,76 @@ cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
				  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}

int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
	struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint32_t deq_tmo_ns;

	deq_tmo_ns = conf->dequeue_timeout_ns;

	/* A timeout of zero falls back to the device minimum; anything
	 * outside the advertised range is rejected.
	 */
	if (deq_tmo_ns == 0)
		deq_tmo_ns = dev->min_dequeue_timeout_ns;
	if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
	    deq_tmo_ns > dev->max_dequeue_timeout_ns) {
		plt_err("Unsupported dequeue timeout requested");
		return -EINVAL;
	}

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		dev->is_timeout_deq = 1;

	dev->deq_tmo_ns = deq_tmo_ns;

	/* Queue and port counts must be non-zero and within device
	 * limits.
	 */
	if (!conf->nb_event_queues || !conf->nb_event_ports ||
	    conf->nb_event_ports > dev->max_event_ports ||
	    conf->nb_event_queues > dev->max_event_queues) {
		plt_err("Unsupported event queues/ports requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_dequeue_depth > 1) {
		plt_err("Unsupported event port deq depth requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_enqueue_depth > 1) {
		plt_err("Unsupported event port enq depth requested");
		return -EINVAL;
	}

	dev->nb_event_queues = conf->nb_event_queues;
	dev->nb_event_ports = conf->nb_event_ports;

	return 0;
}

void
cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
			struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

void
cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
		       struct rte_event_port_conf *port_conf)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = dev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}

int
cnxk_sso_init(struct rte_eventdev *event_dev)
{
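Once these hooks are wired into the driver's eventdev ops, the defaults
above are what the public API hands back to applications. A short sketch
of the application view (device, queue and port id 0 are assumed for
illustration):

	struct rte_event_queue_conf qconf;
	struct rte_event_port_conf pconf;

	rte_event_queue_default_conf_get(0, 0, &qconf);
	rte_event_port_default_conf_get(0, 0, &pconf);
	/* For cnxk: qconf.event_queue_cfg == RTE_EVENT_QUEUE_CFG_ALL_TYPES,
	 * pconf.dequeue_depth == pconf.enqueue_depth == 1.
	 */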
drivers/event/cnxk/cnxk_eventdev.h

@@ -22,6 +22,7 @@ struct cnxk_sso_evdev {
	uint8_t is_timeout_deq;
	uint8_t nb_event_queues;
	uint8_t nb_event_ports;
	uint32_t deq_tmo_ns;
	uint32_t min_dequeue_timeout_ns;
	uint32_t max_dequeue_timeout_ns;
	int32_t max_num_events;
@@ -41,5 +42,10 @@ int cnxk_sso_fini(struct rte_eventdev *event_dev);
int cnxk_sso_remove(struct rte_pci_device *pci_dev);
void cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
		       struct rte_event_dev_info *dev_info);
int cnxk_sso_dev_validate(const struct rte_eventdev *event_dev);
void cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
			     struct rte_event_queue_conf *queue_conf);
void cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
			    struct rte_event_port_conf *port_conf);

#endif /* __CNXK_EVENTDEV_H__ */