eventdev: add weight and affinity attributes to queue conf

Added new fields to represent event queue weight and affinity in the
rte_event_queue_conf structure. The internal op to get a queue attribute
is removed as it is no longer needed. Updated the driver to use the new
fields.

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Shijith Thotton, 2022-08-10 13:06:52 +05:30, committed by Jerin Jacob
parent 3d9d8adf8c
commit 2f279a1b6e
9 changed files with 33 additions and 74 deletions
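
For illustration, a minimal application-side sketch (not part of this patch) of how the new fields could be used on a device that advertises RTE_EVENT_DEV_CAP_QUEUE_QOS; the helper name and error handling are placeholders:

#include <rte_eventdev.h>

/* Hypothetical helper: configure one event queue with explicit weight and
 * affinity using the new rte_event_queue_conf fields.
 */
static int
setup_qos_queue(uint8_t dev_id, uint8_t queue_id)
{
        struct rte_event_dev_info info;
        struct rte_event_queue_conf conf;
        int ret;

        ret = rte_event_dev_info_get(dev_id, &info);
        if (ret < 0)
                return ret;

        /* Start from the driver's default queue configuration. */
        ret = rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
        if (ret < 0)
                return ret;

        /* Weight and affinity are only meaningful on devices with the
         * RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
         */
        if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS) {
                conf.weight = RTE_EVENT_QUEUE_WEIGHT_HIGHEST;
                conf.affinity = RTE_EVENT_QUEUE_AFFINITY_HIGHEST;
        }

        return rte_event_queue_setup(dev_id, queue_id, &conf);
}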


@@ -175,9 +175,6 @@ Deprecation Notices
``rte_event_vector::elem_offset`` gives the number of valid elements left
to process from the ``rte_event_vector::elem_offset``.
* eventdev: New fields to represent event queue weight and affinity
will be added to ``rte_event_queue_conf`` structure in DPDK 22.11.
* metrics: The function ``rte_metrics_init`` will have a non-void return
in order to notify errors instead of calling ``rte_exit``.


@@ -237,6 +237,9 @@ ABI Changes
* eventdev: Added ``evtim_drop_count`` field
to ``rte_event_timer_adapter_stats`` structure.
* eventdev: Added ``weight`` and ``affinity`` fields
to ``rte_event_queue_conf`` structure.
Known Issues
------------


@@ -1064,7 +1064,6 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
.queue_def_conf = cnxk_sso_queue_def_conf,
.queue_setup = cnxk_sso_queue_setup,
.queue_release = cnxk_sso_queue_release,
.queue_attr_get = cnxk_sso_queue_attribute_get,
.queue_attr_set = cnxk_sso_queue_attribute_set,
.port_def_conf = cnxk_sso_port_def_conf,


@@ -1151,7 +1151,6 @@ static struct eventdev_ops cn9k_sso_dev_ops = {
.queue_def_conf = cnxk_sso_queue_def_conf,
.queue_setup = cnxk_sso_queue_setup,
.queue_release = cnxk_sso_queue_release,
.queue_attr_get = cnxk_sso_queue_attribute_get,
.queue_attr_set = cnxk_sso_queue_attribute_set,
.port_def_conf = cnxk_sso_port_def_conf,


@@ -200,6 +200,8 @@ cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
queue_conf->nb_atomic_order_sequences = (1ULL << 20);
queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
queue_conf->weight = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
queue_conf->affinity = RTE_EVENT_QUEUE_AFFINITY_HIGHEST;
}
int
@@ -209,18 +211,12 @@ cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
uint8_t priority, weight, affinity;
/* Default weight and affinity */
dev->mlt_prio[queue_id].weight = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
dev->mlt_prio[queue_id].affinity = RTE_EVENT_QUEUE_AFFINITY_HIGHEST;
priority = CNXK_QOS_NORMALIZE(queue_conf->priority, 0,
RTE_EVENT_DEV_PRIORITY_LOWEST,
CNXK_SSO_PRIORITY_CNT);
weight = CNXK_QOS_NORMALIZE(
dev->mlt_prio[queue_id].weight, CNXK_SSO_WEIGHT_MIN,
RTE_EVENT_QUEUE_WEIGHT_HIGHEST, CNXK_SSO_WEIGHT_CNT);
affinity = CNXK_QOS_NORMALIZE(dev->mlt_prio[queue_id].affinity, 0,
RTE_EVENT_QUEUE_AFFINITY_HIGHEST,
weight = CNXK_QOS_NORMALIZE(queue_conf->weight, CNXK_SSO_WEIGHT_MIN,
RTE_EVENT_QUEUE_WEIGHT_HIGHEST, CNXK_SSO_WEIGHT_CNT);
affinity = CNXK_QOS_NORMALIZE(queue_conf->affinity, 0, RTE_EVENT_QUEUE_AFFINITY_HIGHEST,
CNXK_SSO_AFFINITY_CNT);
plt_sso_dbg("Queue=%u prio=%u weight=%u affinity=%u", queue_id,
@@ -237,22 +233,6 @@ cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
RTE_SET_USED(queue_id);
}
int
cnxk_sso_queue_attribute_get(struct rte_eventdev *event_dev, uint8_t queue_id,
uint32_t attr_id, uint32_t *attr_value)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
if (attr_id == RTE_EVENT_QUEUE_ATTR_WEIGHT)
*attr_value = dev->mlt_prio[queue_id].weight;
else if (attr_id == RTE_EVENT_QUEUE_ATTR_AFFINITY)
*attr_value = dev->mlt_prio[queue_id].affinity;
else
return -EINVAL;
return 0;
}
int
cnxk_sso_queue_attribute_set(struct rte_eventdev *event_dev, uint8_t queue_id,
uint32_t attr_id, uint64_t attr_value)
@@ -268,10 +248,10 @@ cnxk_sso_queue_attribute_set(struct rte_eventdev *event_dev, uint8_t queue_id,
conf->priority = attr_value;
break;
case RTE_EVENT_QUEUE_ATTR_WEIGHT:
dev->mlt_prio[queue_id].weight = attr_value;
conf->weight = attr_value;
break;
case RTE_EVENT_QUEUE_ATTR_AFFINITY:
dev->mlt_prio[queue_id].affinity = attr_value;
conf->affinity = attr_value;
break;
case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
@@ -288,11 +268,9 @@ cnxk_sso_queue_attribute_set(struct rte_eventdev *event_dev, uint8_t queue_id,
priority = CNXK_QOS_NORMALIZE(conf->priority, 0,
RTE_EVENT_DEV_PRIORITY_LOWEST,
CNXK_SSO_PRIORITY_CNT);
weight = CNXK_QOS_NORMALIZE(
dev->mlt_prio[queue_id].weight, CNXK_SSO_WEIGHT_MIN,
RTE_EVENT_QUEUE_WEIGHT_HIGHEST, CNXK_SSO_WEIGHT_CNT);
affinity = CNXK_QOS_NORMALIZE(dev->mlt_prio[queue_id].affinity, 0,
RTE_EVENT_QUEUE_AFFINITY_HIGHEST,
weight = CNXK_QOS_NORMALIZE(conf->weight, CNXK_SSO_WEIGHT_MIN,
RTE_EVENT_QUEUE_WEIGHT_HIGHEST, CNXK_SSO_WEIGHT_CNT);
affinity = CNXK_QOS_NORMALIZE(conf->affinity, 0, RTE_EVENT_QUEUE_AFFINITY_HIGHEST,
CNXK_SSO_AFFINITY_CNT);
return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, weight, affinity,


@@ -89,11 +89,6 @@ struct cnxk_sso_qos {
uint16_t iaq_prcnt;
};
struct cnxk_sso_mlt_prio {
uint8_t weight;
uint8_t affinity;
};
struct cnxk_sso_evdev {
struct roc_sso sso;
uint8_t max_event_queues;
@@ -125,7 +120,6 @@ struct cnxk_sso_evdev {
uint16_t vec_pool_cnt;
uint64_t *vec_pools;
struct cnxk_timesync_info *tstamp[RTE_MAX_ETHPORTS];
struct cnxk_sso_mlt_prio mlt_prio[RTE_EVENT_MAX_QUEUES_PER_DEV];
/* Dev args */
uint32_t xae_cnt;
uint8_t qos_queue_cnt;
@@ -253,9 +247,6 @@ void cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
int cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
const struct rte_event_queue_conf *queue_conf);
void cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id);
int cnxk_sso_queue_attribute_get(struct rte_eventdev *event_dev,
uint8_t queue_id, uint32_t attr_id,
uint32_t *attr_value);
int cnxk_sso_queue_attribute_set(struct rte_eventdev *event_dev,
uint8_t queue_id, uint32_t attr_id,
uint64_t attr_value);


@@ -344,26 +344,6 @@ typedef int (*eventdev_queue_setup_t)(struct rte_eventdev *dev,
typedef void (*eventdev_queue_release_t)(struct rte_eventdev *dev,
uint8_t queue_id);
/**
* Get an event queue attribute at runtime.
*
* @param dev
* Event device pointer
* @param queue_id
* Event queue index
* @param attr_id
* Event queue attribute id
* @param[out] attr_value
* Event queue attribute value
*
* @return
* - 0: Success.
* - <0: Error code on failure.
*/
typedef int (*eventdev_queue_attr_get_t)(struct rte_eventdev *dev,
uint8_t queue_id, uint32_t attr_id,
uint32_t *attr_value);
/**
* Set an event queue attribute at runtime.
*
@@ -1312,8 +1292,6 @@ struct eventdev_ops {
/**< Set up an event queue. */
eventdev_queue_release_t queue_release;
/**< Release an event queue. */
eventdev_queue_attr_get_t queue_attr_get;
/**< Get an event queue attribute. */
eventdev_queue_attr_set_t queue_attr_set;
/**< Set an event queue attribute. */


@@ -874,15 +874,13 @@ rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
break;
case RTE_EVENT_QUEUE_ATTR_WEIGHT:
*attr_value = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
if (dev->dev_ops->queue_attr_get)
return (*dev->dev_ops->queue_attr_get)(
dev, queue_id, attr_id, attr_value);
if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
*attr_value = conf->weight;
break;
case RTE_EVENT_QUEUE_ATTR_AFFINITY:
*attr_value = RTE_EVENT_QUEUE_AFFINITY_LOWEST;
if (dev->dev_ops->queue_attr_get)
return (*dev->dev_ops->queue_attr_get)(
dev, queue_id, attr_id, attr_value);
if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
*attr_value = conf->affinity;
break;
default:
return -EINVAL;
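
A brief, illustrative sketch (not part of this patch) of what the fallback above means for an application querying the attribute; the helper name is a placeholder:

#include <stdio.h>

#include <rte_eventdev.h>

/* Hypothetical helper: read back the effective weight of a queue. On a
 * device without RTE_EVENT_DEV_CAP_QUEUE_QOS the library now reports the
 * RTE_EVENT_QUEUE_WEIGHT_LOWEST default instead of calling into the driver.
 */
static int
print_queue_weight(uint8_t dev_id, uint8_t queue_id)
{
        uint32_t weight;
        int ret;

        ret = rte_event_queue_attr_get(dev_id, queue_id,
                                       RTE_EVENT_QUEUE_ATTR_WEIGHT, &weight);
        if (ret < 0)
                return ret;

        printf("queue %u: weight %u\n", (unsigned int)queue_id,
               (unsigned int)weight);
        return 0;
}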


@@ -640,6 +640,22 @@ struct rte_event_queue_conf {
* event device supported priority value.
* Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability
*/
uint8_t weight;
/**< Weight of the event queue relative to other event queues.
* The requested weight should be in the range of
* [RTE_EVENT_DEV_WEIGHT_HIGHEST, RTE_EVENT_DEV_WEIGHT_LOWEST].
* The implementation shall normalize the requested weight to event
* device supported weight value.
* Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
*/
uint8_t affinity;
/**< Affinity of the event queue relative to other event queues.
* The requested affinity should be in the range of
* [RTE_EVENT_DEV_AFFINITY_HIGHEST, RTE_EVENT_DEV_AFFINITY_LOWEST].
* The implementation shall normalize the requested affinity to event
* device supported affinity value.
* Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability.
*/
};
/**
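
A hedged sketch (not part of this patch) of adjusting the new attributes at runtime through the queue attribute set path shown earlier; the helper name is a placeholder and the rte_event_queue_attr_set() signature is assumed from the eventdev_queue_attr_set_t driver op above:

#include <rte_eventdev.h>

/* Hypothetical helper: raise an already configured queue to the highest
 * weight and affinity at runtime. Assumes a PMD that implements the
 * queue_attr_set op (e.g. as the cnxk driver does in this change).
 */
static int
raise_queue_qos(uint8_t dev_id, uint8_t queue_id)
{
        int ret;

        ret = rte_event_queue_attr_set(dev_id, queue_id,
                                       RTE_EVENT_QUEUE_ATTR_WEIGHT,
                                       RTE_EVENT_QUEUE_WEIGHT_HIGHEST);
        if (ret < 0)
                return ret;

        return rte_event_queue_attr_set(dev_id, queue_id,
                                        RTE_EVENT_QUEUE_ATTR_AFFINITY,
                                        RTE_EVENT_QUEUE_AFFINITY_HIGHEST);
}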