event/cnxk: support setting queue attributes at runtime

Added API to set queue attributes at runtime and API to get weight and
affinity.

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
This commit is contained in:
Shijith Thotton 2022-05-16 23:05:51 +05:30 committed by Jerin Jacob
parent be541d3758
commit 7da7925f99
5 changed files with 112 additions and 5 deletions

View File

@ -12,6 +12,7 @@ runtime_port_link = Y
multiple_queue_port = Y
carry_flow_id = Y
maintenance_free = Y
runtime_queue_attr = Y
[Eth Rx adapter Features]
internal_port = Y

View File

@ -909,9 +909,13 @@ cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
static struct eventdev_ops cn10k_sso_dev_ops = {
.dev_infos_get = cn10k_sso_info_get,
.dev_configure = cn10k_sso_dev_configure,
.queue_def_conf = cnxk_sso_queue_def_conf,
.queue_setup = cnxk_sso_queue_setup,
.queue_release = cnxk_sso_queue_release,
.queue_attr_get = cnxk_sso_queue_attribute_get,
.queue_attr_set = cnxk_sso_queue_attribute_set,
.port_def_conf = cnxk_sso_port_def_conf,
.port_setup = cn10k_sso_port_setup,
.port_release = cn10k_sso_port_release,

View File

@ -1141,9 +1141,13 @@ cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
static struct eventdev_ops cn9k_sso_dev_ops = {
.dev_infos_get = cn9k_sso_info_get,
.dev_configure = cn9k_sso_dev_configure,
.queue_def_conf = cnxk_sso_queue_def_conf,
.queue_setup = cnxk_sso_queue_setup,
.queue_release = cnxk_sso_queue_release,
.queue_attr_get = cnxk_sso_queue_attribute_get,
.queue_attr_set = cnxk_sso_queue_attribute_set,
.port_def_conf = cnxk_sso_port_def_conf,
.port_setup = cn9k_sso_port_setup,
.port_release = cn9k_sso_port_release,

View File

@ -120,7 +120,8 @@ cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
RTE_EVENT_DEV_CAP_NONSEQ_MODE |
RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
RTE_EVENT_DEV_CAP_MAINTENANCE_FREE |
RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR;
}
int
@ -300,11 +301,27 @@ cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
const struct rte_event_queue_conf *queue_conf)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
uint8_t priority, weight, affinity;
plt_sso_dbg("Queue=%d prio=%d", queue_id, queue_conf->priority);
/* Normalize <0-255> to <0-7> */
return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, 0xFF, 0xFF,
queue_conf->priority / 32);
/* Default weight and affinity */
dev->mlt_prio[queue_id].weight = RTE_EVENT_QUEUE_WEIGHT_LOWEST;
dev->mlt_prio[queue_id].affinity = RTE_EVENT_QUEUE_AFFINITY_HIGHEST;
priority = CNXK_QOS_NORMALIZE(queue_conf->priority, 0,
RTE_EVENT_DEV_PRIORITY_LOWEST,
CNXK_SSO_PRIORITY_CNT);
weight = CNXK_QOS_NORMALIZE(
dev->mlt_prio[queue_id].weight, CNXK_SSO_WEIGHT_MIN,
RTE_EVENT_QUEUE_WEIGHT_HIGHEST, CNXK_SSO_WEIGHT_CNT);
affinity = CNXK_QOS_NORMALIZE(dev->mlt_prio[queue_id].affinity, 0,
RTE_EVENT_QUEUE_AFFINITY_HIGHEST,
CNXK_SSO_AFFINITY_CNT);
plt_sso_dbg("Queue=%u prio=%u weight=%u affinity=%u", queue_id,
priority, weight, affinity);
return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, weight, affinity,
priority);
}
void
@ -314,6 +331,68 @@ cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
RTE_SET_USED(queue_id);
}
int
cnxk_sso_queue_attribute_get(struct rte_eventdev *event_dev, uint8_t queue_id,
uint32_t attr_id, uint32_t *attr_value)
{
/* Read back a runtime queue attribute from the driver's cached copy.
 * Only weight and affinity are serviced here; other attributes are
 * handled by the common eventdev layer. Returns 0 on success or
 * -EINVAL for an attribute this driver does not track.
 */
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

switch (attr_id) {
case RTE_EVENT_QUEUE_ATTR_WEIGHT:
*attr_value = dev->mlt_prio[queue_id].weight;
break;
case RTE_EVENT_QUEUE_ATTR_AFFINITY:
*attr_value = dev->mlt_prio[queue_id].affinity;
break;
default:
return -EINVAL;
}

return 0;
}
/* Set a runtime attribute (priority, weight or affinity) on an event
 * queue. The new raw value is stored (priority in the eventdev queue
 * config, weight/affinity in the driver's mlt_prio cache), then ALL
 * three values are re-normalized to hardware ranges and re-programmed
 * together via roc_sso_hwgrp_set_priority().
 *
 * Returns 0 on success, -ENOTSUP for attributes that cannot be changed
 * at runtime on this hardware, -EINVAL for unknown attribute ids, or
 * the result of the ROC programming call.
 */
int
cnxk_sso_queue_attribute_set(struct rte_eventdev *event_dev, uint8_t queue_id,
uint32_t attr_id, uint64_t attr_value)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
uint8_t priority, weight, affinity;
struct rte_event_queue_conf *conf;
conf = &event_dev->data->queues_cfg[queue_id];
switch (attr_id) {
case RTE_EVENT_QUEUE_ATTR_PRIORITY:
/* Keep the generic queue config in sync so later reads of the
 * priority attribute through the common layer see the new value.
 */
conf->priority = attr_value;
break;
case RTE_EVENT_QUEUE_ATTR_WEIGHT:
dev->mlt_prio[queue_id].weight = attr_value;
break;
case RTE_EVENT_QUEUE_ATTR_AFFINITY:
dev->mlt_prio[queue_id].affinity = attr_value;
break;
case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
/* FALLTHROUGH: these are fixed at queue setup time and cannot be
 * changed at runtime.
 */
plt_sso_dbg("Unsupported attribute id %u", attr_id);
return -ENOTSUP;
default:
plt_err("Invalid attribute id %u", attr_id);
return -EINVAL;
}
/* Normalize user-visible ranges to hardware ranges: priority <0-255>
 * to <0-7>, weight to <3-63>, affinity to <0-15>.
 */
priority = CNXK_QOS_NORMALIZE(conf->priority, 0,
RTE_EVENT_DEV_PRIORITY_LOWEST,
CNXK_SSO_PRIORITY_CNT);
weight = CNXK_QOS_NORMALIZE(
dev->mlt_prio[queue_id].weight, CNXK_SSO_WEIGHT_MIN,
RTE_EVENT_QUEUE_WEIGHT_HIGHEST, CNXK_SSO_WEIGHT_CNT);
affinity = CNXK_QOS_NORMALIZE(dev->mlt_prio[queue_id].affinity, 0,
RTE_EVENT_QUEUE_AFFINITY_HIGHEST,
CNXK_SSO_AFFINITY_CNT);
/* Hardware programs all three QoS parameters in one call, so the
 * unchanged attributes are re-applied from their cached values.
 */
return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, weight, affinity,
priority);
}
void
cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
struct rte_event_port_conf *port_conf)

View File

@ -38,6 +38,11 @@
#define CNXK_SSO_XAQ_CACHE_CNT (0x7)
#define CNXK_SSO_XAQ_SLACK (8)
#define CNXK_SSO_WQE_SG_PTR (9)
/* Number of discrete levels the user-visible priority <0-255> is
 * normalized down to before hardware programming.
 */
#define CNXK_SSO_PRIORITY_CNT (0x8)
/* Hardware weight range bounds — assumed to be the SSO HWGRP limits;
 * TODO(review): confirm against the ROC/HW spec.
 */
#define CNXK_SSO_WEIGHT_MAX (0x3f)
#define CNXK_SSO_WEIGHT_MIN (0x3)
/* Count of usable weight steps between MIN and MAX, inclusive. */
#define CNXK_SSO_WEIGHT_CNT (CNXK_SSO_WEIGHT_MAX - CNXK_SSO_WEIGHT_MIN + 1)
/* Number of discrete affinity levels after normalization. */
#define CNXK_SSO_AFFINITY_CNT (0x10)
#define CNXK_TT_FROM_TAG(x) (((x) >> 32) & SSO_TT_EMPTY)
#define CNXK_TT_FROM_EVENT(x) (((x) >> 38) & SSO_TT_EMPTY)
@ -54,6 +59,8 @@
#define CN10K_GW_MODE_PREF 1
#define CN10K_GW_MODE_PREF_WFE 2
/* Map @val from the user-visible range <0-max> onto @cnt discrete
 * levels starting at @min: min + val / ceil((max + 1) / cnt), with the
 * divisor computed as a rounded-up integer step size.
 *
 * All arguments and the full expansion are parenthesized so callers may
 * pass compound expressions without operator-precedence surprises
 * (CERT PRE01-C / PRE02-C); e.g. with the old unparenthesized form,
 * passing `a + b` as @val bound only `b` to the division.
 */
#define CNXK_QOS_NORMALIZE(val, min, max, cnt) \
	((min) + (val) / (((max) + (cnt) - 1) / (cnt)))
#define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name) \
do { \
if (strncmp(dev->driver->name, drv_name, strlen(drv_name))) \
@ -79,6 +86,11 @@ struct cnxk_sso_qos {
uint16_t iaq_prcnt;
};
/* Per-queue QoS state cached in the driver so that changing one
 * attribute at runtime can re-program hardware with the other value
 * intact. Both fields hold the raw user-visible values; they are
 * normalized to hardware ranges only at programming time.
 */
struct cnxk_sso_mlt_prio {
uint8_t weight;   /* defaults to RTE_EVENT_QUEUE_WEIGHT_LOWEST at queue setup */
uint8_t affinity; /* defaults to RTE_EVENT_QUEUE_AFFINITY_HIGHEST at queue setup */
};
struct cnxk_sso_evdev {
struct roc_sso sso;
uint8_t max_event_queues;
@ -108,6 +120,7 @@ struct cnxk_sso_evdev {
uint64_t *timer_adptr_sz;
uint16_t vec_pool_cnt;
uint64_t *vec_pools;
struct cnxk_sso_mlt_prio mlt_prio[RTE_EVENT_MAX_QUEUES_PER_DEV];
/* Dev args */
uint32_t xae_cnt;
uint8_t qos_queue_cnt;
@ -234,6 +247,12 @@ void cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
int cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
const struct rte_event_queue_conf *queue_conf);
void cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id);
int cnxk_sso_queue_attribute_get(struct rte_eventdev *event_dev,
uint8_t queue_id, uint32_t attr_id,
uint32_t *attr_value);
int cnxk_sso_queue_attribute_set(struct rte_eventdev *event_dev,
uint8_t queue_id, uint32_t attr_id,
uint64_t attr_value);
void cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
struct rte_event_port_conf *port_conf);
int cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,