eventdev/eth_rx: simplify event vector config

Include the vector configuration in the
``rte_event_eth_rx_adapter_queue_conf`` structure that is used to configure
an Rx adapter's ethernet device Rx queue parameters.
This simplifies event vector configuration by avoiding a separate
configuration step per Rx queue.
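
For illustration, a minimal sketch of the unified flow after this change
(the adapter/device identifiers and the vector size below are assumed values,
not part of the patch):

#include <string.h>

#include <rte_common.h>
#include <rte_event_eth_rx_adapter.h>

/* Hypothetical identifiers for illustration only. */
#define ADAPTER_ID 0
#define EVDEV_ID   0
#define ETH_PORT   0

static int
add_vectorized_queue(struct rte_mempool *vector_pool)
{
	struct rte_event_eth_rx_adapter_vector_limits limits;
	struct rte_event_eth_rx_adapter_queue_conf qconf;
	int ret;

	memset(&qconf, 0, sizeof(qconf));
	qconf.ev.queue_id = 0;
	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;

	/* Query device limits so the requested size/timeout stay in range. */
	ret = rte_event_eth_rx_adapter_vector_limits_get(EVDEV_ID, ETH_PORT,
							 &limits);
	if (ret < 0)
		return ret;

	/* Vector settings now live in the queue configuration itself. */
	qconf.rx_queue_flags |= RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
	qconf.vector_sz = RTE_MIN((uint16_t)256, limits.max_sz);
	qconf.vector_timeout_ns = limits.min_timeout_ns;
	qconf.vector_mp = vector_pool;

	/* One call configures the queue and its vectorization (-1 = all). */
	return rte_event_eth_rx_adapter_queue_add(ADAPTER_ID, ETH_PORT, -1,
						  &qconf);
}

``vector_pool`` would typically come from ``rte_event_vector_pool_create()``
with ``nb_elem`` matching ``vector_sz``, so the mempool element size passes
the validation this patch adds to ``rte_event_eth_rx_adapter_queue_add``.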

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Pavan Nikhilesh 2021-09-15 18:45:20 +05:30 committed by Jerin Jacob
parent e3f128dbee
commit 929ebdd543
9 changed files with 118 additions and 272 deletions


@@ -331,7 +331,6 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
uint16_t prod;
struct rte_mempool *vector_pool = NULL;
struct rte_event_eth_rx_adapter_queue_conf queue_conf;
struct rte_event_eth_rx_adapter_event_vector_config vec_conf;
memset(&queue_conf, 0,
sizeof(struct rte_event_eth_rx_adapter_queue_conf));
@@ -397,8 +396,12 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
}
if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
queue_conf.vector_sz = opt->vector_size;
queue_conf.vector_timeout_ns =
opt->vector_tmo_nsec;
queue_conf.rx_queue_flags |=
RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
queue_conf.vector_mp = vector_pool;
} else {
evt_err("Rx adapter doesn't support event vector");
return -EINVAL;
@@ -418,17 +421,6 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
return ret;
}
if (opt->ena_vector) {
vec_conf.vector_sz = opt->vector_size;
vec_conf.vector_timeout_ns = opt->vector_tmo_nsec;
vec_conf.vector_mp = vector_pool;
if (rte_event_eth_rx_adapter_queue_event_vector_config(
prod, prod, -1, &vec_conf) < 0) {
evt_err("Failed to configure event vectorization for Rx adapter");
return -EINVAL;
}
}
if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
uint32_t service_id = -1U;


@@ -195,12 +195,17 @@ The event devices, ethernet device pairs which support the capability
flow characteristics and generate a ``rte_event`` containing ``rte_event_vector``
whose event type is either ``RTE_EVENT_TYPE_ETHDEV_VECTOR`` or
``RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR``.
The aggregation size and timeout are configurable at a queue level and the
maximum, minimum vector sizes and timeouts vary based on the device capability
and can be queried using ``rte_event_eth_rx_adapter_vector_limits_get``.
The maximum, minimum vector sizes and timeouts vary based on the device
capability and can be queried using
``rte_event_eth_rx_adapter_vector_limits_get``.
The Rx adapter additionally might include useful data such as ethernet device
port and queue identifier in the ``rte_event_vector::port`` and
``rte_event_vector::queue`` and mark ``rte_event_vector::attr_valid`` as true.
The aggregation size and timeout are configurable at a queue level by setting
``rte_event_eth_rx_adapter_queue_conf::vector_sz``,
``rte_event_eth_rx_adapter_queue_conf::vector_timeout_ns`` and
``rte_event_eth_rx_adapter_queue_conf::vector_mp`` when adding queues using
``rte_event_eth_rx_adapter_queue_add``.
A loop processing ``rte_event_vector`` containing mbufs is shown below.
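
That loop is not reproduced in this view; a minimal sketch of such a worker
loop, assuming ``EVDEV_ID``, ``PORT_ID``, ``done`` and ``process_mbuf()`` are
defined by the application, could look like:

.. code-block:: c

   struct rte_event ev;
   uint16_t i;

   while (!done) {
           if (rte_event_dequeue_burst(EVDEV_ID, PORT_ID, &ev, 1, 0) == 0)
                   continue;

           if (ev.event_type == RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR ||
               ev.event_type == RTE_EVENT_TYPE_ETHDEV_VECTOR) {
                   struct rte_event_vector *vec = ev.vec;

                   /* If vec->attr_valid is set, all mbufs share the
                    * originating vec->port and vec->queue. */
                   for (i = 0; i < vec->nb_elem; i++)
                           process_mbuf(vec->mbufs[i]);

                   /* Return the spent vector to its mempool. */
                   rte_mempool_put(rte_mempool_from_obj(vec), vec);
           }
   }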


@@ -166,6 +166,12 @@ Deprecation Notices
values to the function ``rte_event_eth_rx_adapter_queue_add`` using
the structure ``rte_event_eth_rx_adapter_queue_conf``.
* eventdev: Reserved bytes of ``rte_event_crypto_request`` are a placeholder
for ``response_info``. Both should be decoupled for better clarity in
DPDK 21.11.
New space for ``response_info`` can be made by changing the
``rte_event_crypto_metadata`` type from a union to a structure.
* metrics: The function ``rte_metrics_init`` will have a non-void return
in order to notify errors instead of calling ``rte_exit``.


@@ -695,81 +695,6 @@ cn10k_sso_rx_adapter_vector_limits(
return 0;
}
static int
cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
uint16_t port_id, uint16_t rq_id, uint16_t sz,
uint64_t tmo_ns, struct rte_mempool *vmp)
{
struct roc_nix_rq *rq;
rq = &cnxk_eth_dev->rqs[rq_id];
if (!rq->sso_ena)
return -EINVAL;
if (rq->flow_tag_width == 0)
return -EINVAL;
rq->vwqe_ena = 1;
rq->vwqe_first_skip = 0;
rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
rq->vwqe_max_sz_exp = rte_log2_u32(sz);
rq->vwqe_wait_tmo =
tmo_ns /
((roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100);
rq->tag_mask = (port_id & 0xF) << 20;
rq->tag_mask |=
(((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV_VECTOR << 4))
<< 24;
return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
}
static int
cn10k_sso_rx_adapter_vector_config(
const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
int32_t rx_queue_id,
const struct rte_event_eth_rx_adapter_event_vector_config *config)
{
struct cnxk_eth_dev *cnxk_eth_dev;
struct cnxk_sso_evdev *dev;
int i, rc;
rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
if (rc)
return -EINVAL;
dev = cnxk_sso_pmd_priv(event_dev);
cnxk_eth_dev = cnxk_eth_pmd_priv(eth_dev);
if (rx_queue_id < 0) {
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
cnxk_sso_updt_xae_cnt(dev, config->vector_mp,
RTE_EVENT_TYPE_ETHDEV_VECTOR);
rc = cnxk_sso_xae_reconfigure(
(struct rte_eventdev *)(uintptr_t)event_dev);
rc = cnxk_sso_rx_adapter_vwqe_enable(
cnxk_eth_dev, eth_dev->data->port_id, i,
config->vector_sz, config->vector_timeout_ns,
config->vector_mp);
if (rc)
return -EINVAL;
}
} else {
cnxk_sso_updt_xae_cnt(dev, config->vector_mp,
RTE_EVENT_TYPE_ETHDEV_VECTOR);
rc = cnxk_sso_xae_reconfigure(
(struct rte_eventdev *)(uintptr_t)event_dev);
rc = cnxk_sso_rx_adapter_vwqe_enable(
cnxk_eth_dev, eth_dev->data->port_id, rx_queue_id,
config->vector_sz, config->vector_timeout_ns,
config->vector_mp);
if (rc)
return -EINVAL;
}
return 0;
}
static int
cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
const struct rte_eth_dev *eth_dev, uint32_t *caps)
@@ -883,8 +808,6 @@ static struct rte_eventdev_ops cn10k_sso_dev_ops = {
.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
.eth_rx_adapter_vector_limits_get = cn10k_sso_rx_adapter_vector_limits,
.eth_rx_adapter_event_vector_config =
cn10k_sso_rx_adapter_vector_config,
.eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,
.eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add,


@@ -175,6 +175,35 @@ cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id)
return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
}
static int
cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
uint16_t port_id, uint16_t rq_id, uint16_t sz,
uint64_t tmo_ns, struct rte_mempool *vmp)
{
struct roc_nix_rq *rq;
rq = &cnxk_eth_dev->rqs[rq_id];
if (!rq->sso_ena)
return -EINVAL;
if (rq->flow_tag_width == 0)
return -EINVAL;
rq->vwqe_ena = 1;
rq->vwqe_first_skip = 0;
rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
rq->vwqe_max_sz_exp = rte_log2_u32(sz);
rq->vwqe_wait_tmo =
tmo_ns /
((roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100);
rq->tag_mask = (port_id & 0xF) << 20;
rq->tag_mask |=
(((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV_VECTOR << 4))
<< 24;
return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
}
int
cnxk_sso_rx_adapter_queue_add(
const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
@@ -202,6 +231,18 @@ cnxk_sso_rx_adapter_queue_add(
&queue_conf->ev,
!!(queue_conf->rx_queue_flags &
RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID));
if (queue_conf->rx_queue_flags &
RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
cnxk_sso_updt_xae_cnt(dev, queue_conf->vector_mp,
RTE_EVENT_TYPE_ETHDEV_VECTOR);
rc |= cnxk_sso_xae_reconfigure(
(struct rte_eventdev *)(uintptr_t)event_dev);
rc |= cnxk_sso_rx_adapter_vwqe_enable(
cnxk_eth_dev, port, rx_queue_id,
queue_conf->vector_sz,
queue_conf->vector_timeout_ns,
queue_conf->vector_mp);
}
rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
rxq_sp->qconf.mp->pool_id, true,
dev->force_ena_bp);
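
An aside on the helper moved into this file: the hardware wait timeout is
programmed in ticks of ``(roc_nix_get_vwqe_interval() + 1) * 100``
nanoseconds, and the vector size as a log2 exponent. A worked example,
assuming an interval value of 9 (the real value is hardware-dependent):

	uint64_t tmo_ns = 100 * 1000;	/* requested timeout: 100 us */
	uint16_t sz = 64;		/* requested vector size */

	/* One tick = (9 + 1) * 100 = 1000 ns, so 100 us programs 100 ticks. */
	uint64_t wait_tmo = tmo_ns / ((9 + 1) * 100);

	/* The size is programmed as an exponent: rte_log2_u32(64) = 6. */
	uint32_t max_sz_exp = rte_log2_u32(sz);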


@@ -663,32 +663,6 @@ typedef int (*eventdev_eth_rx_adapter_vector_limits_get_t)(
const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
struct rte_event_eth_rx_adapter_vector_limits *limits);
struct rte_event_eth_rx_adapter_event_vector_config;
/**
* Enable event vector on a given Rx queue of an ethernet device belonging to
* the Rx adapter.
*
* @param dev
* Event device pointer
*
* @param eth_dev
* Ethernet device pointer
*
* @param rx_queue_id
* The Rx queue identifier
*
* @param config
* Pointer to the event vector configuration structure.
*
* @return
* - 0: Success.
* - <0: Error code returned by the driver function.
*/
typedef int (*eventdev_eth_rx_adapter_event_vector_config_t)(
const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
int32_t rx_queue_id,
const struct rte_event_eth_rx_adapter_event_vector_config *config);
typedef uint32_t rte_event_pmd_selftest_seqn_t;
extern int rte_event_pmd_selftest_seqn_dynfield_offset;
@@ -1114,9 +1088,6 @@ struct rte_eventdev_ops {
eventdev_eth_rx_adapter_vector_limits_get_t
eth_rx_adapter_vector_limits_get;
/**< Get event vector limits for the Rx adapter */
eventdev_eth_rx_adapter_event_vector_config_t
eth_rx_adapter_event_vector_config;
/**< Configure Rx adapter with event vector */
eventdev_timer_adapter_caps_get_t timer_adapter_caps_get;
/**< Get timer adapter capabilities */


@@ -1947,6 +1947,24 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
} else
qi_ev->flow_id = 0;
if (conf->rx_queue_flags &
RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
queue_info->ena_vector = 1;
qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
rxa_set_vector_data(queue_info, conf->vector_sz,
conf->vector_timeout_ns, conf->vector_mp,
rx_queue_id, dev_info->dev->data->port_id);
rx_adapter->ena_vector = 1;
rx_adapter->vector_tmo_ticks =
rx_adapter->vector_tmo_ticks ?
RTE_MIN(queue_info->vector_data
.vector_timeout_ticks >>
1,
rx_adapter->vector_tmo_ticks) :
queue_info->vector_data.vector_timeout_ticks >>
1;
}
rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
if (rxa_polled_queue(dev_info, rx_queue_id)) {
rx_adapter->num_rx_polled += !pollq;
@@ -1972,42 +1990,6 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
}
}
static void
rxa_sw_event_vector_configure(
struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
int rx_queue_id,
const struct rte_event_eth_rx_adapter_event_vector_config *config)
{
struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
struct eth_rx_queue_info *queue_info;
struct rte_event *qi_ev;
if (rx_queue_id == -1) {
uint16_t nb_rx_queues;
uint16_t i;
nb_rx_queues = dev_info->dev->data->nb_rx_queues;
for (i = 0; i < nb_rx_queues; i++)
rxa_sw_event_vector_configure(rx_adapter, eth_dev_id, i,
config);
return;
}
queue_info = &dev_info->rx_queue[rx_queue_id];
qi_ev = (struct rte_event *)&queue_info->event;
queue_info->ena_vector = 1;
qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
rxa_set_vector_data(queue_info, config->vector_sz,
config->vector_timeout_ns, config->vector_mp,
rx_queue_id, dev_info->dev->data->port_id);
rx_adapter->ena_vector = 1;
rx_adapter->vector_tmo_ticks =
rx_adapter->vector_tmo_ticks ?
RTE_MIN(config->vector_timeout_ns >> 1,
rx_adapter->vector_tmo_ticks) :
config->vector_timeout_ns >> 1;
}
static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
uint16_t eth_dev_id,
int rx_queue_id,
@@ -2322,6 +2304,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
struct rte_event_eth_rx_adapter *rx_adapter;
struct rte_eventdev *dev;
struct eth_device_info *dev_info;
struct rte_event_eth_rx_adapter_vector_limits limits;
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
@@ -2349,13 +2332,46 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
return -EINVAL;
}
if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0 &&
(queue_conf->rx_queue_flags &
RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR)) {
RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
" eth port: %" PRIu16 " adapter id: %" PRIu8,
eth_dev_id, id);
return -EINVAL;
if (queue_conf->rx_queue_flags &
RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
" eth port: %" PRIu16
" adapter id: %" PRIu8,
eth_dev_id, id);
return -EINVAL;
}
ret = rte_event_eth_rx_adapter_vector_limits_get(
rx_adapter->eventdev_id, eth_dev_id, &limits);
if (ret < 0) {
RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
" eth port: %" PRIu16
" adapter id: %" PRIu8,
eth_dev_id, id);
return -EINVAL;
}
if (queue_conf->vector_sz < limits.min_sz ||
queue_conf->vector_sz > limits.max_sz ||
queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
queue_conf->vector_mp == NULL) {
RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
" eth port: %" PRIu16
" adapter id: %" PRIu8,
eth_dev_id, id);
return -EINVAL;
}
if (queue_conf->vector_mp->elt_size <
(sizeof(struct rte_event_vector) +
(sizeof(uintptr_t) * queue_conf->vector_sz))) {
RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
" eth port: %" PRIu16
" adapter id: %" PRIu8,
eth_dev_id, id);
return -EINVAL;
}
}
if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
@@ -2551,83 +2567,6 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
return ret;
}
int
rte_event_eth_rx_adapter_queue_event_vector_config(
uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
struct rte_event_eth_rx_adapter_event_vector_config *config)
{
struct rte_event_eth_rx_adapter_vector_limits limits;
struct rte_event_eth_rx_adapter *rx_adapter;
struct rte_eventdev *dev;
uint32_t cap;
int ret;
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
rx_adapter = rxa_id_to_adapter(id);
if ((rx_adapter == NULL) || (config == NULL))
return -EINVAL;
dev = &rte_eventdevs[rx_adapter->eventdev_id];
ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
eth_dev_id, &cap);
if (ret) {
RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
"eth port %" PRIu16,
id, eth_dev_id);
return ret;
}
if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR)) {
RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
" eth port: %" PRIu16 " adapter id: %" PRIu8,
eth_dev_id, id);
return -EINVAL;
}
ret = rte_event_eth_rx_adapter_vector_limits_get(
rx_adapter->eventdev_id, eth_dev_id, &limits);
if (ret) {
RTE_EDEV_LOG_ERR("Failed to get vector limits edev %" PRIu8
"eth port %" PRIu16,
rx_adapter->eventdev_id, eth_dev_id);
return ret;
}
if (config->vector_sz < limits.min_sz ||
config->vector_sz > limits.max_sz ||
config->vector_timeout_ns < limits.min_timeout_ns ||
config->vector_timeout_ns > limits.max_timeout_ns ||
config->vector_mp == NULL) {
RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
" eth port: %" PRIu16 " adapter id: %" PRIu8,
eth_dev_id, id);
return -EINVAL;
}
if (config->vector_mp->elt_size <
(sizeof(struct rte_event_vector) +
(sizeof(uintptr_t) * config->vector_sz))) {
RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
" eth port: %" PRIu16 " adapter id: %" PRIu8,
eth_dev_id, id);
return -EINVAL;
}
if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
RTE_FUNC_PTR_OR_ERR_RET(
*dev->dev_ops->eth_rx_adapter_event_vector_config,
-ENOTSUP);
ret = dev->dev_ops->eth_rx_adapter_event_vector_config(
dev, &rte_eth_devices[eth_dev_id], rx_queue_id, config);
} else {
rxa_sw_event_vector_configure(rx_adapter, eth_dev_id,
rx_queue_id, config);
}
return ret;
}
int
rte_event_eth_rx_adapter_vector_limits_get(
uint8_t dev_id, uint16_t eth_port_id,


@@ -171,9 +171,6 @@ struct rte_event_eth_rx_adapter_queue_conf {
* The event adapter sets ev.event_type to RTE_EVENT_TYPE_ETHDEV in the
* enqueued event.
*/
};
struct rte_event_eth_rx_adapter_event_vector_config {
uint16_t vector_sz;
/**<
* Indicates the maximum number of mbufs to combine and form a vector.
@@ -548,33 +545,6 @@ int rte_event_eth_rx_adapter_vector_limits_get(
uint8_t dev_id, uint16_t eth_port_id,
struct rte_event_eth_rx_adapter_vector_limits *limits);
/**
* Configure event vectorization for a given ethernet device queue that has
* been added to an event eth Rx adapter.
*
* @param id
* The identifier of the ethernet Rx event adapter.
*
* @param eth_dev_id
* The identifier of the ethernet device.
*
* @param rx_queue_id
* Ethernet device receive queue index.
* If rx_queue_id is -1, then all Rx queues configured for the ethernet device
* are configured with event vectorization.
*
* @param config
* Event vector configuration structure.
*
* @return
* - 0: Success, Receive queue configured correctly.
* - <0: Error code on failure.
*/
__rte_experimental
int rte_event_eth_rx_adapter_queue_event_vector_config(
uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id,
struct rte_event_eth_rx_adapter_event_vector_config *config);
#ifdef __cplusplus
}
#endif


@@ -142,7 +142,6 @@ EXPERIMENTAL {
# added in 21.05
rte_event_vector_pool_create;
rte_event_eth_rx_adapter_vector_limits_get;
rte_event_eth_rx_adapter_queue_event_vector_config;
__rte_eventdev_trace_crypto_adapter_enqueue;
};