event/cnxk: add option for in-flight buffer count

The number of events for an *open system* event device is specified
as -1 as per the eventdev specification.
Since SSO in-flight events are limited only by DRAM size, the
xae_cnt devargs parameter is introduced to provide an upper limit for
in-flight events.

Example:
        --dev "0002:0e:00.0,xae_cnt=8192"

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
This commit is contained in:
Shijith Thotton 2021-05-04 05:57:00 +05:30 committed by Jerin Jacob
parent b85105230d
commit e656d40fd1
5 changed files with 53 additions and 2 deletions

View File

@ -41,6 +41,20 @@ Prerequisites and Compilation procedure
See :doc:`../platform/cnxk` for setup information.
Runtime Config Options
----------------------
- ``Maximum number of in-flight events`` (default ``8192``)
In **Marvell OCTEON cnxk**, the maximum number of in-flight events is limited
only by DRAM size; the ``xae_cnt`` devargs parameter is introduced to provide an
upper limit for in-flight events.
For example::
-a 0002:0e:00.0,xae_cnt=16384
Debugging Options
-----------------

View File

@ -143,3 +143,4 @@ static struct rte_pci_driver cn10k_pci_sso = {
/* Load-time PMD registration for the cn10k SSO event device: the PCI
 * driver itself, its PCI ID match table, the kernel-module dependency
 * (vfio-pci), and the devargs this driver accepts (xae_cnt=<int>). */
RTE_PMD_REGISTER_PCI(event_cn10k, cn10k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn10k, cn10k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>");

View File

@ -146,3 +146,4 @@ static struct rte_pci_driver cn9k_pci_sso = {
/* Load-time PMD registration for the cn9k SSO event device: the PCI
 * driver itself, its PCI ID match table, the kernel-module dependency
 * (vfio-pci), and the devargs this driver accepts (xae_cnt=<int>). */
RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>");

View File

@ -75,8 +75,11 @@ cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
/* Taken from HRM 14.3.3(4) */
xaq_cnt = dev->nb_event_queues * CNXK_SSO_XAQ_CACHE_CNT;
xaq_cnt += (dev->sso.iue / dev->sso.xae_waes) +
(CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
if (dev->xae_cnt)
xaq_cnt += dev->xae_cnt / dev->sso.xae_waes;
else
xaq_cnt += (dev->sso.iue / dev->sso.xae_waes) +
(CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
plt_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
/* Setup XAQ based on number of nb queues. */
@ -222,6 +225,22 @@ cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
port_conf->enqueue_depth = 1;
}
/* Parse device arguments attached to the PCI device and store the
 * recognized values (currently only "xae_cnt") into @dev. Missing or
 * unparsable devargs leave @dev untouched; parsing is best-effort. */
static void
cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs)
{
	struct rte_kvargs *kvargs;

	if (!devargs)
		return;

	/* NULL valid-keys list: accept any key, process only known ones. */
	kvargs = rte_kvargs_parse(devargs->args, NULL);
	if (!kvargs)
		return;

	rte_kvargs_process(kvargs, CNXK_SSO_XAE_CNT, &parse_kvargs_value,
			   &dev->xae_cnt);

	rte_kvargs_free(kvargs);
}
int
cnxk_sso_init(struct rte_eventdev *event_dev)
{
@ -242,6 +261,7 @@ cnxk_sso_init(struct rte_eventdev *event_dev)
dev->sso.pci_dev = pci_dev;
*(uint64_t *)mz->addr = (uint64_t)dev;
cnxk_sso_parse_devargs(dev, pci_dev->device.devargs);
/* Initialize the base cnxk_dev object */
rc = roc_sso_dev_init(&dev->sso);

View File

@ -5,6 +5,8 @@
#ifndef __CNXK_EVENTDEV_H__
#define __CNXK_EVENTDEV_H__
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_pci.h>
@ -12,6 +14,8 @@
#include "roc_api.h"
#define CNXK_SSO_XAE_CNT "xae_cnt"
#define USEC2NSEC(__us) ((__us)*1E3)
#define CNXK_SSO_FC_NAME "cnxk_evdev_xaq_fc"
@ -35,10 +39,21 @@ struct cnxk_sso_evdev {
uint64_t nb_xaq_cfg;
rte_iova_t fc_iova;
struct rte_mempool *xaq_pool;
/* Dev args */
uint32_t xae_cnt;
/* CN9K */
uint8_t dual_ws;
} __rte_cache_aligned;
/**
 * rte_kvargs_process() callback: parse @value as an unsigned integer and
 * store it into the uint32_t pointed to by @opaque.
 *
 * Uses strtoul() with base 0, so decimal, octal ("0...") and hex ("0x...")
 * forms are accepted. Plain decimal input behaves exactly as the previous
 * atoi()-based version (non-numeric input stores 0; "-1" wraps to
 * UINT32_MAX).
 *
 * @param key    kvargs key name (unused; required by the callback signature)
 * @param value  string value to convert
 * @param opaque pointer to the uint32_t destination
 * @return always 0 (best-effort parsing, never aborts kvargs processing)
 */
static inline int
parse_kvargs_value(const char *key, const char *value, void *opaque)
{
	(void)key; /* unused; signature fixed by rte_kvargs_process() */

	*(uint32_t *)opaque = (uint32_t)strtoul(value, NULL, 0);
	return 0;
}
static inline struct cnxk_sso_evdev *
cnxk_sso_pmd_priv(const struct rte_eventdev *event_dev)
{