event/octeontx2: add devargs for inflight buffer count

The number of events for an *open system* event device is specified
as -1 as per the eventdev specification.
Since OCTEON TX2 SSO in-flight events are limited only by DRAM size, the
xae_cnt devargs parameter is introduced to provide an upper limit for
in-flight events.

Example:
	--dev "0002:0e:00.0,xae_cnt=8192"

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>

@@ -46,6 +46,18 @@ The following option can be modified in the ``config`` file.
 
   Toggle compilation of the ``librte_pmd_octeontx2_event`` driver.
 
+Runtime Config Options
+~~~~~~~~~~~~~~~~~~~~~~
+
+- ``Maximum number of in-flight events`` (default ``8192``)
+
+  In **Marvell OCTEON TX2** the max number of in-flight events is limited
+  only by DRAM size; the ``xae_cnt`` devargs parameter is introduced to
+  provide an upper limit for in-flight events.
+  For example::
+
+    --dev "0002:0e:00.0,xae_cnt=16384"
+
 Debugging Options
 ~~~~~~~~~~~~~~~~~
 

@@ -35,7 +35,7 @@ LIBABIVER := 1
 
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev.c
 
-LDLIBS += -lrte_eal -lrte_bus_pci -lrte_pci
+LDLIBS += -lrte_eal -lrte_bus_pci -lrte_pci -lrte_kvargs
 LDLIBS += -lrte_mempool -lrte_eventdev -lrte_mbuf
 LDLIBS += -lrte_common_octeontx2 -lrte_mempool_octeontx2
 

@@ -8,6 +8,7 @@
 
 #include <rte_common.h>
 #include <rte_eal.h>
 #include <rte_eventdev_pmd_pci.h>
+#include <rte_kvargs.h>
 #include <rte_mbuf_pool_ops.h>
 #include <rte_pci.h>
@@ -245,7 +246,10 @@ sso_xaq_allocate(struct otx2_sso_evdev *dev)
 
 	/* Taken from HRM 14.3.3(4) */
 	xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
-	xaq_cnt += (dev->iue / dev->xae_waes) +
-		   (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
+	if (dev->xae_cnt)
+		xaq_cnt += dev->xae_cnt / dev->xae_waes;
+	else
+		xaq_cnt += (dev->iue / dev->xae_waes) +
+			   (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
 
 	otx2_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
@@ -464,6 +468,25 @@ static struct rte_eventdev_ops otx2_sso_ops = {
 	.queue_release = otx2_sso_queue_release,
 };
 
+#define OTX2_SSO_XAE_CNT	"xae_cnt"
+
+static void
+sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
+{
+	struct rte_kvargs *kvlist;
+
+	if (devargs == NULL)
+		return;
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (kvlist == NULL)
+		return;
+
+	rte_kvargs_process(kvlist, OTX2_SSO_XAE_CNT, &parse_kvargs_value,
+			   &dev->xae_cnt);
+
+	rte_kvargs_free(kvlist);
+}
+
 static int
 otx2_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 {
@@ -553,6 +576,8 @@ otx2_sso_init(struct rte_eventdev *event_dev)
 		goto otx2_npa_lf_uninit;
 	}
 
+	sso_parse_devargs(dev, pci_dev->device.devargs);
+
 	otx2_sso_pf_func_set(dev->pf_func);
 	otx2_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
 		     event_dev->data->name, dev->max_event_queues,
@@ -601,3 +626,4 @@ otx2_sso_fini(struct rte_eventdev *event_dev)
 RTE_PMD_REGISTER_PCI(event_octeontx2, pci_sso);
 RTE_PMD_REGISTER_PCI_TABLE(event_octeontx2, pci_sso_map);
 RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>");
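
The kvargs flow used by sso_parse_devargs() can be exercised standalone; a
minimal, self-contained sketch (the devargs string and print handler are
illustrative, not part of the patch):

	#include <stdio.h>
	#include <rte_kvargs.h>

	/* Called once per key match; prints the key/value pair. */
	static int
	print_value(const char *key, const char *value, void *opaque)
	{
		(void)opaque;
		printf("%s=%s\n", key, value);
		return 0;
	}

	int
	main(void)
	{
		struct rte_kvargs *kvlist = rte_kvargs_parse("xae_cnt=8192", NULL);

		if (kvlist == NULL)
			return -1;
		rte_kvargs_process(kvlist, "xae_cnt", print_value, NULL);
		rte_kvargs_free(kvlist);
		return 0;
	}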

@@ -62,6 +62,8 @@ struct otx2_sso_evdev {
 	uint64_t nb_xaq_cfg;
 	rte_iova_t fc_iova;
 	struct rte_mempool *xaq_pool;
+	/* Dev args */
+	uint32_t xae_cnt;
 	/* HW const */
 	uint32_t xae_waes;
 	uint32_t xaq_buf_size;
@@ -74,6 +76,15 @@ sso_pmd_priv(const struct rte_eventdev *event_dev)
 	return event_dev->data->dev_private;
 }
 
+static inline int
+parse_kvargs_value(const char *key, const char *value, void *opaque)
+{
+	RTE_SET_USED(key);
+
+	*(uint32_t *)opaque = (uint32_t)atoi(value);
+	return 0;
+}
+
 /* Init and Fini API's */
 int otx2_sso_init(struct rte_eventdev *event_dev);
 int otx2_sso_fini(struct rte_eventdev *event_dev);
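
Note that parse_kvargs_value() relies on atoi(), which silently yields 0 on
malformed input; a more defensive variant is sketched below (a hypothetical
alternative using strtoul() with explicit error checking, not part of the
patch):

	#include <errno.h>
	#include <stdint.h>
	#include <stdlib.h>

	#include <rte_common.h>

	/* Hypothetical stricter handler: rejects malformed or out-of-range
	 * values instead of silently storing 0. */
	static inline int
	parse_kvargs_value_checked(const char *key, const char *value, void *opaque)
	{
		unsigned long v;
		char *end = NULL;

		RTE_SET_USED(key);
		errno = 0;
		v = strtoul(value, &end, 0);
		if (errno != 0 || end == value || *end != '\0' || v > UINT32_MAX)
			return -EINVAL;

		*(uint32_t *)opaque = (uint32_t)v;
		return 0;
	}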