event/cnxk: add option to disable NPA

If the chunks are allocated from NPA, then TIM can automatically free
them when traversing the list of chunks.
Add devargs to disable NPA and use software mempool to manage chunks.

Example:
	--dev "0002:0e:00.0,tim_disable_npa=1"

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
This commit is contained in:
Pavan Nikhilesh 2021-05-04 05:57:15 +05:30 committed by Jerin Jacob
parent 0e792433d0
commit 1b06a817b8
6 changed files with 91 additions and 23 deletions

View File

@ -93,6 +93,16 @@ Runtime Config Options
-a 0002:0e:00.0,qos=[1-50-50-50]
- ``TIM disable NPA``
By default, chunks are allocated from NPA, so TIM can automatically free
them when traversing the list of chunks. The ``tim_disable_npa`` devargs
parameter disables NPA and uses a software mempool to manage chunks.
For example::
-a 0002:0e:00.0,tim_disable_npa=1
Debugging Options
-----------------

View File

@ -502,4 +502,5 @@ RTE_PMD_REGISTER_PCI_TABLE(event_cn10k, cn10k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>"
CNXK_SSO_GGRP_QOS "=<string>"
CN10K_SSO_GW_MODE "=<int>");
CN10K_SSO_GW_MODE "=<int>"
CNXK_TIM_DISABLE_NPA "=1");

View File

@ -571,4 +571,5 @@ RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
CNXK_SSO_GGRP_QOS "=<string>"
CN9K_SSO_SINGLE_WS "=1");
CN9K_SSO_SINGLE_WS "=1"
CNXK_TIM_DISABLE_NPA "=1");

View File

@ -159,6 +159,15 @@ struct cnxk_sso_hws_cookie {
bool configured;
} __rte_cache_aligned;
static inline int
parse_kvargs_flag(const char *key, const char *value, void *opaque)
{
	uint8_t *flag = opaque;

	RTE_SET_USED(key);

	/* Any non-zero integer in the devarg value enables the flag. */
	*flag = atoi(value) != 0;

	return 0;
}
static inline int
parse_kvargs_value(const char *key, const char *value, void *opaque)
{

View File

@ -31,30 +31,43 @@ cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
cache_sz = RTE_MEMPOOL_CACHE_MAX_SIZE;
cache_sz = cache_sz != 0 ? cache_sz : 2;
tim_ring->nb_chunks += (cache_sz * rte_lcore_count());
tim_ring->chunk_pool = rte_mempool_create_empty(
pool_name, tim_ring->nb_chunks, tim_ring->chunk_sz, cache_sz, 0,
rte_socket_id(), mp_flags);
if (!tim_ring->disable_npa) {
tim_ring->chunk_pool = rte_mempool_create_empty(
pool_name, tim_ring->nb_chunks, tim_ring->chunk_sz,
cache_sz, 0, rte_socket_id(), mp_flags);
if (tim_ring->chunk_pool == NULL) {
plt_err("Unable to create chunkpool.");
return -ENOMEM;
}
if (tim_ring->chunk_pool == NULL) {
plt_err("Unable to create chunkpool.");
return -ENOMEM;
}
rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
rte_mbuf_platform_mempool_ops(), NULL);
if (rc < 0) {
plt_err("Unable to set chunkpool ops");
goto free;
}
rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
rte_mbuf_platform_mempool_ops(),
NULL);
if (rc < 0) {
plt_err("Unable to set chunkpool ops");
goto free;
}
rc = rte_mempool_populate_default(tim_ring->chunk_pool);
if (rc < 0) {
plt_err("Unable to set populate chunkpool.");
goto free;
rc = rte_mempool_populate_default(tim_ring->chunk_pool);
if (rc < 0) {
plt_err("Unable to set populate chunkpool.");
goto free;
}
tim_ring->aura = roc_npa_aura_handle_to_aura(
tim_ring->chunk_pool->pool_id);
tim_ring->ena_dfb = 0;
} else {
tim_ring->chunk_pool = rte_mempool_create(
pool_name, tim_ring->nb_chunks, tim_ring->chunk_sz,
cache_sz, 0, NULL, NULL, NULL, NULL, rte_socket_id(),
mp_flags);
if (tim_ring->chunk_pool == NULL) {
plt_err("Unable to create chunkpool.");
return -ENOMEM;
}
tim_ring->ena_dfb = 1;
}
tim_ring->aura =
roc_npa_aura_handle_to_aura(tim_ring->chunk_pool->pool_id);
tim_ring->ena_dfb = 0;
return 0;
@ -110,8 +123,17 @@ cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr)
tim_ring->nb_bkts = (tim_ring->max_tout / tim_ring->tck_nsec);
tim_ring->nb_timers = rcfg->nb_timers;
tim_ring->chunk_sz = dev->chunk_sz;
tim_ring->disable_npa = dev->disable_npa;
if (tim_ring->disable_npa) {
tim_ring->nb_chunks =
tim_ring->nb_timers /
CNXK_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
tim_ring->nb_chunks = tim_ring->nb_chunks * tim_ring->nb_bkts;
} else {
tim_ring->nb_chunks = tim_ring->nb_timers;
}
tim_ring->nb_chunks = tim_ring->nb_timers;
tim_ring->nb_chunk_slots = CNXK_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
/* Create buckets. */
tim_ring->bkt =
@ -199,6 +221,24 @@ cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
return 0;
}
/* Parse TIM-related devargs (e.g. "tim_disable_npa=1") into @dev.
 * Silently keeps defaults when no devargs were supplied or parsing fails.
 */
static void
cnxk_tim_parse_devargs(struct rte_devargs *devargs, struct cnxk_tim_evdev *dev)
{
	struct rte_kvargs *kvlist = NULL;

	if (devargs != NULL)
		kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	rte_kvargs_process(kvlist, CNXK_TIM_DISABLE_NPA, &parse_kvargs_flag,
			   &dev->disable_npa);
	rte_kvargs_free(kvlist);
}
void
cnxk_tim_init(struct roc_sso *sso)
{
@ -217,6 +257,8 @@ cnxk_tim_init(struct roc_sso *sso)
}
dev = mz->addr;
cnxk_tim_parse_devargs(sso->pci_dev->device.devargs, dev);
dev->tim.roc_sso = sso;
rc = roc_tim_init(&dev->tim);
if (rc < 0) {

View File

@ -33,11 +33,15 @@
#define CN9K_TIM_MIN_TMO_TKS (256)
#define CNXK_TIM_DISABLE_NPA "tim_disable_npa"
/* Per-device state for the cnxk TIM (event timer) PMD. */
struct cnxk_tim_evdev {
	/* ROC TIM hardware context (initialized via roc_tim_init()). */
	struct roc_tim tim;
	struct rte_eventdev *event_dev;
	/* Number of TIM rings — presumably rings created so far; confirm at use sites. */
	uint16_t nb_rings;
	/* Chunk size copied into each ring at ring-create time. */
	uint32_t chunk_sz;
	/* Dev args */
	/* Set by "tim_disable_npa=1": use a software mempool for chunks
	 * instead of the NPA hardware allocator.
	 */
	uint8_t disable_npa;
};
enum cnxk_tim_clk_src {
@ -75,6 +79,7 @@ struct cnxk_tim_ring {
struct rte_mempool *chunk_pool;
uint64_t arm_cnt;
uint8_t prod_type_sp;
uint8_t disable_npa;
uint8_t ena_dfb;
uint16_t ring_id;
uint32_t aura;