event/octeontx: add option to use fpavf as chunk pool

Add a compile-time configurable option to force TIMvf to use the
Octeontx FPAvf pool manager as its chunk pool.
When FPAvf is used as the pool manager, TIMvf automatically frees the
chunks back to FPAvf through the gpool-id.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
commit 3e249bc559 (parent 4cec5aae58)
Author: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Date: 2018-04-10 02:30:34 +05:30
Committed by: Thomas Monjalon

4 changed files with 50 additions and 4 deletions
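The "compile-time configurable option" here is DPDK's default mbuf mempool
ops name (the CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS setting in this era's
build config): when it resolves to "octeontx_fpavf", timvf_ring_start()
below switches the ring to FPA-backed chunk refill. A minimal probe of
that selection, using only the public rte_mbuf_best_mempool_ops() API;
the function and message strings are illustrative:

#include <stdio.h>
#include <string.h>
#include <rte_mbuf_pool_ops.h>

/* Report which chunk-refill path the TIMvf driver would pick at
 * ring start, mirroring the strcmp() check added below. */
static void
report_tim_chunk_mode(void)
{
	const char *ops = rte_mbuf_best_mempool_ops();

	if (strcmp(ops, "octeontx_fpavf") == 0)
		printf("TIMvf chunk pool: FPAvf (hardware frees chunks)\n");
	else
		printf("TIMvf chunk pool: generic, ops=%s\n", ops);
}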

--- a/drivers/event/octeontx/timvf_evdev.c
+++ b/drivers/event/octeontx/timvf_evdev.c

@@ -125,7 +125,9 @@ static int
 timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 {
 	int ret;
+	uint8_t use_fpa = 0;
 	uint64_t interval;
+	uintptr_t pool;
 	struct timvf_ctrl_reg rctrl;
 	struct timvf_mbox_dev_info dinfo;
 	struct timvf_ring *timr = adptr->data->adapter_priv;
@@ -155,6 +157,9 @@ timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 		return -EINVAL;
 	}

+	if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
+		use_fpa = 1;
+
 	/*CTRL0 register.*/
 	rctrl.rctrl0 = interval;
@@ -167,9 +172,24 @@ timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 	rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;

+	if (use_fpa) {
+		pool = (uintptr_t)((struct rte_mempool *)
+				timr->chunk_pool)->pool_id;
+		ret = octeontx_fpa_bufpool_gpool(pool);
+		if (ret < 0) {
+			timvf_log_dbg("Unable to get gaura id");
+			ret = -ENOMEM;
+			goto error;
+		}
+		timvf_write64((uint64_t)ret,
+				(uint8_t *)timr->vbar0 + TIM_VRING_AURA);
+	} else {
+		rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */;
+	}
+
 	timvf_write64((uintptr_t)timr->bkt,
 			(uint8_t *)timr->vbar0 + TIM_VRING_BASE);
-	timvf_set_chunk_refill(timr);
+	timvf_set_chunk_refill(timr, use_fpa);
 	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
 		ret = -EACCES;
 		goto error;
 	}
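Two modes fall out of the block above. With FPAvf, the pool's gaura id is
programmed into TIM_VRING_AURA and the timer hardware frees traversed
chunks back to the pool by itself; otherwise ENA_DFB ("don't free") is
set and software recycles chunks. The conversion works because an
octeontx_fpavf-backed mempool stores its FPAvf handle in pool_id, as the
cast above shows. A small wrapper sketch around that conversion; the
helper name is invented, octeontx_fpa_bufpool_gpool() comes from the diff:

#include <errno.h>
#include <rte_mempool.h>
#include <octeontx_fpavf.h>

/* Map an FPAvf-backed mempool to the gpool/gaura id the TIM ring
 * needs; pool_id holds the FPAvf handle for such pools. */
static int
chunk_pool_gpool(const struct rte_mempool *mp)
{
	int gpool = octeontx_fpa_bufpool_gpool((uintptr_t)mp->pool_id);

	return (gpool < 0) ? -ENOMEM : gpool;
}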

--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h

@@ -25,6 +25,7 @@
 #include <rte_reciprocal.h>

 #include <octeontx_mbox.h>
+#include <octeontx_fpavf.h>

 #define timvf_log(level, fmt, args...) \
 	rte_log(RTE_LOG_ ## level, otx_logtype_timvf, \
@@ -220,6 +221,6 @@ uint16_t timvf_timer_arm_tmo_brst_stats(
 		const struct rte_event_timer_adapter *adptr,
 		struct rte_event_timer **tim, const uint64_t timeout_tick,
 		const uint16_t nb_timers);
-void timvf_set_chunk_refill(struct timvf_ring * const timr);
+void timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa);

 #endif /* __TIMVF_EVDEV_H__ */

--- a/drivers/event/octeontx/timvf_worker.c
+++ b/drivers/event/octeontx/timvf_worker.c

@@ -191,7 +191,10 @@ timvf_timer_arm_tmo_brst_stats(const struct rte_event_timer_adapter *adptr,
 }

 void
-timvf_set_chunk_refill(struct timvf_ring * const timr)
+timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa)
 {
-	timr->refill_chunk = timvf_refill_chunk_generic;
+	if (use_fpa)
+		timr->refill_chunk = timvf_refill_chunk_fpa;
+	else
+		timr->refill_chunk = timvf_refill_chunk_generic;
 }
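The callback installed here is consulted on the arm fast path whenever a
bucket's current chunk runs out of slots. A simplified caller sketch
under that assumption; the chunk_remainder bookkeeping field is assumed,
and real code adds locking and statistics:

/* Refill when the bucket's current chunk is exhausted (sketch). */
if (bkt->chunk_remainder == 0) {	/* assumed bookkeeping field */
	struct tim_mem_entry *chunk = timr->refill_chunk(bkt, timr);

	if (unlikely(chunk == NULL))
		return -ENOMEM;	/* chunk pool exhausted */
	bkt->current_chunk = (uintptr_t)chunk;
	bkt->chunk_remainder = nb_chunk_slots;
}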

--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h

@@ -213,6 +213,28 @@ timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt,
 	return chunk;
 }

+static inline struct tim_mem_entry *
+timvf_refill_chunk_fpa(struct tim_mem_bucket * const bkt,
+		struct timvf_ring * const timr)
+{
+	struct tim_mem_entry *chunk;
+
+	if (unlikely(rte_mempool_get(timr->chunk_pool, (void **)&chunk)))
+		return NULL;
+
+	*(uint64_t *)(chunk + nb_chunk_slots) = 0;
+	if (bkt->nb_entry) {
+		*(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
+				bkt->current_chunk) +
+				nb_chunk_slots) =
+			(uintptr_t) chunk;
+	} else {
+		bkt->first_chunk = (uintptr_t) chunk;
+	}
+	return chunk;
+}
+
 static inline struct tim_mem_bucket *
 timvf_get_target_bucket(struct timvf_ring * const timr, const uint32_t rel_bkt)
 {
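The pointer arithmetic in timvf_refill_chunk_fpa() implies the chunk
layout: nb_chunk_slots entries followed by one 64-bit link word, so each
bucket's chunks form a NULL-terminated singly linked list (the new
chunk's link is zeroed before the old tail is pointed at it). A walker
sketch under that layout assumption; both helper names are invented:

/* Follow the 64-bit link word stored after the last entry slot. */
static inline struct tim_mem_entry *
chunk_next(struct tim_mem_entry * const chunk)
{
	return (struct tim_mem_entry *)(uintptr_t)
		*(uint64_t *)(chunk + nb_chunk_slots);
}

/* Walk a bucket's chunk list, e.g. to count chunks when debugging. */
static inline unsigned int
bucket_chunk_count(const struct tim_mem_bucket * const bkt)
{
	unsigned int n = 0;
	struct tim_mem_entry *chunk =
		(struct tim_mem_entry *)(uintptr_t)bkt->first_chunk;

	while (chunk != NULL) {
		n++;
		chunk = chunk_next(chunk);
	}
	return n;
}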