event/octeontx2: add timer arm routine

Add the event timer arm burst routine and generate its fastpath
variants for each combination of bucket index arithmetic (modulo vs
bitwise AND), chunk mode (FB vs DFB) and producer type (single vs
multi producer), selected per ring through a function-pointer table.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>

drivers/event/octeontx2/otx2_tim_evdev.c

@@ -29,6 +29,23 @@ tim_get_msix_offsets(void)
return rc;
}
static void
tim_set_fp_ops(struct otx2_tim_ring *tim_ring)
{
uint8_t prod_flag = !tim_ring->prod_type_sp;
/* [MOD/AND] [DFB/FB] [SP/MP] */
const rte_event_timer_arm_burst_t arm_burst[2][2][2] = {
#define FP(_name, _f3, _f2, _f1, flags) \
[_f3][_f2][_f1] = otx2_tim_arm_burst_ ## _name,
TIM_ARM_FASTPATH_MODES
#undef FP
};
otx2_tim_ops.arm_burst = arm_burst[tim_ring->optimized]
[tim_ring->ena_dfb][prod_flag];
}
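tim_set_fp_ops() selects the arm routine from a 2x2x2 function-pointer table that the TIM_ARM_FASTPATH_MODES X-macro (defined in otx2_tim_evdev.h below) populates: each FP() row places otx2_tim_arm_burst_<name> into slot [_f3][_f2][_f1]. As a minimal standalone sketch of this two-pass X-macro technique (hypothetical names, not driver code):

#include <stdio.h>

/* Hypothetical mode list: one FP() row per (f2, f1) variant. */
#define MODES		\
	FP(a_x, 0, 0)	\
	FP(a_y, 0, 1)	\
	FP(b_x, 1, 0)	\
	FP(b_y, 1, 1)

/* First pass: emit one function per variant. */
#define FP(name, f2, f1) \
	static void fn_##name(void) { printf(#name "\n"); }
MODES
#undef FP

int main(void)
{
	/* Second pass: build the dispatch table from the same list. */
	static void (*const tbl[2][2])(void) = {
#define FP(name, f2, f1) [f2][f1] = fn_##name,
		MODES
#undef FP
	};

	tbl[1][0]();	/* prints "b_x" */
	return 0;
}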
static void
otx2_tim_ring_info_get(const struct rte_event_timer_adapter *adptr,
struct rte_event_timer_adapter_info *adptr_info)
@@ -327,6 +344,9 @@ otx2_tim_ring_create(struct rte_event_timer_adapter *adptr)
tim_ring->base + TIM_LF_RING_BASE);
otx2_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);
/* Set fastpath ops. */
tim_set_fp_ops(tim_ring);
/* Update SSO xae count. */
sso_updt_xae_cnt(sso_pmd_priv(dev->event_dev), (void *)&nb_timers,
RTE_EVENT_TYPE_TIMER);

drivers/event/octeontx2/otx2_tim_evdev.h

@@ -7,6 +7,7 @@
#include <rte_event_timer_adapter.h>
#include <rte_event_timer_adapter_pmd.h>
#include <rte_reciprocal.h>
#include "otx2_dev.h"
@@ -70,6 +71,13 @@
#define OTX2_TIM_MAX_CHUNK_SLOTS (0x1FFE)
#define OTX2_TIM_MIN_TMO_TKS (256)
#define OTX2_TIM_SP 0x1
#define OTX2_TIM_MP 0x2
#define OTX2_TIM_BKT_AND 0x4
#define OTX2_TIM_BKT_MOD 0x8
#define OTX2_TIM_ENA_FB 0x10
#define OTX2_TIM_ENA_DFB 0x20
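/*
 * Each fastpath variant is the OR of one flag from each pair above:
 * bucket index arithmetic (MOD vs AND), chunk allocation policy
 * (FB vs DFB) and producer type (SP vs MP); TIM_ARM_FASTPATH_MODES
 * below enumerates all eight combinations.
 */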
enum otx2_tim_clk_src {
OTX2_TIM_CLK_SRC_10NS = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
OTX2_TIM_CLK_SRC_GPIO = RTE_EVENT_TIMER_ADAPTER_EXT_CLK0,
@@ -95,6 +103,11 @@ struct otx2_tim_bkt {
uint64_t pad;
} __rte_packed __rte_aligned(32);
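/*
 * 16 B timer list entry written into a bucket chunk: w0 carries the
 * TIM control word built by tim_format_event() and wqe the event
 * delivered when the timer expires.
 */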
struct otx2_tim_ent {
uint64_t w0;
uint64_t wqe;
} __rte_packed;
struct otx2_tim_evdev {
struct rte_pci_device *pci_dev;
struct rte_eventdev *event_dev;
@@ -111,8 +124,10 @@ struct otx2_tim_evdev {
struct otx2_tim_ring {
uintptr_t base;
struct rte_reciprocal_u64 fast_div;
uint16_t nb_chunk_slots;
uint32_t nb_bkts;
uint64_t ring_start_cyc;
struct otx2_tim_bkt *bkt;
struct rte_mempool *chunk_pool;
uint64_t tck_int;
@@ -142,6 +157,24 @@ tim_priv_get(void)
return mz->addr;
}
#define TIM_ARM_FASTPATH_MODES \
FP(mod_sp, 0, 0, 0, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_DFB | OTX2_TIM_SP) \
FP(mod_mp, 0, 0, 1, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_DFB | OTX2_TIM_MP) \
FP(mod_fb_sp, 0, 1, 0, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_FB | OTX2_TIM_SP) \
FP(mod_fb_mp, 0, 1, 1, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_FB | OTX2_TIM_MP) \
FP(and_sp, 1, 0, 0, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_DFB | OTX2_TIM_SP) \
FP(and_mp, 1, 0, 1, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_DFB | OTX2_TIM_MP) \
FP(and_fb_sp, 1, 1, 0, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_FB | OTX2_TIM_SP) \
FP(and_fb_mp, 1, 1, 1, OTX2_TIM_BKT_AND | OTX2_TIM_ENA_FB | OTX2_TIM_MP) \
#define FP(_name, _f3, _f2, _f1, flags) \
uint16_t otx2_tim_arm_burst_ ## _name( \
const struct rte_event_timer_adapter *adptr, \
struct rte_event_timer **tim, \
const uint16_t nb_timers);
TIM_ARM_FASTPATH_MODES
#undef FP
int otx2_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
uint32_t *caps,
const struct rte_event_timer_adapter_ops **ops);
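Expanded, the FP() pass above declares one prototype per fastpath mode; the first entry, for instance, becomes:

uint16_t otx2_tim_arm_burst_mod_sp(
		const struct rte_event_timer_adapter *adptr,
		struct rte_event_timer **tim,
		const uint16_t nb_timers);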

drivers/event/octeontx2/otx2_tim_worker.c

@@ -5,3 +5,80 @@
#include "otx2_tim_evdev.h"
#include "otx2_tim_worker.h"
static inline int
tim_arm_checks(const struct otx2_tim_ring * const tim_ring,
struct rte_event_timer * const tim)
{
if (unlikely(tim->state)) {
tim->state = RTE_EVENT_TIMER_ERROR;
rte_errno = EALREADY;
goto fail;
}
if (unlikely(!tim->timeout_ticks ||
tim->timeout_ticks >= tim_ring->nb_bkts)) {
tim->state = tim->timeout_ticks ? RTE_EVENT_TIMER_ERROR_TOOLATE
: RTE_EVENT_TIMER_ERROR_TOOEARLY;
rte_errno = EINVAL;
goto fail;
}
return 0;
fail:
return -EINVAL;
}
static inline void
tim_format_event(const struct rte_event_timer * const tim,
struct otx2_tim_ent * const entry)
{
entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
(tim->ev.event & 0xFFFFFFFFF);
entry->wqe = tim->ev.u64;
}
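tim_format_event() repacks the 64-bit rte_event word into the TIM entry: assuming the upstream rte_event bitfield layout, the 0xFFC000000000 mask picks sched_type and queue_id (bits 39:38 and 47:40) and the >> 6 shift moves them down to w0 bits 41:32, while the 0xFFFFFFFFF mask keeps the low event bits (flow_id, sub_event_type, event_type, op) in place. A small worked example with hypothetical field values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Upstream rte_event layout: flow_id [19:0],
	 * sub_event_type [27:20], event_type [31:28], op [33:32],
	 * rsvd [37:34], sched_type [39:38], queue_id [47:40].
	 */
	const uint64_t ev = (5ULL << 40) |	/* queue_id = 5 */
			    (1ULL << 38) |	/* sched_type = atomic */
			    0xabcdeULL;		/* flow_id = 0xabcde */
	const uint64_t w0 = (ev & 0xFFC000000000) >> 6 |
			    (ev & 0xFFFFFFFFF);

	/* sched_type lands at w0[33:32], queue_id at w0[41:34]. */
	printf("w0 = 0x%" PRIx64 "\n", w0);	/* 0x15000abcde */
	return 0;
}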
static __rte_always_inline uint16_t
tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr,
struct rte_event_timer **tim,
const uint16_t nb_timers,
const uint8_t flags)
{
struct otx2_tim_ring *tim_ring = adptr->data->adapter_priv;
struct otx2_tim_ent entry;
uint16_t index;
int ret;
for (index = 0; index < nb_timers; index++) {
if (tim_arm_checks(tim_ring, tim[index]))
break;
tim_format_event(tim[index], &entry);
if (flags & OTX2_TIM_SP)
ret = tim_add_entry_sp(tim_ring,
tim[index]->timeout_ticks,
tim[index], &entry, flags);
if (flags & OTX2_TIM_MP)
ret = tim_add_entry_mp(tim_ring,
tim[index]->timeout_ticks,
tim[index], &entry, flags);
if (unlikely(ret)) {
rte_errno = -ret;
break;
}
}
return index;
}
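Applications never call these per-mode functions directly; they reach them through the generic adapter API once tim_set_fp_ops() has wired up otx2_tim_ops.arm_burst. A hedged sketch of arming a single timer (adapter and event setup elided; adptr and ev are assumed to come from the usual rte_event_timer_adapter_create() flow):

#include <rte_errno.h>
#include <rte_event_timer_adapter.h>

/* Arm one 10-tick timer; returns 0 on success, -rte_errno on failure. */
static int
arm_one(struct rte_event_timer_adapter *adptr, struct rte_event ev)
{
	struct rte_event_timer tim = {
		.ev = ev,
		.state = RTE_EVENT_TIMER_NOT_ARMED,
		.timeout_ticks = 10,
	};
	struct rte_event_timer *tims[] = { &tim };

	/* rte_event_timer_arm_burst() returns the number of timers
	 * armed; on failure tim.state and rte_errno give the cause.
	 */
	return rte_event_timer_arm_burst(adptr, tims, 1) == 1 ? 0 : -rte_errno;
}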
#define FP(_name, _f3, _f2, _f1, _flags) \
uint16_t __rte_noinline \
otx2_tim_arm_burst_ ## _name(const struct rte_event_timer_adapter *adptr, \
struct rte_event_timer **tim, \
const uint16_t nb_timers) \
{ \
return tim_timer_arm_burst(adptr, tim, nb_timers, _flags); \
}
TIM_ARM_FASTPATH_MODES
#undef FP

drivers/event/octeontx2/otx2_tim_worker.h

@@ -108,4 +108,208 @@ tim_bkt_clr_nent(struct otx2_tim_bkt *bktp)
return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL);
}
static __rte_always_inline struct otx2_tim_bkt *
tim_get_target_bucket(struct otx2_tim_ring * const tim_ring,
const uint32_t rel_bkt, const uint8_t flag)
{
const uint64_t bkt_cyc = rte_rdtsc() - tim_ring->ring_start_cyc;
uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc,
&tim_ring->fast_div) + rel_bkt;
if (flag & OTX2_TIM_BKT_MOD)
bucket = bucket % tim_ring->nb_bkts;
if (flag & OTX2_TIM_BKT_AND)
bucket = bucket & (tim_ring->nb_bkts - 1);
return &tim_ring->bkt[bucket];
}
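The MOD and AND reductions agree only when nb_bkts is a power of two; the ring's "optimized" flag (the first index in tim_set_fp_ops()) presumably selects the AND variant exactly in that case, saving an integer modulo per arm. A quick standalone check of the identity:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t nb_bkts = 1024;	/* power of two */

	assert((nb_bkts & (nb_bkts - 1)) == 0);
	for (uint32_t b = 0; b < 4 * nb_bkts; b++)
		/* x & (n - 1) == x % n holds for power-of-two n. */
		assert((b & (nb_bkts - 1)) == b % nb_bkts);
	return 0;
}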
static struct otx2_tim_ent *
tim_clr_bkt(struct otx2_tim_ring * const tim_ring,
struct otx2_tim_bkt * const bkt)
{
struct otx2_tim_ent *chunk;
struct otx2_tim_ent *pnext;
chunk = ((struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk);
chunk = (struct otx2_tim_ent *)(uintptr_t)(chunk +
tim_ring->nb_chunk_slots)->w0;
while (chunk) {
pnext = (struct otx2_tim_ent *)(uintptr_t)
((chunk + tim_ring->nb_chunk_slots)->w0);
rte_mempool_put(tim_ring->chunk_pool, chunk);
chunk = pnext;
}
return (struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk;
}
static struct otx2_tim_ent *
tim_refill_chunk(struct otx2_tim_bkt * const bkt,
struct otx2_tim_ring * const tim_ring)
{
struct otx2_tim_ent *chunk;
if (bkt->nb_entry || !bkt->first_chunk) {
if (unlikely(rte_mempool_get(tim_ring->chunk_pool,
(void **)&chunk)))
return NULL;
if (bkt->nb_entry) {
*(uint64_t *)(((struct otx2_tim_ent *)(uintptr_t)
bkt->current_chunk) +
tim_ring->nb_chunk_slots) =
(uintptr_t)chunk;
} else {
bkt->first_chunk = (uintptr_t)chunk;
}
} else {
chunk = tim_clr_bkt(tim_ring, bkt);
bkt->first_chunk = (uintptr_t)chunk;
}
*(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
return chunk;
}
static struct otx2_tim_ent *
tim_insert_chunk(struct otx2_tim_bkt * const bkt,
struct otx2_tim_ring * const tim_ring)
{
struct otx2_tim_ent *chunk;
if (unlikely(rte_mempool_get(tim_ring->chunk_pool, (void **)&chunk)))
return NULL;
*(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
if (bkt->nb_entry) {
*(uint64_t *)(((struct otx2_tim_ent *)(uintptr_t)
bkt->current_chunk) +
tim_ring->nb_chunk_slots) = (uintptr_t)chunk;
} else {
bkt->first_chunk = (uintptr_t)chunk;
}
return chunk;
}
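Both helpers above maintain the same chunk layout: a chunk holds nb_chunk_slots 16 B entries followed by one trailing entry whose w0 stores the pointer to the next chunk (zeroed on allocation, so 0 terminates the list); tim_clr_bkt() walks exactly this link. A sketch of a walker over that layout, under the same assumptions (hypothetical helper, not driver code):

#include <stddef.h>
#include <stdint.h>

struct ent { uint64_t w0, wqe; };	/* mirrors struct otx2_tim_ent */

/* Count the chunks in a bucket's list starting from first_chunk. */
static size_t
count_chunks(struct ent *first, uint16_t nb_chunk_slots)
{
	size_t n = 0;
	struct ent *c;

	for (c = first; c != NULL;
	     c = (struct ent *)(uintptr_t)(c + nb_chunk_slots)->w0)
		n++;
	return n;
}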
static __rte_always_inline int
tim_add_entry_sp(struct otx2_tim_ring * const tim_ring,
const uint32_t rel_bkt,
struct rte_event_timer * const tim,
const struct otx2_tim_ent * const pent,
const uint8_t flags)
{
struct otx2_tim_ent *chunk;
struct otx2_tim_bkt *bkt;
uint64_t lock_sema;
int16_t rem;
bkt = tim_get_target_bucket(tim_ring, rel_bkt, flags);
__retry:
/* Get bucket semaphore. */
lock_sema = tim_bkt_fetch_sema(bkt);
/* Bucket related checks. */
if (unlikely(tim_bkt_get_hbt(lock_sema)))
goto __retry;
/* Insert the work. */
rem = tim_bkt_fetch_rem(lock_sema);
if (!rem) {
if (flags & OTX2_TIM_ENA_FB)
chunk = tim_refill_chunk(bkt, tim_ring);
if (flags & OTX2_TIM_ENA_DFB)
chunk = tim_insert_chunk(bkt, tim_ring);
if (unlikely(chunk == NULL)) {
tim_bkt_set_rem(bkt, 0);
tim->impl_opaque[0] = 0;
tim->impl_opaque[1] = 0;
tim->state = RTE_EVENT_TIMER_ERROR;
return -ENOMEM;
}
bkt->current_chunk = (uintptr_t)chunk;
tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - 1);
} else {
chunk = (struct otx2_tim_ent *)(uintptr_t)bkt->current_chunk;
chunk += tim_ring->nb_chunk_slots - rem;
}
/* Copy work entry. */
*chunk = *pent;
tim_bkt_inc_nent(bkt);
tim->impl_opaque[0] = (uintptr_t)chunk;
tim->impl_opaque[1] = (uintptr_t)bkt;
tim->state = RTE_EVENT_TIMER_ARMED;
return 0;
}
static __rte_always_inline int
tim_add_entry_mp(struct otx2_tim_ring * const tim_ring,
const uint32_t rel_bkt,
struct rte_event_timer * const tim,
const struct otx2_tim_ent * const pent,
const uint8_t flags)
{
struct otx2_tim_ent *chunk;
struct otx2_tim_bkt *bkt;
uint64_t lock_sema;
int16_t rem;
__retry:
bkt = tim_get_target_bucket(tim_ring, rel_bkt, flags);
/* Get bucket semaphore. */
lock_sema = tim_bkt_fetch_sema_lock(bkt);
/* Bucket related checks. */
if (unlikely(tim_bkt_get_hbt(lock_sema))) {
tim_bkt_dec_lock(bkt);
goto __retry;
}
rem = tim_bkt_fetch_rem(lock_sema);
if (rem < 0) {
/* Goto diff bucket. */
tim_bkt_dec_lock(bkt);
goto __retry;
} else if (!rem) {
/* Only one thread can be here. */
if (flags & OTX2_TIM_ENA_FB)
chunk = tim_refill_chunk(bkt, tim_ring);
if (flags & OTX2_TIM_ENA_DFB)
chunk = tim_insert_chunk(bkt, tim_ring);
if (unlikely(chunk == NULL)) {
tim_bkt_set_rem(bkt, 0);
tim_bkt_dec_lock(bkt);
tim->impl_opaque[0] = 0;
tim->impl_opaque[1] = 0;
tim->state = RTE_EVENT_TIMER_ERROR;
return -ENOMEM;
}
bkt->current_chunk = (uintptr_t)chunk;
tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - 1);
} else {
chunk = (struct otx2_tim_ent *)(uintptr_t)bkt->current_chunk;
chunk += tim_ring->nb_chunk_slots - rem;
}
/* Copy work entry. */
*chunk = *pent;
tim_bkt_dec_lock(bkt);
tim_bkt_inc_nent(bkt);
tim->impl_opaque[0] = (uintptr_t)chunk;
tim->impl_opaque[1] = (uintptr_t)bkt;
tim->state = RTE_EVENT_TIMER_ARMED;
return 0;
}
#endif /* __OTX2_TIM_WORKER_H__ */