event/cnxk: add SSO HW device operations

Add SSO HW device operations used for enqueue/dequeue.
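These helpers are the building blocks for the port enqueue/dequeue fast path. A minimal worker-loop sketch of how they compose (illustrative only: "port", "done" and "process()" are placeholders, and the actual rte_eventdev enqueue/dequeue wiring is expected to follow in later patches of this series):

    struct cn10k_sso_hws *ws = port;
    struct rte_event ev;

    while (!done) {
            if (!cn10k_sso_hws_get_work(ws, &ev))
                    continue;                             /* nothing scheduled */
            process(&ev);                                 /* application handling */
            if (ev.op == RTE_EVENT_OP_NEW)
                    cn10k_sso_hws_new_event(ws, &ev);     /* inject via ADD_WORK */
            else if (ev.op == RTE_EVENT_OP_FORWARD)
                    cn10k_sso_hws_forward_event(ws, &ev); /* SWTAG or desched */
    }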

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Pavan Nikhilesh 2021-05-04 05:57:05 +05:30 committed by Jerin Jacob
parent 7ffa737996
commit e239e0d3fa
7 changed files with 527 additions and 0 deletions

drivers/event/cnxk/cn10k_worker.c
@@ -0,0 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2021 Marvell.
*/
#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

drivers/event/cnxk/cn10k_worker.h
@@ -0,0 +1,151 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2021 Marvell.
*/
#ifndef __CN10K_WORKER_H__
#define __CN10K_WORKER_H__
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
/* SSO Operations */
static __rte_always_inline uint8_t
cn10k_sso_hws_new_event(struct cn10k_sso_hws *ws, const struct rte_event *ev)
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
const uint64_t event_ptr = ev->u64;
const uint16_t grp = ev->queue_id;
rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
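/* Flow control: fail the enqueue when the number of in-flight XAQ buffers
* (*fc_mem) has reached the configured limit.
*/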
if (ws->xaq_lmt <= *ws->fc_mem)
return 0;
cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
return 1;
}
static __rte_always_inline void
cn10k_sso_hws_fwd_swtag(struct cn10k_sso_hws *ws, const struct rte_event *ev)
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
const uint8_t cur_tt = CNXK_TT_FROM_TAG(plt_read64(ws->tag_wqe_op));
/* CNXK model
* cur_tt/new_tt SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
*
* SSO_TT_ORDERED norm norm untag
* SSO_TT_ATOMIC norm norm untag
* SSO_TT_UNTAGGED norm norm NOOP
*/
if (new_tt == SSO_TT_UNTAGGED) {
if (cur_tt != SSO_TT_UNTAGGED)
cnxk_sso_hws_swtag_untag(ws->swtag_untag_op);
} else {
cnxk_sso_hws_swtag_norm(tag, new_tt, ws->swtag_norm_op);
}
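/* Record that a tag switch is in flight; the fast path is expected to wait
* for it to complete before issuing the next GETWORK.
*/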
ws->swtag_req = 1;
}
static __rte_always_inline void
cn10k_sso_hws_fwd_group(struct cn10k_sso_hws *ws, const struct rte_event *ev,
const uint16_t grp)
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
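/* Update the workslot's WQE pointer with the event, then switch tag and
* deschedule so the SSO moves it to the new group.
*/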
plt_write64(ev->u64, ws->updt_wqe_op);
cnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->swtag_desched_op);
}
static __rte_always_inline void
cn10k_sso_hws_forward_event(struct cn10k_sso_hws *ws,
const struct rte_event *ev)
{
const uint8_t grp = ev->queue_id;
/* Group hasn't changed; use SWTAG to forward the event. */
if (CNXK_GRP_FROM_TAG(plt_read64(ws->tag_wqe_op)) == grp)
cn10k_sso_hws_fwd_swtag(ws, ev);
else
/*
* Group has changed for group-based work pipelining;
* use the deschedule/add_work operation to transfer the
* event to the new group/core.
*/
cn10k_sso_hws_fwd_group(ws, ev, grp);
}
static __rte_always_inline uint16_t
cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev)
{
union {
__uint128_t get_work;
uint64_t u64[2];
} gw;
gw.get_work = ws->gw_wdata;
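/* Issue GETWORK: on arm64 a single 128-bit CASP both posts the request word
* and returns the new tag and WQE pointer; otherwise post the request and
* poll the tag/WQE pair until the pend_get_work bit (63) clears.
*/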
#if defined(RTE_ARCH_ARM64) && !defined(__clang__)
asm volatile(
PLT_CPU_FEATURE_PREAMBLE
"caspl %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n"
: [wdata] "+r"(gw.get_work)
: [gw_loc] "r"(ws->getwrk_op)
: "memory");
#else
plt_write64(gw.u64[0], ws->getwrk_op);
do {
roc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);
} while (gw.u64[0] & BIT_ULL(63));
#endif
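/* Convert the GWS tag word to rte_event layout: TT (bits 33:32) -> sched_type,
* group (bits 45:36) -> queue_id, low 32 bits -> flow/sub-event/event type.
*/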
gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
(gw.u64[0] & (0x3FFull << 36)) << 4 |
(gw.u64[0] & 0xffffffff);
ev->event = gw.u64[0];
ev->u64 = gw.u64[1];
return !!gw.u64[1];
}
/* Used when cleaning up a workslot. */
static __rte_always_inline uint16_t
cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
{
union {
__uint128_t get_work;
uint64_t u64[2];
} gw;
#ifdef RTE_ARCH_ARM64
asm volatile(PLT_CPU_FEATURE_PREAMBLE
" ldp %[tag], %[wqp], [%[tag_loc]] \n"
" tbz %[tag], 63, done%= \n"
" sevl \n"
"rty%=: wfe \n"
" ldp %[tag], %[wqp], [%[tag_loc]] \n"
" tbnz %[tag], 63, rty%= \n"
"done%=: dmb ld \n"
: [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
: [tag_loc] "r"(ws->tag_wqe_op)
: "memory");
#else
do {
roc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);
} while (gw.u64[0] & BIT_ULL(63));
#endif
gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
(gw.u64[0] & (0x3FFull << 36)) << 4 |
(gw.u64[0] & 0xffffffff);
ev->event = gw.u64[0];
ev->u64 = gw.u64[1];
return !!gw.u64[1];
}
#endif

drivers/event/cnxk/cn9k_worker.c
@@ -0,0 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2021 Marvell.
*/
#include "roc_api.h"
#include "cn9k_worker.h"

drivers/event/cnxk/cn9k_worker.h
@@ -0,0 +1,249 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2021 Marvell.
*/
#ifndef __CN9K_WORKER_H__
#define __CN9K_WORKER_H__
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
/* SSO Operations */
static __rte_always_inline uint8_t
cn9k_sso_hws_new_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
const uint64_t event_ptr = ev->u64;
const uint16_t grp = ev->queue_id;
rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
if (ws->xaq_lmt <= *ws->fc_mem)
return 0;
cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
return 1;
}
static __rte_always_inline void
cn9k_sso_hws_fwd_swtag(struct cn9k_sso_hws_state *vws,
const struct rte_event *ev)
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
const uint8_t cur_tt = CNXK_TT_FROM_TAG(plt_read64(vws->tag_op));
/* CNXK model
* cur_tt/new_tt SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
*
* SSO_TT_ORDERED norm norm untag
* SSO_TT_ATOMIC norm norm untag
* SSO_TT_UNTAGGED norm norm NOOP
*/
if (new_tt == SSO_TT_UNTAGGED) {
if (cur_tt != SSO_TT_UNTAGGED)
cnxk_sso_hws_swtag_untag(
CN9K_SSOW_GET_BASE_ADDR(vws->getwrk_op) +
SSOW_LF_GWS_OP_SWTAG_UNTAG);
} else {
cnxk_sso_hws_swtag_norm(tag, new_tt, vws->swtag_norm_op);
}
}
static __rte_always_inline void
cn9k_sso_hws_fwd_group(struct cn9k_sso_hws_state *ws,
const struct rte_event *ev, const uint16_t grp)
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
plt_write64(ev->u64, CN9K_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
SSOW_LF_GWS_OP_UPD_WQP_GRP1);
cnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->swtag_desched_op);
}
static __rte_always_inline void
cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
const uint8_t grp = ev->queue_id;
/* Group hasn't changed; use SWTAG to forward the event. */
if (CNXK_GRP_FROM_TAG(plt_read64(ws->tag_op)) == grp) {
cn9k_sso_hws_fwd_swtag((struct cn9k_sso_hws_state *)ws, ev);
ws->swtag_req = 1;
} else {
/*
* Group has changed for group-based work pipelining;
* use the deschedule/add_work operation to transfer the
* event to the new group/core.
*/
cn9k_sso_hws_fwd_group((struct cn9k_sso_hws_state *)ws, ev,
grp);
}
}
/* Dual ws ops. */
static __rte_always_inline uint8_t
cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
const struct rte_event *ev)
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
const uint64_t event_ptr = ev->u64;
const uint16_t grp = ev->queue_id;
rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
if (dws->xaq_lmt <= *dws->fc_mem)
return 0;
cnxk_sso_hws_add_work(event_ptr, tag, new_tt, dws->grps_base[grp]);
return 1;
}
static __rte_always_inline void
cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws,
struct cn9k_sso_hws_state *vws,
const struct rte_event *ev)
{
const uint8_t grp = ev->queue_id;
/* Group hasn't changed; use SWTAG to forward the event. */
if (CNXK_GRP_FROM_TAG(plt_read64(vws->tag_op)) == grp) {
cn9k_sso_hws_fwd_swtag(vws, ev);
dws->swtag_req = 1;
} else {
/*
* Group has changed for group-based work pipelining;
* use the deschedule/add_work operation to transfer the
* event to the new group/core.
*/
cn9k_sso_hws_fwd_group(vws, ev, grp);
}
}
static __rte_always_inline uint16_t
cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
struct cn9k_sso_hws_state *ws_pair,
struct rte_event *ev)
{
const uint64_t set_gw = BIT_ULL(16) | 1;
union {
__uint128_t get_work;
uint64_t u64[2];
} gw;
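/* Wait for the GETWORK issued earlier on this workslot to complete, then
* immediately post the next GETWORK (wait | mask-set 0) on the paired
* workslot so the two slots ping-pong.
*/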
#ifdef RTE_ARCH_ARM64
asm volatile(PLT_CPU_FEATURE_PREAMBLE
"rty%=: \n"
" ldr %[tag], [%[tag_loc]] \n"
" ldr %[wqp], [%[wqp_loc]] \n"
" tbnz %[tag], 63, rty%= \n"
"done%=: str %[gw], [%[pong]] \n"
" dmb ld \n"
: [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
: [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op),
[gw] "r"(set_gw), [pong] "r"(ws_pair->getwrk_op));
#else
gw.u64[0] = plt_read64(ws->tag_op);
while ((BIT_ULL(63)) & gw.u64[0])
gw.u64[0] = plt_read64(ws->tag_op);
gw.u64[1] = plt_read64(ws->wqp_op);
plt_write64(set_gw, ws_pair->getwrk_op);
#endif
gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
(gw.u64[0] & (0x3FFull << 36)) << 4 |
(gw.u64[0] & 0xffffffff);
ev->event = gw.u64[0];
ev->u64 = gw.u64[1];
return !!gw.u64[1];
}
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev)
{
union {
__uint128_t get_work;
uint64_t u64[2];
} gw;
plt_write64(BIT_ULL(16) | /* wait for work. */
1, /* Use Mask set 0. */
ws->getwrk_op);
#ifdef RTE_ARCH_ARM64
asm volatile(PLT_CPU_FEATURE_PREAMBLE
" ldr %[tag], [%[tag_loc]] \n"
" ldr %[wqp], [%[wqp_loc]] \n"
" tbz %[tag], 63, done%= \n"
" sevl \n"
"rty%=: wfe \n"
" ldr %[tag], [%[tag_loc]] \n"
" ldr %[wqp], [%[wqp_loc]] \n"
" tbnz %[tag], 63, rty%= \n"
"done%=: dmb ld \n"
: [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
: [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
#else
gw.u64[0] = plt_read64(ws->tag_op);
while ((BIT_ULL(63)) & gw.u64[0])
gw.u64[0] = plt_read64(ws->tag_op);
gw.u64[1] = plt_read64(ws->wqp_op);
#endif
gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
(gw.u64[0] & (0x3FFull << 36)) << 4 |
(gw.u64[0] & 0xffffffff);
ev->event = gw.u64[0];
ev->u64 = gw.u64[1];
return !!gw.u64[1];
}
/* Used when cleaning up a workslot. */
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
{
union {
__uint128_t get_work;
uint64_t u64[2];
} gw;
#ifdef RTE_ARCH_ARM64
asm volatile(PLT_CPU_FEATURE_PREAMBLE
" ldr %[tag], [%[tag_loc]] \n"
" ldr %[wqp], [%[wqp_loc]] \n"
" tbz %[tag], 63, done%= \n"
" sevl \n"
"rty%=: wfe \n"
" ldr %[tag], [%[tag_loc]] \n"
" ldr %[wqp], [%[wqp_loc]] \n"
" tbnz %[tag], 63, rty%= \n"
"done%=: dmb ld \n"
: [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
: [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
#else
gw.u64[0] = plt_read64(ws->tag_op);
while ((BIT_ULL(63)) & gw.u64[0])
gw.u64[0] = plt_read64(ws->tag_op);
gw.u64[1] = plt_read64(ws->wqp_op);
#endif
gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
(gw.u64[0] & (0x3FFull << 36)) << 4 |
(gw.u64[0] & 0xffffffff);
ev->event = gw.u64[0];
ev->u64 = gw.u64[1];
return !!gw.u64[1];
}
#endif

drivers/event/cnxk/cnxk_eventdev.h
@@ -29,6 +29,16 @@
#define CNXK_SSO_XAQ_CACHE_CNT (0x7)
#define CNXK_SSO_XAQ_SLACK (8)
#define CNXK_TT_FROM_TAG(x) (((x) >> 32) & SSO_TT_EMPTY)
#define CNXK_TT_FROM_EVENT(x) (((x) >> 38) & SSO_TT_EMPTY)
#define CNXK_EVENT_TYPE_FROM_TAG(x) (((x) >> 28) & 0xf)
#define CNXK_SUB_EVENT_FROM_TAG(x) (((x) >> 20) & 0xff)
#define CNXK_CLR_SUB_EVENT(x) (~(0xffu << 20) & x)
#define CNXK_GRP_FROM_TAG(x) (((x) >> 36) & 0x3ff)
#define CNXK_SWTAG_PEND(x) (BIT_ULL(62) & x)
#define CN9K_SSOW_GET_BASE_ADDR(_GW) ((_GW)-SSOW_LF_GWS_OP_GET_WORK0)
#define CN10K_GW_MODE_NONE 0
#define CN10K_GW_MODE_PREF 1
#define CN10K_GW_MODE_PREF_WFE 2

drivers/event/cnxk/cnxk_worker.h
@@ -0,0 +1,101 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2021 Marvell.
*/
#ifndef __CNXK_WORKER_H__
#define __CNXK_WORKER_H__
#include "cnxk_eventdev.h"
/* SSO Operations */
static __rte_always_inline void
cnxk_sso_hws_add_work(const uint64_t event_ptr, const uint32_t tag,
const uint8_t new_tt, const uintptr_t grp_base)
{
uint64_t add_work0;
add_work0 = tag | ((uint64_t)(new_tt) << 32);
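/* 128-bit paired store to the group's ADD_WORK register: word0 carries the
* tag and TT, word1 the event/WQE pointer.
*/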
roc_store_pair(add_work0, event_ptr, grp_base);
}
static __rte_always_inline void
cnxk_sso_hws_swtag_desched(uint32_t tag, uint8_t new_tt, uint16_t grp,
uintptr_t swtag_desched_op)
{
uint64_t val;
val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
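/* tag in bits 31:0, TT in 33:32, group from bit 34; the release store makes
* prior writes to the event visible before the SWTAG_DESCHED is issued.
*/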
__atomic_store_n((uint64_t *)swtag_desched_op, val, __ATOMIC_RELEASE);
}
static __rte_always_inline void
cnxk_sso_hws_swtag_norm(uint32_t tag, uint8_t new_tt, uintptr_t swtag_norm_op)
{
uint64_t val;
val = tag | ((uint64_t)(new_tt & 0x3) << 32);
plt_write64(val, swtag_norm_op);
}
static __rte_always_inline void
cnxk_sso_hws_swtag_untag(uintptr_t swtag_untag_op)
{
plt_write64(0, swtag_untag_op);
}
static __rte_always_inline void
cnxk_sso_hws_swtag_flush(uint64_t tag_op, uint64_t flush_op)
{
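/* Nothing to flush when the workslot currently holds no tag (TT is EMPTY). */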
if (CNXK_TT_FROM_TAG(plt_read64(tag_op)) == SSO_TT_EMPTY)
return;
plt_write64(0, flush_op);
}
static __rte_always_inline void
cnxk_sso_hws_swtag_wait(uintptr_t tag_op)
{
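/* Wait for any pending SWTAG/SWTAG_FULL (bit 62 of the tag word) to complete;
* the arm64 path sleeps on WFE instead of busy-polling.
*/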
#ifdef RTE_ARCH_ARM64
uint64_t swtp;
asm volatile(PLT_CPU_FEATURE_PREAMBLE
" ldr %[swtb], [%[swtp_loc]] \n"
" tbz %[swtb], 62, done%= \n"
" sevl \n"
"rty%=: wfe \n"
" ldr %[swtb], [%[swtp_loc]] \n"
" tbnz %[swtb], 62, rty%= \n"
"done%=: \n"
: [swtb] "=&r"(swtp)
: [swtp_loc] "r"(tag_op));
#else
/* Wait for the SWTAG/SWTAG_FULL operation */
while (plt_read64(tag_op) & BIT_ULL(62))
;
#endif
}
static __rte_always_inline void
cnxk_sso_hws_head_wait(uintptr_t tag_op)
{
#ifdef RTE_ARCH_ARM64
uint64_t swtp;
asm volatile(PLT_CPU_FEATURE_PREAMBLE
" ldr %[swtb], [%[swtp_loc]] \n"
" tbz %[swtb], 35, done%= \n"
" sevl \n"
"rty%=: wfe \n"
" ldr %[swtb], [%[swtp_loc]] \n"
" tbnz %[swtb], 35, rty%= \n"
"done%=: \n"
: [swtb] "=&r"(swtp)
: [swtp_loc] "r"(tag_op));
#else
/* Wait for the HEAD condition (bit 35 of the tag word) to clear */
while (plt_read64(tag_op) & BIT_ULL(35))
;
#endif
}
#endif

drivers/event/cnxk/meson.build
@@ -10,7 +10,9 @@ endif
sources = files(
'cn9k_eventdev.c',
'cn9k_worker.c',
'cn10k_eventdev.c',
'cn10k_worker.c',
'cnxk_eventdev.c',
)