event/cnxk: rework enqueue path

Rework the SSO enqueue path for CN9K to make it similar to the CN10K
enqueue interface.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
This commit is contained in:
Pavan Nikhilesh 2021-11-03 06:22:13 +05:30 committed by Jerin Jacob
parent 25d703151d
commit ea9ec3de0f
10 changed files with 96 additions and 129 deletions

View File

@ -27,17 +27,6 @@
[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])
static void
cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
{
ws->tag_op = base + SSOW_LF_GWS_TAG;
ws->wqp_op = base + SSOW_LF_GWS_WQP;
ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}
static int
cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
@ -95,7 +84,7 @@ cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
uint64_t val;
/* Set get_work tmo for HWS */
val = NSEC2USEC(dev->deq_tmo_ns) - 1;
val = dev->deq_tmo_ns ? NSEC2USEC(dev->deq_tmo_ns) - 1 : 0;
if (dev->dual_ws) {
dws = hws;
dws->grp_base = grp_base;
@ -148,7 +137,6 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
struct cn9k_sso_hws_dual *dws;
struct cn9k_sso_hws_state *st;
struct cn9k_sso_hws *ws;
uint64_t cq_ds_cnt = 1;
uint64_t aq_cnt = 1;
@ -170,22 +158,21 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
if (dev->dual_ws) {
dws = hws;
st = &dws->ws_state[0];
ws_base = dws->base[0];
} else {
ws = hws;
st = (struct cn9k_sso_hws_state *)ws;
ws_base = ws->base;
}
while (aq_cnt || cq_ds_cnt || ds_cnt) {
plt_write64(req, st->getwrk_op);
cn9k_sso_hws_get_work_empty(st, &ev);
plt_write64(req, ws_base + SSOW_LF_GWS_OP_GET_WORK0);
cn9k_sso_hws_get_work_empty(ws_base, &ev);
if (fn != NULL && ev.u64 != 0)
fn(arg, ev);
if (ev.sched_type != SSO_TT_EMPTY)
cnxk_sso_hws_swtag_flush(st->tag_op,
st->swtag_flush_op);
cnxk_sso_hws_swtag_flush(
ws_base + SSOW_LF_GWS_TAG,
ws_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
do {
val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
} while (val & BIT_ULL(56));
@ -674,8 +661,6 @@ cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
dws->base[1] = roc_sso_hws_base_get(
&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
dws->hws_id = port_id;
dws->swtag_req = 0;
dws->vws = 0;
@ -695,7 +680,6 @@ cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
/* First cache line is reserved for cookie */
ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
ws->hws_id = port_id;
ws->swtag_req = 0;

View File

@ -19,7 +19,8 @@ cn9k_sso_hws_enq(void *port, const struct rte_event *ev)
cn9k_sso_hws_forward_event(ws, ev);
break;
case RTE_EVENT_OP_RELEASE:
cnxk_sso_hws_swtag_flush(ws->tag_op, ws->swtag_flush_op);
cnxk_sso_hws_swtag_flush(ws->base + SSOW_LF_GWS_TAG,
ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
break;
default:
return 0;
@ -67,17 +68,18 @@ uint16_t __rte_hot
cn9k_sso_hws_dual_enq(void *port, const struct rte_event *ev)
{
struct cn9k_sso_hws_dual *dws = port;
struct cn9k_sso_hws_state *vws;
uint64_t base;
vws = &dws->ws_state[!dws->vws];
base = dws->base[!dws->vws];
switch (ev->op) {
case RTE_EVENT_OP_NEW:
return cn9k_sso_hws_dual_new_event(dws, ev);
case RTE_EVENT_OP_FORWARD:
cn9k_sso_hws_dual_forward_event(dws, vws, ev);
cn9k_sso_hws_dual_forward_event(dws, base, ev);
break;
case RTE_EVENT_OP_RELEASE:
cnxk_sso_hws_swtag_flush(vws->tag_op, vws->swtag_flush_op);
cnxk_sso_hws_swtag_flush(base + SSOW_LF_GWS_TAG,
base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
break;
default:
return 0;
@ -114,7 +116,7 @@ cn9k_sso_hws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
struct cn9k_sso_hws_dual *dws = port;
RTE_SET_USED(nb_events);
cn9k_sso_hws_dual_forward_event(dws, &dws->ws_state[!dws->vws], ev);
cn9k_sso_hws_dual_forward_event(dws, dws->base[!dws->vws], ev);
return 1;
}
@ -126,7 +128,8 @@ cn9k_sso_hws_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
RTE_SET_USED(nb_events);
return cn9k_cpt_crypto_adapter_enqueue(ws->tag_op, ev->event_ptr);
return cn9k_cpt_crypto_adapter_enqueue(ws->base + SSOW_LF_GWS_TAG,
ev->event_ptr);
}
uint16_t __rte_hot
@ -136,6 +139,6 @@ cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
RTE_SET_USED(nb_events);
return cn9k_cpt_crypto_adapter_enqueue(dws->ws_state[!dws->vws].tag_op,
ev->event_ptr);
return cn9k_cpt_crypto_adapter_enqueue(
dws->base[!dws->vws] + SSOW_LF_GWS_TAG, ev->event_ptr);
}

View File

@ -37,12 +37,12 @@ cn9k_sso_hws_new_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
}
static __rte_always_inline void
cn9k_sso_hws_fwd_swtag(struct cn9k_sso_hws_state *vws,
const struct rte_event *ev)
cn9k_sso_hws_fwd_swtag(uint64_t base, const struct rte_event *ev)
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
const uint8_t cur_tt = CNXK_TT_FROM_TAG(plt_read64(vws->tag_op));
const uint8_t cur_tt =
CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_TAG));
/* CNXK model
* cur_tt/new_tt SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
@ -54,24 +54,24 @@ cn9k_sso_hws_fwd_swtag(struct cn9k_sso_hws_state *vws,
if (new_tt == SSO_TT_UNTAGGED) {
if (cur_tt != SSO_TT_UNTAGGED)
cnxk_sso_hws_swtag_untag(
CN9K_SSOW_GET_BASE_ADDR(vws->getwrk_op) +
SSOW_LF_GWS_OP_SWTAG_UNTAG);
cnxk_sso_hws_swtag_untag(base +
SSOW_LF_GWS_OP_SWTAG_UNTAG);
} else {
cnxk_sso_hws_swtag_norm(tag, new_tt, vws->swtag_norm_op);
cnxk_sso_hws_swtag_norm(tag, new_tt,
base + SSOW_LF_GWS_OP_SWTAG_NORM);
}
}
static __rte_always_inline void
cn9k_sso_hws_fwd_group(struct cn9k_sso_hws_state *ws,
const struct rte_event *ev, const uint16_t grp)
cn9k_sso_hws_fwd_group(uint64_t base, const struct rte_event *ev,
const uint16_t grp)
{
const uint32_t tag = (uint32_t)ev->event;
const uint8_t new_tt = ev->sched_type;
plt_write64(ev->u64, CN9K_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
SSOW_LF_GWS_OP_UPD_WQP_GRP1);
cnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->swtag_desched_op);
plt_write64(ev->u64, base + SSOW_LF_GWS_OP_UPD_WQP_GRP1);
cnxk_sso_hws_swtag_desched(tag, new_tt, grp,
base + SSOW_LF_GWS_OP_SWTAG_DESCHED);
}
static __rte_always_inline void
@ -80,8 +80,8 @@ cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
const uint8_t grp = ev->queue_id;
/* Group hasn't changed, Use SWTAG to forward the event */
if (CNXK_GRP_FROM_TAG(plt_read64(ws->tag_op)) == grp) {
cn9k_sso_hws_fwd_swtag((struct cn9k_sso_hws_state *)ws, ev);
if (CNXK_GRP_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_TAG)) == grp) {
cn9k_sso_hws_fwd_swtag(ws->base, ev);
ws->swtag_req = 1;
} else {
/*
@ -89,8 +89,7 @@ cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
* Use deschedule/add_work operation to transfer the event to
* new group/core
*/
cn9k_sso_hws_fwd_group((struct cn9k_sso_hws_state *)ws, ev,
grp);
cn9k_sso_hws_fwd_group(ws->base, ev, grp);
}
}
@ -115,15 +114,14 @@ cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
}
static __rte_always_inline void
cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws,
struct cn9k_sso_hws_state *vws,
cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws, uint64_t base,
const struct rte_event *ev)
{
const uint8_t grp = ev->queue_id;
/* Group hasn't changed, Use SWTAG to forward the event */
if (CNXK_GRP_FROM_TAG(plt_read64(vws->tag_op)) == grp) {
cn9k_sso_hws_fwd_swtag(vws, ev);
if (CNXK_GRP_FROM_TAG(plt_read64(base + SSOW_LF_GWS_TAG)) == grp) {
cn9k_sso_hws_fwd_swtag(base, ev);
dws->swtag_req = 1;
} else {
/*
@ -131,7 +129,7 @@ cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws,
* Use deschedule/add_work operation to transfer the event to
* new group/core
*/
cn9k_sso_hws_fwd_group(vws, ev, grp);
cn9k_sso_hws_fwd_group(base, ev, grp);
}
}
@ -149,8 +147,7 @@ cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
}
static __rte_always_inline uint16_t
cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
struct cn9k_sso_hws_state *ws_pair,
cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base,
struct rte_event *ev, const uint32_t flags,
const void *const lookup_mem,
struct cnxk_timesync_info *const tstamp)
@ -177,14 +174,15 @@ cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
" prfm pldl1keep, [%[mbuf]] \n"
: [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
[mbuf] "=&r"(mbuf)
: [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op),
[gw] "r"(set_gw), [pong] "r"(ws_pair->getwrk_op));
: [tag_loc] "r"(base + SSOW_LF_GWS_TAG),
[wqp_loc] "r"(base + SSOW_LF_GWS_WQP), [gw] "r"(set_gw),
[pong] "r"(pair_base + SSOW_LF_GWS_OP_GET_WORK0));
#else
gw.u64[0] = plt_read64(ws->tag_op);
gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
while ((BIT_ULL(63)) & gw.u64[0])
gw.u64[0] = plt_read64(ws->tag_op);
gw.u64[1] = plt_read64(ws->wqp_op);
plt_write64(set_gw, ws_pair->getwrk_op);
gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
gw.u64[1] = plt_read64(base + SSOW_LF_GWS_WQP);
plt_write64(set_gw, pair_base + SSOW_LF_GWS_OP_GET_WORK0);
mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif
@ -236,7 +234,7 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
plt_write64(BIT_ULL(16) | /* wait for work. */
1, /* Use Mask set 0. */
ws->getwrk_op);
ws->base + SSOW_LF_GWS_OP_GET_WORK0);
if (flags & NIX_RX_OFFLOAD_PTYPE_F)
rte_prefetch_non_temporal(lookup_mem);
@ -255,13 +253,14 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
" prfm pldl1keep, [%[mbuf]] \n"
: [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
[mbuf] "=&r"(mbuf)
: [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
: [tag_loc] "r"(ws->base + SSOW_LF_GWS_TAG),
[wqp_loc] "r"(ws->base + SSOW_LF_GWS_WQP));
#else
gw.u64[0] = plt_read64(ws->tag_op);
gw.u64[0] = plt_read64(ws->base + SSOW_LF_GWS_TAG);
while ((BIT_ULL(63)) & gw.u64[0])
gw.u64[0] = plt_read64(ws->tag_op);
gw.u64[0] = plt_read64(ws->base + SSOW_LF_GWS_TAG);
gw.u64[1] = plt_read64(ws->wqp_op);
gw.u64[1] = plt_read64(ws->base + SSOW_LF_GWS_WQP);
mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif
@ -303,7 +302,7 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
/* Used in cleaning up workslot. */
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev)
{
union {
__uint128_t get_work;
@ -325,13 +324,14 @@ cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
" sub %[mbuf], %[wqp], #0x80 \n"
: [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
[mbuf] "=&r"(mbuf)
: [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
: [tag_loc] "r"(base + SSOW_LF_GWS_TAG),
[wqp_loc] "r"(base + SSOW_LF_GWS_WQP));
#else
gw.u64[0] = plt_read64(ws->tag_op);
gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
while ((BIT_ULL(63)) & gw.u64[0])
gw.u64[0] = plt_read64(ws->tag_op);
gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
gw.u64[1] = plt_read64(ws->wqp_op);
gw.u64[1] = plt_read64(base + SSOW_LF_GWS_WQP);
mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

View File

@ -16,7 +16,7 @@
\
if (ws->swtag_req) { \
ws->swtag_req = 0; \
cnxk_sso_hws_swtag_wait(ws->tag_op); \
cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG); \
return 1; \
} \
\
@ -32,7 +32,7 @@
\
if (ws->swtag_req) { \
ws->swtag_req = 0; \
cnxk_sso_hws_swtag_wait(ws->tag_op); \
cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG); \
return 1; \
} \
\

View File

@ -16,7 +16,7 @@
\
if (ws->swtag_req) { \
ws->swtag_req = 0; \
cnxk_sso_hws_swtag_wait(ws->tag_op); \
cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG); \
return 1; \
} \
\
@ -42,7 +42,7 @@
\
if (ws->swtag_req) { \
ws->swtag_req = 0; \
cnxk_sso_hws_swtag_wait(ws->tag_op); \
cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG); \
return 1; \
} \
\

View File

@ -16,7 +16,7 @@
\
if (ws->swtag_req) { \
ws->swtag_req = 0; \
cnxk_sso_hws_swtag_wait(ws->tag_op); \
cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG); \
return ret; \
} \
\
@ -46,7 +46,7 @@
\
if (ws->swtag_req) { \
ws->swtag_req = 0; \
cnxk_sso_hws_swtag_wait(ws->tag_op); \
cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG); \
return ret; \
} \
\

View File

@ -16,14 +16,14 @@
RTE_SET_USED(timeout_ticks); \
if (dws->swtag_req) { \
dws->swtag_req = 0; \
cnxk_sso_hws_swtag_wait( \
dws->ws_state[!dws->vws].tag_op); \
cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] + \
SSOW_LF_GWS_TAG); \
return 1; \
} \
\
gw = cn9k_sso_hws_dual_get_work( \
&dws->ws_state[dws->vws], &dws->ws_state[!dws->vws], \
ev, flags, dws->lookup_mem, dws->tstamp); \
dws->base[dws->vws], dws->base[!dws->vws], ev, flags, \
dws->lookup_mem, dws->tstamp); \
dws->vws = !dws->vws; \
return gw; \
} \
@ -37,14 +37,14 @@
RTE_SET_USED(timeout_ticks); \
if (dws->swtag_req) { \
dws->swtag_req = 0; \
cnxk_sso_hws_swtag_wait( \
dws->ws_state[!dws->vws].tag_op); \
cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] + \
SSOW_LF_GWS_TAG); \
return 1; \
} \
\
gw = cn9k_sso_hws_dual_get_work( \
&dws->ws_state[dws->vws], &dws->ws_state[!dws->vws], \
ev, flags, dws->lookup_mem, dws->tstamp); \
dws->base[dws->vws], dws->base[!dws->vws], ev, flags, \
dws->lookup_mem, dws->tstamp); \
dws->vws = !dws->vws; \
return gw; \
}

View File

@ -16,15 +16,14 @@
RTE_SET_USED(timeout_ticks); \
if (dws->swtag_req) { \
dws->swtag_req = 0; \
cnxk_sso_hws_swtag_wait( \
dws->ws_state[!dws->vws].tag_op); \
cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] + \
SSOW_LF_GWS_TAG); \
return 1; \
} \
\
gw = cn9k_sso_hws_dual_get_work(&dws->ws_state[dws->vws], \
&dws->ws_state[!dws->vws], ev, \
flags | CPT_RX_WQE_F, \
dws->lookup_mem, dws->tstamp); \
gw = cn9k_sso_hws_dual_get_work( \
dws->base[dws->vws], dws->base[!dws->vws], ev, \
flags | CPT_RX_WQE_F, dws->lookup_mem, dws->tstamp); \
dws->vws = !dws->vws; \
return gw; \
} \
@ -48,14 +47,14 @@
RTE_SET_USED(timeout_ticks); \
if (dws->swtag_req) { \
dws->swtag_req = 0; \
cnxk_sso_hws_swtag_wait( \
dws->ws_state[!dws->vws].tag_op); \
cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] + \
SSOW_LF_GWS_TAG); \
return 1; \
} \
\
gw = cn9k_sso_hws_dual_get_work( \
&dws->ws_state[dws->vws], &dws->ws_state[!dws->vws], \
ev, flags | NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F, \
dws->base[dws->vws], dws->base[!dws->vws], ev, \
flags | NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F, \
dws->lookup_mem, dws->tstamp); \
dws->vws = !dws->vws; \
return gw; \

View File

@ -16,20 +16,19 @@
\
if (dws->swtag_req) { \
dws->swtag_req = 0; \
cnxk_sso_hws_swtag_wait( \
dws->ws_state[!dws->vws].tag_op); \
cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] + \
SSOW_LF_GWS_TAG); \
return ret; \
} \
\
ret = cn9k_sso_hws_dual_get_work( \
&dws->ws_state[dws->vws], &dws->ws_state[!dws->vws], \
ev, flags, dws->lookup_mem, dws->tstamp); \
dws->base[dws->vws], dws->base[!dws->vws], ev, flags, \
dws->lookup_mem, dws->tstamp); \
dws->vws = !dws->vws; \
for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) { \
ret = cn9k_sso_hws_dual_get_work( \
&dws->ws_state[dws->vws], \
&dws->ws_state[!dws->vws], ev, flags, \
dws->lookup_mem, dws->tstamp); \
dws->base[dws->vws], dws->base[!dws->vws], ev, \
flags, dws->lookup_mem, dws->tstamp); \
dws->vws = !dws->vws; \
} \
\
@ -55,20 +54,19 @@
\
if (dws->swtag_req) { \
dws->swtag_req = 0; \
cnxk_sso_hws_swtag_wait( \
dws->ws_state[!dws->vws].tag_op); \
cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] + \
SSOW_LF_GWS_TAG); \
return ret; \
} \
\
ret = cn9k_sso_hws_dual_get_work( \
&dws->ws_state[dws->vws], &dws->ws_state[!dws->vws], \
ev, flags, dws->lookup_mem, dws->tstamp); \
dws->base[dws->vws], dws->base[!dws->vws], ev, flags, \
dws->lookup_mem, dws->tstamp); \
dws->vws = !dws->vws; \
for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) { \
ret = cn9k_sso_hws_dual_get_work( \
&dws->ws_state[dws->vws], \
&dws->ws_state[!dws->vws], ev, flags, \
dws->lookup_mem, dws->tstamp); \
dws->base[dws->vws], dws->base[!dws->vws], ev, \
flags, dws->lookup_mem, dws->tstamp); \
dws->vws = !dws->vws; \
} \
\

View File

@ -136,19 +136,9 @@ struct cn10k_sso_hws {
uint8_t tx_adptr_data[];
} __rte_cache_aligned;
/* CN9K HWS ops */
#define CN9K_SSO_HWS_OPS \
uintptr_t swtag_desched_op; \
uintptr_t swtag_flush_op; \
uintptr_t swtag_norm_op; \
uintptr_t getwrk_op; \
uintptr_t tag_op; \
uintptr_t wqp_op
/* Event port a.k.a GWS */
struct cn9k_sso_hws {
/* Get Work Fastpath data */
CN9K_SSO_HWS_OPS;
uint64_t base;
/* PTP timestamp */
struct cnxk_timesync_info *tstamp;
void *lookup_mem;
@ -159,17 +149,11 @@ struct cn9k_sso_hws {
uint64_t *fc_mem;
uintptr_t grp_base;
/* Tx Fastpath data */
uint64_t base __rte_cache_aligned;
uint8_t tx_adptr_data[];
uint8_t tx_adptr_data[] __rte_cache_aligned;
} __rte_cache_aligned;
struct cn9k_sso_hws_state {
CN9K_SSO_HWS_OPS;
};
struct cn9k_sso_hws_dual {
/* Get Work Fastpath data */
struct cn9k_sso_hws_state ws_state[2]; /* Ping and Pong */
uint64_t base[2]; /* Ping and Pong */
/* PTP timestamp */
struct cnxk_timesync_info *tstamp;
void *lookup_mem;
@ -181,8 +165,7 @@ struct cn9k_sso_hws_dual {
uint64_t *fc_mem;
uintptr_t grp_base;
/* Tx Fastpath data */
uint64_t base[2] __rte_cache_aligned;
uint8_t tx_adptr_data[];
uint8_t tx_adptr_data[] __rte_cache_aligned;
} __rte_cache_aligned;
struct cnxk_sso_hws_cookie {