event/cnxk: flush event queues over multiple pass
If an event queue flush does not complete after a fixed number of tries, the remaining queues are flushed before retrying the one whose flush is incomplete.

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
This commit is contained in:
parent
7da7925f99
commit
68c050954f
@@ -108,10 +108,11 @@ cn10k_sso_hws_release(void *arg, void *hws)
|
||||
memset(ws, 0, sizeof(*ws));
|
||||
}
|
||||
|
||||
static void
|
||||
static int
|
||||
cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
|
||||
cnxk_handle_event_t fn, void *arg)
|
||||
{
|
||||
uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
|
||||
struct cn10k_sso_hws *ws = hws;
|
||||
uint64_t cq_ds_cnt = 1;
|
||||
uint64_t aq_cnt = 1;
|
||||
@@ -141,6 +142,8 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
|
||||
fn(arg, ev);
|
||||
if (ev.sched_type != SSO_TT_EMPTY)
|
||||
cnxk_sso_hws_swtag_flush(ws->base);
|
||||
else if (retry-- == 0)
|
||||
break;
|
||||
do {
|
||||
val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
|
||||
} while (val & BIT_ULL(56));
|
||||
@@ -151,8 +154,13 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
|
||||
cq_ds_cnt &= 0x3FFF3FFF0000;
|
||||
}
|
||||
|
||||
if (aq_cnt || cq_ds_cnt || ds_cnt)
|
||||
return -EAGAIN;
|
||||
|
||||
plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
|
||||
rte_mb();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@@ -117,11 +117,12 @@ cn9k_sso_hws_release(void *arg, void *hws)
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
static int
|
||||
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
|
||||
cnxk_handle_event_t fn, void *arg)
|
||||
{
|
||||
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
|
||||
uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
|
||||
struct cnxk_timesync_info *tstamp;
|
||||
struct cn9k_sso_hws_dual *dws;
|
||||
struct cn9k_sso_hws *ws;
|
||||
@@ -164,6 +165,8 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
|
||||
fn(arg, ev);
|
||||
if (ev.sched_type != SSO_TT_EMPTY)
|
||||
cnxk_sso_hws_swtag_flush(ws_base);
|
||||
else if (retry-- == 0)
|
||||
break;
|
||||
do {
|
||||
val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
|
||||
} while (val & BIT_ULL(56));
|
||||
@@ -174,7 +177,12 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
|
||||
cq_ds_cnt &= 0x3FFF3FFF0000;
|
||||
}
|
||||
|
||||
if (aq_cnt || cq_ds_cnt || ds_cnt)
|
||||
return -EAGAIN;
|
||||
|
||||
plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@@ -464,9 +464,10 @@ static void
|
||||
cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
|
||||
cnxk_sso_hws_flush_t flush_fn, uint8_t enable)
|
||||
{
|
||||
uint8_t pend_list[RTE_EVENT_MAX_QUEUES_PER_DEV], pend_cnt, new_pcnt;
|
||||
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
|
||||
uintptr_t hwgrp_base;
|
||||
uint16_t i;
|
||||
uint8_t queue_id, i;
|
||||
void *ws;
|
||||
|
||||
for (i = 0; i < dev->nb_event_ports; i++) {
|
||||
@@ -475,14 +476,30 @@ cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
|
||||
}
|
||||
|
||||
rte_mb();
|
||||
|
||||
/* Consume all the events through HWS0 */
|
||||
ws = event_dev->data->ports[0];
|
||||
|
||||
for (i = 0; i < dev->nb_event_queues; i++) {
|
||||
/* Consume all the events through HWS0 */
|
||||
hwgrp_base = roc_sso_hwgrp_base_get(&dev->sso, i);
|
||||
flush_fn(ws, i, hwgrp_base, cnxk_handle_event, event_dev);
|
||||
/* Enable/Disable SSO GGRP */
|
||||
plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
|
||||
/* Starting list of queues to flush */
|
||||
pend_cnt = dev->nb_event_queues;
|
||||
for (i = 0; i < dev->nb_event_queues; i++)
|
||||
pend_list[i] = i;
|
||||
|
||||
while (pend_cnt) {
|
||||
new_pcnt = 0;
|
||||
for (i = 0; i < pend_cnt; i++) {
|
||||
queue_id = pend_list[i];
|
||||
hwgrp_base =
|
||||
roc_sso_hwgrp_base_get(&dev->sso, queue_id);
|
||||
if (flush_fn(ws, queue_id, hwgrp_base,
|
||||
cnxk_handle_event, event_dev)) {
|
||||
pend_list[new_pcnt++] = queue_id;
|
||||
continue;
|
||||
}
|
||||
/* Enable/Disable SSO GGRP */
|
||||
plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
|
||||
}
|
||||
pend_cnt = new_pcnt;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -61,6 +61,8 @@
|
||||
|
||||
#define CNXK_QOS_NORMALIZE(val, min, max, cnt) \
|
||||
(min + val / ((max + cnt - 1) / cnt))
|
||||
#define CNXK_SSO_FLUSH_RETRY_MAX 0xfff
|
||||
|
||||
#define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name) \
|
||||
do { \
|
||||
if (strncmp(dev->driver->name, drv_name, strlen(drv_name))) \
|
||||
@@ -76,8 +78,8 @@ typedef int (*cnxk_sso_unlink_t)(void *dev, void *ws, uint16_t *map,
|
||||
uint16_t nb_link);
|
||||
typedef void (*cnxk_handle_event_t)(void *arg, struct rte_event ev);
|
||||
typedef void (*cnxk_sso_hws_reset_t)(void *arg, void *ws);
|
||||
typedef void (*cnxk_sso_hws_flush_t)(void *ws, uint8_t queue_id, uintptr_t base,
|
||||
cnxk_handle_event_t fn, void *arg);
|
||||
typedef int (*cnxk_sso_hws_flush_t)(void *ws, uint8_t queue_id, uintptr_t base,
|
||||
cnxk_handle_event_t fn, void *arg);
|
||||
|
||||
struct cnxk_sso_qos {
|
||||
uint16_t queue;
|
||||
|
Loading…
Reference in New Issue
Block a user