event/cnxk: fix stale data in workslot

Fix stale XAQ depth-check pointers left in workslot memory after an XAQ
pool resize: refresh each workslot's XAQ limit and flow-control memory
whenever the pool is reconfigured, including after crypto adapter queue
pair add and event timer ring creation.
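
As a rough illustration of the failure mode (simplified structures with
made-up names, not the actual driver definitions): each workslot keeps a
private copy of the device-level XAQ limit and flow-control memory, so a
pool resize has to push the new values back into every port:

#include <stdint.h>

struct hws_sketch {
	uint64_t xaq_lmt;   /* cached depth-check limit used on the fast path */
	uint64_t *fc_mem;   /* cached flow-control counter location */
};

struct dev_sketch {
	uint64_t xaq_lmt;             /* changes when the XAQ pool is resized */
	uint64_t fc_iova;             /* flow-control memory, re-allocated on resize */
	struct hws_sketch *ports[64]; /* per-port workslots */
	int nb_ports;
};

/* Push the new device-level values back into every workslot, as the
 * patched cn9k/cn10k set_priv_mem helpers do for the real structures. */
static void
refresh_priv_mem_sketch(struct dev_sketch *dev)
{
	int i;

	for (i = 0; i < dev->nb_ports; i++) {
		dev->ports[i]->xaq_lmt = dev->xaq_lmt;
		dev->ports[i]->fc_mem = (uint64_t *)(uintptr_t)dev->fc_iova;
	}
}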

Fixes: bd64a963d2 ("event/cnxk: use common XAQ pool functions")
Cc: stable@dpdk.org

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Author: Pavan Nikhilesh
Date: 2022-07-25 14:05:45 +05:30
Committed by: Jerin Jacob
Parent: 2f279a1b6e
Commit: 3fe71706ab
4 changed files with 53 additions and 11 deletions


@@ -841,8 +841,11 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem, uint64_t meta_aura)
for (i = 0; i < dev->nb_event_ports; i++) {
struct cn10k_sso_hws *ws = event_dev->data->ports[i];
ws->lookup_mem = lookup_mem;
ws->xaq_lmt = dev->xaq_lmt;
ws->fc_mem = (uint64_t *)dev->fc_iova;
ws->tstamp = dev->tstamp;
if (lookup_mem)
ws->lookup_mem = lookup_mem;
if (meta_aura)
ws->meta_aura = meta_aura;
}
@@ -1034,6 +1037,7 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
const struct rte_event *event)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
int ret;
RTE_SET_USED(event);
@@ -1043,7 +1047,10 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
dev->is_ca_internal_port = 1;
cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
cn10k_sso_set_priv_mem(event_dev, NULL, 0);
return ret;
}
static int
@@ -1057,6 +1064,14 @@ cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}
static int
cn10k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
uint32_t *caps, const struct event_timer_adapter_ops **ops)
{
return cnxk_tim_caps_get(evdev, flags, caps, ops,
cn10k_sso_set_priv_mem);
}
static struct eventdev_ops cn10k_sso_dev_ops = {
.dev_infos_get = cn10k_sso_info_get,
.dev_configure = cn10k_sso_dev_configure,
@@ -1089,7 +1104,7 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
.eth_tx_adapter_stop = cnxk_sso_tx_adapter_stop,
.eth_tx_adapter_free = cnxk_sso_tx_adapter_free,
.timer_adapter_caps_get = cnxk_tim_caps_get,
.timer_adapter_caps_get = cn10k_tim_caps_get,
.crypto_adapter_caps_get = cn10k_crypto_adapter_caps_get,
.crypto_adapter_queue_pair_add = cn10k_crypto_adapter_qp_add,


@@ -942,7 +942,8 @@ cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
}
static void
cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
uint64_t aura __rte_unused)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
int i;
@@ -951,12 +952,18 @@ cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
if (dev->dual_ws) {
struct cn9k_sso_hws_dual *dws =
event_dev->data->ports[i];
dws->lookup_mem = lookup_mem;
dws->xaq_lmt = dev->xaq_lmt;
dws->fc_mem = (uint64_t *)dev->fc_iova;
dws->tstamp = dev->tstamp;
if (lookup_mem)
dws->lookup_mem = lookup_mem;
} else {
struct cn9k_sso_hws *ws = event_dev->data->ports[i];
ws->lookup_mem = lookup_mem;
ws->xaq_lmt = dev->xaq_lmt;
ws->fc_mem = (uint64_t *)dev->fc_iova;
ws->tstamp = dev->tstamp;
if (lookup_mem)
ws->lookup_mem = lookup_mem;
}
}
}
@@ -982,7 +989,7 @@ cn9k_sso_rx_adapter_queue_add(
rxq = eth_dev->data->rx_queues[0];
lookup_mem = rxq->lookup_mem;
cn9k_sso_set_priv_mem(event_dev, lookup_mem);
cn9k_sso_set_priv_mem(event_dev, lookup_mem, 0);
cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
return 0;
@@ -1121,6 +1128,7 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
int32_t queue_pair_id, const struct rte_event *event)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
int ret;
RTE_SET_USED(event);
@@ -1130,7 +1138,10 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
dev->is_ca_internal_port = 1;
cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
cn9k_sso_set_priv_mem(event_dev, NULL, 0);
return ret;
}
static int
@@ -1144,6 +1155,14 @@ cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}
static int
cn9k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
uint32_t *caps, const struct event_timer_adapter_ops **ops)
{
return cnxk_tim_caps_get(evdev, flags, caps, ops,
cn9k_sso_set_priv_mem);
}
static struct eventdev_ops cn9k_sso_dev_ops = {
.dev_infos_get = cn9k_sso_info_get,
.dev_configure = cn9k_sso_dev_configure,
@@ -1174,7 +1193,7 @@ static struct eventdev_ops cn9k_sso_dev_ops = {
.eth_tx_adapter_stop = cnxk_sso_tx_adapter_stop,
.eth_tx_adapter_free = cnxk_sso_tx_adapter_free,
.timer_adapter_caps_get = cnxk_tim_caps_get,
.timer_adapter_caps_get = cn9k_tim_caps_get,
.crypto_adapter_caps_get = cn9k_crypto_adapter_caps_get,
.crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,


@@ -8,6 +8,7 @@
#include "cnxk_tim_evdev.h"
static struct event_timer_adapter_ops cnxk_tim_ops;
static cnxk_sso_set_priv_mem_t sso_set_priv_mem_fn;
static int
cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
@@ -353,6 +354,7 @@ cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr)
cnxk_sso_updt_xae_cnt(cnxk_sso_pmd_priv(dev->event_dev), tim_ring,
RTE_EVENT_TYPE_TIMER);
cnxk_sso_xae_reconfigure(dev->event_dev);
sso_set_priv_mem_fn(dev->event_dev, NULL, 0);
plt_tim_dbg(
"Total memory used %" PRIu64 "MB\n",
@@ -483,7 +485,8 @@ cnxk_tim_stats_reset(const struct rte_event_timer_adapter *adapter)
int
cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
uint32_t *caps, const struct event_timer_adapter_ops **ops)
uint32_t *caps, const struct event_timer_adapter_ops **ops,
cnxk_sso_set_priv_mem_t priv_mem_fn)
{
struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();
@@ -497,6 +500,7 @@ cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
cnxk_tim_ops.start = cnxk_tim_ring_start;
cnxk_tim_ops.stop = cnxk_tim_ring_stop;
cnxk_tim_ops.get_info = cnxk_tim_ring_info_get;
sso_set_priv_mem_fn = priv_mem_fn;
if (dev->enable_stats) {
cnxk_tim_ops.stats_get = cnxk_tim_stats_get;


@@ -78,6 +78,9 @@
#define TIM_BUCKET_SEMA_WLOCK \
(TIM_BUCKET_CHUNK_REMAIN | (1ull << TIM_BUCKET_W1_S_LOCK))
typedef void (*cnxk_sso_set_priv_mem_t)(const struct rte_eventdev *event_dev,
void *lookup_mem, uint64_t aura);
struct cnxk_tim_ctl {
uint16_t ring;
uint16_t chunk_slots;
@@ -290,7 +293,8 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
int cnxk_tim_caps_get(const struct rte_eventdev *dev, uint64_t flags,
uint32_t *caps,
const struct event_timer_adapter_ops **ops);
const struct event_timer_adapter_ops **ops,
cnxk_sso_set_priv_mem_t priv_mem_fn);
void cnxk_tim_init(struct roc_sso *sso);
void cnxk_tim_fini(void);
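
The new cnxk_sso_set_priv_mem_t parameter lets the common timer code call
back into the SoC-specific eventdev once a new timer ring has forced an
XAQ reconfiguration. A minimal sketch of that registration pattern, using
illustrative names rather than the driver's real types:

#include <stdint.h>
#include <stddef.h>

struct evdev_sketch { int id; };  /* stand-in for struct rte_eventdev */

/* Mirrors cnxk_sso_set_priv_mem_t: refreshes cached workslot state. */
typedef void (*set_priv_mem_sketch_t)(const struct evdev_sketch *dev,
				      void *lookup_mem, uint64_t aura);

/* Recorded by the common timer layer, like sso_set_priv_mem_fn above. */
static set_priv_mem_sketch_t set_priv_mem_fn;

/* Caps query: the SoC-specific eventdev hands over its refresh hook. */
static void
tim_caps_get_sketch(const struct evdev_sketch *dev, set_priv_mem_sketch_t fn)
{
	(void)dev;
	set_priv_mem_fn = fn;
}

/* Ring creation can grow the XAQ pool, so the hook runs right after. */
static void
tim_ring_create_sketch(const struct evdev_sketch *dev)
{
	/* ... reconfigure the XAQ pool for the new timer ring ... */
	if (set_priv_mem_fn != NULL)
		set_priv_mem_fn(dev, NULL, 0);
}

Storing the hook as a function pointer keeps the common timer code
independent of the per-SoC (cn9k/cn10k) workslot layouts.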