common/cnxk: add SSO XAQ pool create and free

Add common API to create and free SSO XAQ pool.
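A minimal usage sketch from a PMD setup/teardown path (hypothetical; the
device handle, event count and error handling are assumed, only the
roc_sso_hwgrp_init_xaq_aura()/roc_sso_hwgrp_free_xaq_aura() calls come from
this patch):

	struct roc_sso *roc_sso = ...;  /* device already probed/configured */
	uint32_t nb_events = 8192;      /* assumed application XAE budget */
	int rc;

	/* Size and create the XAQ aura/pool for nb_events. */
	rc = roc_sso_hwgrp_init_xaq_aura(roc_sso, nb_events);
	if (rc < 0)
		return rc;

	/* ... run the event device ... */

	/* Release XAQs from all HWGRPs and destroy the aura/pool. */
	rc = roc_sso_hwgrp_free_xaq_aura(roc_sso, roc_sso->nb_hwgrp);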

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Pavan Nikhilesh 2021-11-03 06:22:09 +05:30 committed by Jerin Jacob
parent b7c71b4769
commit 49b0424ffb
5 changed files with 147 additions and 0 deletions

View File

@@ -5,6 +5,8 @@
#include "roc_api.h"
#include "roc_priv.h"
#define SSO_XAQ_CACHE_CNT (0x7)
/* Private functions. */
int
sso_lf_alloc(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf,
@@ -387,6 +389,128 @@ roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
return mbox_process(dev->mbox);
}
int
sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
uint32_t nb_xae, uint32_t xae_waes,
uint32_t xaq_buf_size, uint16_t nb_hwgrp)
{
struct npa_pool_s pool;
struct npa_aura_s aura;
plt_iova_t iova;
uint32_t i;
int rc;
if (xaq->mem != NULL) {
rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
if (rc < 0) {
plt_err("Failed to release XAQ %d", rc);
return rc;
}
roc_npa_pool_destroy(xaq->aura_handle);
plt_free(xaq->fc);
plt_free(xaq->mem);
memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
}
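/* NPA mirrors the aura count here for flow control (aura.fc_addr below). */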
xaq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
if (xaq->fc == NULL) {
plt_err("Failed to allocate XAQ FC");
rc = -ENOMEM;
goto fail;
}
xaq->nb_xae = nb_xae;
/* Taken from HRM 14.3.3(4) */
xaq->nb_xaq = (SSO_XAQ_CACHE_CNT * nb_hwgrp);
xaq->nb_xaq += PLT_MAX(1 + ((xaq->nb_xae - 1) / xae_waes), xaq->nb_xaq);
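/* Worked example (values assumed for illustration): with nb_hwgrp = 2,
 * xae_waes = 16 and nb_xae = 8192, the cache term is 7 * 2 = 14 and
 * PLT_MAX(1 + (8192 - 1) / 16, 14) = 512, giving nb_xaq = 14 + 512 = 526.
 */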
xaq->mem = plt_zmalloc(xaq_buf_size * xaq->nb_xaq, xaq_buf_size);
if (xaq->mem == NULL) {
plt_err("Failed to allocate XAQ mem");
rc = -ENOMEM;
goto free_fc;
}
memset(&pool, 0, sizeof(struct npa_pool_s));
pool.nat_align = 1;
memset(&aura, 0, sizeof(aura));
aura.fc_ena = 1;
aura.fc_addr = (uint64_t)xaq->fc;
aura.fc_hyst_bits = 0; /* Store count on all updates */
rc = roc_npa_pool_create(&xaq->aura_handle, xaq_buf_size, xaq->nb_xaq,
&aura, &pool);
if (rc) {
plt_err("Failed to create XAQ pool");
goto npa_fail;
}
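/* Pre-populate the aura: return every XAQ buffer so it is available for
 * SSO to allocate.
 */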
iova = (uint64_t)xaq->mem;
for (i = 0; i < xaq->nb_xaq; i++) {
roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
iova += xaq_buf_size;
}
roc_npa_aura_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
/* When SW does addwork (enqueue), it checks for space in the XAQ by
 * comparing the count at fc_addr above against the xaq_lmt calculated
 * below. Keep a minimum headroom of 7 XAQs per HWGRP so that SSO can
 * fetch and cache XAQs even before any enqueue is issued.
 */
xaq->xaq_lmt = xaq->nb_xaq - (nb_hwgrp * SSO_XAQ_CACHE_CNT);
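/* Continuing the example above: xaq_lmt = 526 - (2 * 7) = 512. */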
return 0;
npa_fail:
plt_free(xaq->mem);
free_fc:
plt_free(xaq->fc);
fail:
memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
return rc;
}
int
roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso, uint32_t nb_xae)
{
struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
return sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae,
roc_sso->xae_waes, roc_sso->xaq_buf_size,
roc_sso->nb_hwgrp);
}
int
sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
uint16_t nb_hwgrp)
{
int rc;
if (xaq->mem != NULL) {
if (nb_hwgrp) {
rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
if (rc < 0) {
plt_err("Failed to release XAQ %d", rc);
return rc;
}
}
roc_npa_pool_destroy(xaq->aura_handle);
plt_free(xaq->fc);
plt_free(xaq->mem);
}
memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
return 0;
}
int
roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso, uint16_t nb_hwgrp)
{
struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
return sso_hwgrp_free_xaq_aura(dev, &roc_sso->xaq, nb_hwgrp);
}
int
sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps)
{

View File

@@ -27,6 +27,15 @@ struct roc_sso_hwgrp_stats {
uint64_t page_cnt;
};
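/* XAQ pool/aura bookkeeping filled by sso_hwgrp_init_xaq_aura(): buffer and
 * event counts, the software enqueue limit, the NPA aura handle, the
 * flow-control count address and the backing XAQ memory.
 */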
struct roc_sso_xaq_data {
uint32_t nb_xaq;
uint32_t nb_xae;
uint32_t xaq_lmt;
uint64_t aura_handle;
void *fc;
void *mem;
};
struct roc_sso {
struct plt_pci_device *pci_dev;
/* Public data. */
@@ -35,6 +44,7 @@ struct roc_sso {
uint16_t nb_hwgrp;
uint8_t nb_hws;
uintptr_t lmt_base;
struct roc_sso_xaq_data xaq;
/* HW Const. */
uint32_t xae_waes;
uint32_t xaq_buf_size;
@@ -95,6 +105,10 @@ int __roc_api roc_sso_hwgrp_hws_link_status(struct roc_sso *roc_sso,
uintptr_t __roc_api roc_sso_hws_base_get(struct roc_sso *roc_sso, uint8_t hws);
uintptr_t __roc_api roc_sso_hwgrp_base_get(struct roc_sso *roc_sso,
uint16_t hwgrp);
int __roc_api roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso,
uint32_t nb_xae);
int __roc_api roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso,
uint16_t nb_hwgrp);
/* Debug */
void __roc_api roc_sso_dump(struct roc_sso *roc_sso, uint8_t nb_hws,

View File

@@ -47,6 +47,11 @@ void sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp,
uint16_t hwgrp[], uint16_t n, uint16_t enable);
int sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps);
int sso_hwgrp_release_xaq(struct dev *dev, uint16_t hwgrps);
int sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
uint32_t nb_xae, uint32_t xae_waes,
uint32_t xaq_buf_size, uint16_t nb_hwgrp);
int sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
uint16_t nb_hwgrp);
/* SSO IRQ */
int sso_register_irqs_priv(struct roc_sso *roc_sso,

View File

@@ -319,7 +319,9 @@ INTERNAL {
roc_sso_dump;
roc_sso_hwgrp_alloc_xaq;
roc_sso_hwgrp_base_get;
roc_sso_hwgrp_free_xaq_aura;
roc_sso_hwgrp_hws_link_status;
roc_sso_hwgrp_init_xaq_aura;
roc_sso_hwgrp_qos_config;
roc_sso_hwgrp_release_xaq;
roc_sso_hwgrp_set_priority;

View File

@@ -132,6 +132,7 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
plt_write64(0, base + SSO_LF_GGRP_QCTL);
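/* Invalidate this workslot's get-work cache (GWC) before flushing. */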
plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
req = queue_id; /* GGRP ID */
req |= BIT_ULL(18); /* Grouped */
req |= BIT_ULL(16); /* WAIT */
@@ -177,6 +178,7 @@ cn10k_sso_hws_reset(void *arg, void *hws)
} gw;
uint8_t pend_tt;
plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
/* Wait till getwork/swtp/waitw/desched completes. */
do {
pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);