common/cnxk: reserve AURA zero on CN10KA NPA

Reserve AURA id 0 on cn10k and provide a mechanism to specifically
allocate it and free it via the roc_npa_* APIs.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
This commit is contained in:
Nithin Dabilpuram 2022-09-12 18:44:02 +05:30 committed by Jerin Jacob
parent da1ec39060
commit 8e5a4adb4f
8 changed files with 97 additions and 24 deletions

View File

@ -75,7 +75,7 @@ roc_dpi_configure(struct roc_dpi *roc_dpi)
memset(&aura, 0, sizeof(aura));
rc = roc_npa_pool_create(&aura_handle, DPI_CMD_QUEUE_SIZE,
DPI_CMD_QUEUE_BUFS, &aura, &pool);
DPI_CMD_QUEUE_BUFS, &aura, &pool, 0);
if (rc) {
plt_err("Failed to create NPA pool, err %d\n", rc);
return rc;

View File

@ -713,7 +713,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
aura.fc_addr = (uint64_t)sq->fc;
aura.fc_hyst_bits = 0; /* Store count on all updates */
rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, nb_sqb_bufs, &aura,
&pool);
&pool, 0);
if (rc)
goto fail;

View File

@ -260,16 +260,60 @@ bitmap_ctzll(uint64_t slab)
return __builtin_ctzll(slab);
}
static int
find_free_aura(struct npa_lf *lf, uint32_t flags)
{
	struct plt_bitmap *bmp = lf->npa_bmp;
	uint64_t zero_bit_saved = 0;
	uint64_t slab = 0;
	uint32_t pos = 0;
	int aura = -1;
	int rc;

	/* Caller explicitly asked for the reserved zero aura */
	if (flags & ROC_NPA_ZERO_AURA_F) {
		if (plt_bitmap_get(bmp, 0))
			return 0;
		plt_err("Zero aura already in use");
		return -1;
	}

	/* Temporarily hide aura 0 from the scan when it is reserved,
	 * remembering its state so it can be restored afterwards.
	 */
	if (lf->zero_aura_rsvd) {
		zero_bit_saved = plt_bitmap_get(bmp, 0);
		if (zero_bit_saved)
			plt_bitmap_clear(bmp, 0);
	}

	/* Scan the bitmap from the start for any free aura */
	plt_bitmap_scan_init(bmp);
	rc = plt_bitmap_scan(bmp, &pos, &slab);
	if (rc != 0)
		aura = pos + bitmap_ctzll(slab);
	else
		plt_err("Aura's exhausted");

	/* Put the zero aura bit back if it was cleared above */
	if (lf->zero_aura_rsvd && zero_bit_saved)
		plt_bitmap_set(bmp, 0);

	return aura;
}
static int
npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
const uint32_t block_count, struct npa_aura_s *aura,
struct npa_pool_s *pool, uint64_t *aura_handle)
struct npa_pool_s *pool, uint64_t *aura_handle,
uint32_t flags)
{
int rc, aura_id, pool_id, stack_size, alloc_size;
char name[PLT_MEMZONE_NAMESIZE];
const struct plt_memzone *mz;
uint64_t slab;
uint32_t pos;
/* Sanity check */
if (!lf || !block_size || !block_count || !pool || !aura ||
@ -281,20 +325,11 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
block_size > ROC_NPA_MAX_BLOCK_SZ)
return NPA_ERR_INVALID_BLOCK_SZ;
pos = 0;
slab = 0;
/* Scan from the beginning */
plt_bitmap_scan_init(lf->npa_bmp);
/* Scan bitmap to get the free pool */
rc = plt_bitmap_scan(lf->npa_bmp, &pos, &slab);
/* Empty bitmap */
if (rc == 0) {
plt_err("Mempools exhausted");
return NPA_ERR_AURA_ID_ALLOC;
}
/* Get aura_id from resource bitmap */
aura_id = pos + bitmap_ctzll(slab);
aura_id = find_free_aura(lf, flags);
if (aura_id < 0)
return NPA_ERR_AURA_ID_ALLOC;
/* Mark pool as reserved */
plt_bitmap_clear(lf->npa_bmp, aura_id);
@ -374,7 +409,7 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
int
roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
uint32_t block_count, struct npa_aura_s *aura,
struct npa_pool_s *pool)
struct npa_pool_s *pool, uint32_t flags)
{
struct npa_aura_s defaura;
struct npa_pool_s defpool;
@ -394,6 +429,11 @@ roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
goto error;
}
if (flags & ROC_NPA_ZERO_AURA_F && !lf->zero_aura_rsvd) {
rc = NPA_ERR_ALLOC;
goto error;
}
if (aura == NULL) {
memset(&defaura, 0, sizeof(struct npa_aura_s));
aura = &defaura;
@ -406,7 +446,7 @@ roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
}
rc = npa_aura_pool_pair_alloc(lf, block_size, block_count, aura, pool,
aura_handle);
aura_handle, flags);
if (rc) {
plt_err("Failed to alloc pool or aura rc=%d", rc);
goto error;
@ -522,6 +562,26 @@ roc_npa_pool_range_update_check(uint64_t aura_handle)
return 0;
}
uint64_t
roc_npa_zero_aura_handle(void)
{
	struct npa_lf *lf = idev_npa_obj_get();

	if (lf == NULL)
		return NPA_ERR_DEVICE_NOT_BOUNDED;

	if (idev_get_cfg() == NULL)
		return NPA_ERR_ALLOC;

	/* Hand out the zero aura handle only when it is actually reserved */
	return lf->zero_aura_rsvd ? roc_npa_aura_handle_gen(0, lf->base) : 0;
}
static inline int
npa_attach(struct mbox *mbox)
{
@ -672,6 +732,10 @@ npa_dev_init(struct npa_lf *lf, uintptr_t base, struct mbox *mbox)
for (i = 0; i < nr_pools; i++)
plt_bitmap_set(lf->npa_bmp, i);
/* Reserve zero aura for all models other than CN9K */
if (!roc_model_is_cn9k())
lf->zero_aura_rsvd = true;
/* Allocate memory for qint context */
lf->npa_qint_mem = plt_zmalloc(sizeof(struct npa_qint) * nr_pools, 0);
if (lf->npa_qint_mem == NULL) {

View File

@ -711,10 +711,13 @@ struct roc_npa {
int __roc_api roc_npa_dev_init(struct roc_npa *roc_npa);
int __roc_api roc_npa_dev_fini(struct roc_npa *roc_npa);
/* Flags to pool create */
#define ROC_NPA_ZERO_AURA_F BIT(0)
/* NPA pool */
int __roc_api roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
uint32_t block_count, struct npa_aura_s *aura,
struct npa_pool_s *pool);
struct npa_pool_s *pool, uint32_t flags);
int __roc_api roc_npa_aura_limit_modify(uint64_t aura_handle,
uint16_t aura_limit);
int __roc_api roc_npa_pool_destroy(uint64_t aura_handle);
@ -722,6 +725,7 @@ int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
uint64_t start_iova,
uint64_t end_iova);
uint64_t __roc_api roc_npa_zero_aura_handle(void);
/* Init callbacks */
typedef int (*roc_npa_lf_init_cb_t)(struct plt_pci_device *pci_dev);

View File

@ -32,6 +32,7 @@ struct npa_lf {
uint8_t aura_sz;
uint32_t qints;
uintptr_t base;
bool zero_aura_rsvd;
};
struct npa_qint {

View File

@ -473,7 +473,7 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
aura.fc_addr = (uint64_t)xaq->fc;
aura.fc_hyst_bits = 0; /* Store count on all updates */
rc = roc_npa_pool_create(&xaq->aura_handle, xaq_buf_size, xaq->nb_xaq,
&aura, &pool);
&aura, &pool, 0);
if (rc) {
plt_err("Failed to create XAQ pool");
goto npa_fail;

View File

@ -318,6 +318,7 @@ INTERNAL {
roc_npa_pool_destroy;
roc_npa_pool_op_pc_reset;
roc_npa_pool_range_update_check;
roc_npa_zero_aura_handle;
roc_npc_fini;
roc_npc_flow_create;
roc_npc_flow_destroy;

View File

@ -72,10 +72,10 @@ cnxk_mempool_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
int
cnxk_mempool_alloc(struct rte_mempool *mp)
{
uint32_t block_count, flags = 0;
uint64_t aura_handle = 0;
struct npa_aura_s aura;
struct npa_pool_s pool;
uint32_t block_count;
size_t block_size;
int rc = -ERANGE;
@ -100,8 +100,11 @@ cnxk_mempool_alloc(struct rte_mempool *mp)
if (mp->pool_config != NULL)
memcpy(&aura, mp->pool_config, sizeof(struct npa_aura_s));
if (aura.ena && aura.pool_addr == 0)
flags = ROC_NPA_ZERO_AURA_F;
rc = roc_npa_pool_create(&aura_handle, block_size, block_count, &aura,
&pool);
&pool, flags);
if (rc) {
plt_err("Failed to alloc pool or aura rc=%d", rc);
goto error;