net/bnxt: modify context memory allocation

Newer devices like the SR2 may have on-chip backing store and do not
require host-backed memory allocation.

In these cases, HWRM_FUNC_BACKING_STORE_QCAPS will return a zero entry
size to indicate contexts for which the host should not allocate backing
store.

Selectively allocate context memory based on device capabilities, and
only enable backing store for the appropriate contexts.

Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>

@@ -4212,39 +4212,49 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	ctx_pg = &ctx->qp_mem;
 	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
-	mem_size = ctx->qp_entry_size * ctx_pg->entries;
-	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
-	if (rc)
-		return rc;
+	if (ctx->qp_entry_size) {
+		mem_size = ctx->qp_entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
+		if (rc)
+			return rc;
+	}
 
 	ctx_pg = &ctx->srq_mem;
 	ctx_pg->entries = ctx->srq_max_l2_entries;
-	mem_size = ctx->srq_entry_size * ctx_pg->entries;
-	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
-	if (rc)
-		return rc;
+	if (ctx->srq_entry_size) {
+		mem_size = ctx->srq_entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
+		if (rc)
+			return rc;
+	}
 
 	ctx_pg = &ctx->cq_mem;
 	ctx_pg->entries = ctx->cq_max_l2_entries;
-	mem_size = ctx->cq_entry_size * ctx_pg->entries;
-	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
-	if (rc)
-		return rc;
+	if (ctx->cq_entry_size) {
+		mem_size = ctx->cq_entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
+		if (rc)
+			return rc;
+	}
 
 	ctx_pg = &ctx->vnic_mem;
 	ctx_pg->entries = ctx->vnic_max_vnic_entries +
		ctx->vnic_max_ring_table_entries;
-	mem_size = ctx->vnic_entry_size * ctx_pg->entries;
-	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
-	if (rc)
-		return rc;
+	if (ctx->vnic_entry_size) {
+		mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
+		if (rc)
+			return rc;
+	}
 
 	ctx_pg = &ctx->stat_mem;
 	ctx_pg->entries = ctx->stat_max_entries;
-	mem_size = ctx->stat_entry_size * ctx_pg->entries;
-	rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
-	if (rc)
-		return rc;
+	if (ctx->stat_entry_size) {
+		mem_size = ctx->stat_entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
+		if (rc)
+			return rc;
+	}
 
 	min = ctx->tqm_min_entries_per_ring;
@@ -4260,10 +4270,12 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
 		ctx_pg = ctx->tqm_mem[i];
 		ctx_pg->entries = i ? entries : entries_sp;
-		mem_size = ctx->tqm_entry_size * ctx_pg->entries;
-		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
-		if (rc)
-			return rc;
+		if (ctx->tqm_entry_size) {
+			mem_size = ctx->tqm_entry_size * ctx_pg->entries;
+			rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
+			if (rc)
+				return rc;
+		}
 		ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
 	}
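
The same "allocate only when the entry size is nonzero" guard repeats for each
context type above (QP, SRQ, CQ, VNIC, stats, TQM). A minimal sketch of that
convention factored into one helper, purely for illustration and not part of
this patch (bnxt_alloc_ctx_mem_blk appears in the diff; the bnxt_ctx_pg_info
struct name and the size types are assumptions):

	static int bnxt_alloc_ctx_pg(struct bnxt *bp,
				     struct bnxt_ctx_pg_info *ctx_pg,
				     uint32_t entry_size, const char *suffix,
				     int idx)
	{
		size_t mem_size;

		/* HWRM_FUNC_BACKING_STORE_QCAPS reports entry_size == 0 for
		 * contexts the chip backs itself; the host allocates nothing
		 * and treats the context as done.
		 */
		if (entry_size == 0)
			return 0;

		mem_size = (size_t)entry_size * ctx_pg->entries;
		return bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, suffix, idx);
	}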

@@ -64,6 +64,9 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
 				  uint8_t *pg_attr,
 				  uint64_t *pg_dir)
 {
+	if (rmem->nr_pages == 0)
+		return;
+
 	if (rmem->nr_pages > 1) {
 		*pg_attr = 1;
 		*pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
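
With this early return, a context that was never host-allocated keeps
rmem->nr_pages == 0, so its page attribute and page directory fields in the
backing-store config request stay zero and firmware is never pointed at
memory that does not exist. A usage sketch from the config path (the request
field names here are assumptions modeled on the HWRM backing-store config
message, not quoted from this patch):

	/* Skipped contexts (nr_pages == 0) leave these request fields zeroed. */
	bnxt_hwrm_set_pg_attr(&ctx->qp_mem.ring_mem,
			      &req.qpc_pg_size_qpc_lvl,
			      &req.qpc_page_dir);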