common/cnxk: support NIX TM internal hierarchy

Add support to create internal TM default hierarchy and ratelimit
hierarchy and API to ratelimit SQ to a given rate. This will be
used by cnxk ethdev driver's tx queue ratelimit op.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
This commit is contained in:
Nithin Dabilpuram 2021-04-06 20:11:29 +05:30 committed by Jerin Jacob
parent 0885429c30
commit 5a960e265d
5 changed files with 309 additions and 0 deletions

View File

@@ -330,6 +330,8 @@ enum roc_tm_node_level {
/*
* TM runtime hierarchy init API.
*/
int __roc_api roc_nix_tm_init(struct roc_nix *roc_nix);
void __roc_api roc_nix_tm_fini(struct roc_nix *roc_nix);
int __roc_api roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable);
int __roc_api roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq);
@@ -391,6 +393,11 @@ roc_nix_tm_shaper_profile_get(struct roc_nix *roc_nix, uint32_t profile_id);
struct roc_nix_tm_shaper_profile *__roc_api roc_nix_tm_shaper_profile_next(
struct roc_nix *roc_nix, struct roc_nix_tm_shaper_profile *__prev);
/*
* TM ratelimit tree API.
*/
int __roc_api roc_nix_tm_rlimit_sq(struct roc_nix *roc_nix, uint16_t qid,
uint64_t rate);
/*
* TM hierarchy enable/disable API.
*/

View File

@@ -326,6 +326,7 @@ int nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
int nix_tm_sq_flush_pre(struct roc_nix_sq *sq);
int nix_tm_sq_flush_post(struct roc_nix_sq *sq);
int nix_tm_smq_xoff(struct nix *nix, struct nix_tm_node *node, bool enable);
int nix_tm_prepare_default_tree(struct roc_nix *roc_nix);
int nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node);
int nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
enum roc_nix_tm_tree tree, bool free);
@@ -344,6 +345,7 @@ int nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree);
int nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree);
int nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
bool rr_quantum_only);
int nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
/*
* TM priv utils.

View File

@@ -1088,6 +1088,162 @@ alloc_err:
return rc;
}
/* Build the internal default TM hierarchy for this LF: one chain of
 * non-leaf nodes (ROOT, SCH1, ... down to the level above SMQ) with
 * every SQ attached as a leaf under the last non-leaf node.
 * Returns 0 on success, negative errno on failure.
 */
int
nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint32_t next_id = nix->nb_tx_queues;
	struct nix_tm_node *node = NULL;
	uint8_t lvl, lvl_end, leaf_lvl;
	uint32_t parent_id, qid;
	int rc;

	/* Non-leaf ids start after the SQ id space so they never clash
	 * with leaf ids (leaf id == SQ qid).
	 */
	parent_id = ROC_NIX_TM_NODE_ID_INVALID;

	/* With TL1 access there is one extra scheduling level */
	lvl_end = nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 :
						ROC_TM_LVL_SCH3;

	/* Add ROOT, SCH1, SCH2, SCH3, [SCH4] nodes as a single chain */
	for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
		node = nix_tm_node_alloc();
		if (node == NULL) {
			rc = -ENOMEM;
			goto error;
		}

		node->id = next_id;
		node->parent_id = parent_id;
		node->priority = 0;
		node->weight = NIX_TM_DFLT_RR_WT;
		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
		node->lvl = lvl;
		node->tree = ROC_NIX_TM_DEFAULT;

		rc = nix_tm_node_add(roc_nix, node);
		if (rc)
			goto error;
		parent_id = next_id++;
	}

	/* All leaves hang off the last non-leaf node added above */
	parent_id = next_id - 1;
	leaf_lvl = nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
						 ROC_TM_LVL_SCH4;

	/* Add one leaf node per SQ */
	for (qid = 0; qid < nix->nb_tx_queues; qid++) {
		node = nix_tm_node_alloc();
		if (node == NULL) {
			rc = -ENOMEM;
			goto error;
		}

		node->id = qid;
		node->parent_id = parent_id;
		node->priority = 0;
		node->weight = NIX_TM_DFLT_RR_WT;
		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
		node->lvl = leaf_lvl;
		node->tree = ROC_NIX_TM_DEFAULT;

		rc = nix_tm_node_add(roc_nix, node);
		if (rc)
			goto error;
	}

	return 0;
error:
	/* node is either the allocation that failed (NULL) or the node
	 * that failed to be added; free handles both.
	 */
	nix_tm_node_free(node);
	return rc;
}
/* Build the internal rate-limit TM hierarchy: a shared non-leaf chain
 * (ROOT .. SCH2/SCH3), then one SMQ-level node per SQ, and one leaf per
 * SQ attached to its own SMQ node so each SQ can be shaped independently
 * by roc_nix_tm_rlimit_sq().
 * Returns 0 on success, negative errno on failure.
 */
int
nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	uint32_t nonleaf_id = nix->nb_tx_queues;
	struct nix_tm_node *node = NULL;
	uint8_t leaf_lvl, lvl, lvl_end;
	uint32_t parent, i;
	int rc = 0;

	/* Add ROOT, SCH1, SCH2 nodes */
	parent = ROC_NIX_TM_NODE_ID_INVALID;
	lvl_end = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH3 :
						 ROC_TM_LVL_SCH2);

	for (lvl = ROC_TM_LVL_ROOT; lvl <= lvl_end; lvl++) {
		rc = -ENOMEM;
		node = nix_tm_node_alloc();
		if (!node)
			goto error;

		node->id = nonleaf_id;
		node->parent_id = parent;
		node->priority = 0;
		node->weight = NIX_TM_DFLT_RR_WT;
		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
		node->lvl = lvl;
		node->tree = ROC_NIX_TM_RLIMIT;

		rc = nix_tm_node_add(roc_nix, node);
		if (rc)
			goto error;
		parent = nonleaf_id;
		nonleaf_id++;
	}

	/* SMQ is mapped to SCH4 when we have TL1 access and SCH3 otherwise */
	lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_SCH4 : ROC_TM_LVL_SCH3);

	/* Add per queue SMQ nodes i.e SCH4 / SCH3 */
	for (i = 0; i < nix->nb_tx_queues; i++) {
		rc = -ENOMEM;
		node = nix_tm_node_alloc();
		if (!node)
			goto error;

		node->id = nonleaf_id + i;
		node->parent_id = parent;
		node->priority = 0;
		node->weight = NIX_TM_DFLT_RR_WT;
		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
		node->lvl = lvl;
		node->tree = ROC_NIX_TM_RLIMIT;

		rc = nix_tm_node_add(roc_nix, node);
		if (rc)
			goto error;
	}

	parent = nonleaf_id;
	leaf_lvl = (nix_tm_have_tl1_access(nix) ? ROC_TM_LVL_QUEUE :
						  ROC_TM_LVL_SCH4);

	/* Add leaf nodes */
	for (i = 0; i < nix->nb_tx_queues; i++) {
		rc = -ENOMEM;
		node = nix_tm_node_alloc();
		if (!node)
			goto error;

		node->id = i;
		/* BUGFIX: attach each leaf to its OWN per-queue SMQ node
		 * (parent + i), not the shared first SMQ node; otherwise
		 * all SQs would share one SMQ and per-queue rate limiting
		 * via the parent's shaper would be impossible.
		 */
		node->parent_id = parent + i;
		node->priority = 0;
		node->weight = NIX_TM_DFLT_RR_WT;
		node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
		node->lvl = leaf_lvl;
		node->tree = ROC_NIX_TM_RLIMIT;

		rc = nix_tm_node_add(roc_nix, node);
		if (rc)
			goto error;
	}

	return 0;
error:
	nix_tm_node_free(node);
	return rc;
}
int
nix_tm_free_resources(struct roc_nix *roc_nix, uint32_t tree_mask, bool hw_only)
{

View File

@@ -543,3 +543,144 @@ skip_sq_update:
nix->tm_flags |= NIX_TM_HIERARCHY_ENA;
return 0;
}
/* Initialize runtime TM state: tear down any user hierarchy and build
 * both internal trees (default and rate-limit).
 * Returns 0 on success, -EBUSY if a hierarchy is currently enabled, or
 * a negative errno from tree preparation.
 */
int
roc_nix_tm_init(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	int rc;

	/* Refuse to re-init while a hierarchy is live in hardware */
	if (nix->tm_flags & NIX_TM_HIERARCHY_ENA) {
		plt_err("Cannot init while existing hierarchy is enabled");
		return -EBUSY;
	}

	/* Free up all user resources already held */
	rc = nix_tm_free_resources(roc_nix, NIX_TM_TREE_MASK_ALL, false);
	if (rc != 0) {
		plt_err("Failed to freeup all nodes and resources, rc=%d", rc);
		return rc;
	}

	/* Prepare default tree */
	rc = nix_tm_prepare_default_tree(roc_nix);
	if (rc != 0) {
		plt_err("failed to prepare default tm tree, rc=%d", rc);
		return rc;
	}

	/* Prepare rlimit tree */
	rc = nix_tm_prepare_rate_limited_tree(roc_nix);
	if (rc != 0) {
		plt_err("failed to prepare rlimit tm tree, rc=%d", rc);
		return rc;
	}

	return rc;
}
/* Rate limit a SQ of the rate-limit tree to @rate bits/sec by programming
 * the PIR shaper on the SQ's parent (SMQ/MDQ level) node.  A rate of 0
 * software-xoffs the parent instead, pausing the queue.
 *
 * Returns 0 on success, NIX_ERR_TM_INVALID_TREE if the rlimit tree is not
 * the enabled hierarchy, NIX_ERR_TM_INVALID_NODE if qid has no usable
 * leaf, -ENOSPC if no mbox message could be allocated, or an mbox error.
 */
int
roc_nix_tm_rlimit_sq(struct roc_nix *roc_nix, uint16_t qid, uint64_t rate)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct nix_tm_shaper_profile profile;
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_tm_node *node, *parent;
	volatile uint64_t *reg, *regval;
	struct nix_txschq_config *req;
	uint16_t flags;
	uint8_t k = 0;
	int rc;

	/* Only valid when the rate-limit hierarchy is the enabled tree */
	if (nix->tm_tree != ROC_NIX_TM_RLIMIT ||
	    !(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
		return NIX_ERR_TM_INVALID_TREE;

	node = nix_tm_node_search(nix, qid, ROC_NIX_TM_RLIMIT);

	/* check if we found a valid leaf node */
	if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
	    node->parent->hw_id == NIX_TM_HW_ID_INVALID)
		return NIX_ERR_TM_INVALID_NODE;

	parent = node->parent;
	flags = parent->flags;

	req = mbox_alloc_msg_nix_txschq_cfg(mbox);
	/* BUGFIX: mbox allocation can fail; bail out instead of
	 * dereferencing NULL below.
	 */
	if (req == NULL)
		return -ENOSPC;
	req->lvl = NIX_TXSCH_LVL_MDQ;
	reg = req->reg;
	regval = req->regval;

	if (rate == 0) {
		/* Pause the queue: software xoff the parent */
		k += nix_tm_sw_xoff_prep(parent, true, &reg[k], &regval[k]);
		flags &= ~NIX_TM_NODE_ENABLED;
		goto exit;
	}

	/* Re-enable the parent if it was previously xoffed */
	if (!(flags & NIX_TM_NODE_ENABLED)) {
		k += nix_tm_sw_xoff_prep(parent, false, &reg[k], &regval[k]);
		flags |= NIX_TM_NODE_ENABLED;
	}

	/* Use only PIR for rate limit */
	memset(&profile, 0, sizeof(profile));
	profile.peak.rate = rate;
	/* Minimum burst of ~4us Bytes of Tx */
	profile.peak.size = PLT_MAX((uint64_t)roc_nix_max_pkt_len(roc_nix),
				    (4ul * rate) / ((uint64_t)1E6 * 8));
	/* Track the lowest rate configured across queues */
	if (!nix->tm_rate_min || nix->tm_rate_min > rate)
		nix->tm_rate_min = rate;

	k += nix_tm_shaper_reg_prep(parent, &profile, &reg[k], &regval[k]);
exit:
	req->num_regs = k;
	rc = mbox_process(mbox);
	if (rc)
		return rc;

	/* Commit flag changes only after hardware accepted the config */
	parent->flags = flags;
	return 0;
}
/* Tear down all TM state: free software nodes for every tree, release
 * all hardware scheduler queues back to AF, reset the allocation bitmaps
 * and reservation counters, and clear shaper profiles.  The caller is
 * expected to have already disabled transmit.
 */
void
roc_nix_tm_fini(struct roc_nix *roc_nix)
{
	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
	struct mbox *mbox = (&nix->dev)->mbox;
	struct nix_txsch_free_req *req;
	uint8_t lvl;
	int rc;

	/* Drop software nodes and resources of every tree */
	rc = nix_tm_free_resources(roc_nix, NIX_TM_TREE_MASK_ALL, false);
	if (rc)
		plt_err("Failed to freeup existing nodes or rsrcs, rc=%d", rc);

	/* Ask AF to release every remaining scheduler queue */
	req = mbox_alloc_msg_nix_txsch_free(mbox);
	if (req == NULL)
		return;
	req->flags = TXSCHQ_FREE_ALL;
	rc = mbox_process(mbox);
	if (rc)
		plt_err("Failed to freeup all res, rc=%d", rc);

	/* Nothing is held any more; reset per-level bookkeeping */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		plt_bitmap_reset(nix->schq_bmp[lvl]);
		plt_bitmap_reset(nix->schq_contig_bmp[lvl]);
		nix->contig_rsvd[lvl] = 0;
		nix->discontig_rsvd[lvl] = 0;
	}

	/* Clear shaper profiles */
	nix_tm_clear_shaper_profiles(nix);
	nix->tm_tree = 0;
	nix->tm_flags &= ~NIX_TM_HIERARCHY_ENA;
}

View File

@@ -104,9 +104,11 @@ INTERNAL {
roc_nix_xstats_names_get;
roc_nix_switch_hdr_set;
roc_nix_eeprom_info_get;
roc_nix_tm_fini;
roc_nix_tm_free_resources;
roc_nix_tm_hierarchy_disable;
roc_nix_tm_hierarchy_enable;
roc_nix_tm_init;
roc_nix_tm_node_add;
roc_nix_tm_node_delete;
roc_nix_tm_node_get;
@@ -114,6 +116,7 @@ INTERNAL {
roc_nix_tm_node_name_get;
roc_nix_tm_node_next;
roc_nix_tm_node_pkt_mode_update;
roc_nix_tm_rlimit_sq;
roc_nix_tm_shaper_profile_add;
roc_nix_tm_shaper_profile_delete;
roc_nix_tm_shaper_profile_get;