net/octeontx2: restructure TM helper functions

Restructure the traffic manager helper functions by splitting them into
multiple sets of register configurations, such as shaping, scheduling
and topology config.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Signed-off-by: Krzysztof Kanas <kkanas@marvell.com>
Author:    Nithin Dabilpuram <ndabilpuram@marvell.com>
Date:      2020-04-03 14:22:07 +05:30
Committer: Ferruh Yigit
parent 1e8d75d805
commit ec8ddd4fb1
2 changed files with 411 additions and 351 deletions

--- a/drivers/net/octeontx2/otx2_tm.c
+++ b/drivers/net/octeontx2/otx2_tm.c

@@ -94,52 +94,50 @@ nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
 }
 
 static inline uint64_t
-shaper_rate_to_nix(uint64_t cclk_hz, uint64_t cclk_ticks,
-		   uint64_t value, uint64_t *exponent_p,
+shaper_rate_to_nix(uint64_t value, uint64_t *exponent_p,
 		   uint64_t *mantissa_p, uint64_t *div_exp_p)
 {
 	uint64_t div_exp, exponent, mantissa;
 
 	/* Boundary checks */
-	if (value < MIN_SHAPER_RATE(cclk_hz, cclk_ticks) ||
-	    value > MAX_SHAPER_RATE(cclk_hz, cclk_ticks))
+	if (value < MIN_SHAPER_RATE ||
+	    value > MAX_SHAPER_RATE)
 		return 0;
 
-	if (value <= SHAPER_RATE(cclk_hz, cclk_ticks, 0, 0, 0)) {
+	if (value <= SHAPER_RATE(0, 0, 0)) {
 		/* Calculate rate div_exp and mantissa using
 		 * the following formula:
 		 *
-		 * value = (cclk_hz * (256 + mantissa)
-		 *		/ ((cclk_ticks << div_exp) * 256)
+		 * value = (2E6 * (256 + mantissa)
+		 *		/ ((1 << div_exp) * 256))
 		 */
 		div_exp = 0;
 		exponent = 0;
 		mantissa = MAX_RATE_MANTISSA;
 
-		while (value < (cclk_hz / (cclk_ticks << div_exp)))
+		while (value < (NIX_SHAPER_RATE_CONST / (1 << div_exp)))
 			div_exp += 1;
 
 		while (value <
-		       ((cclk_hz * (256 + mantissa)) /
-			((cclk_ticks << div_exp) * 256)))
+		       ((NIX_SHAPER_RATE_CONST * (256 + mantissa)) /
+			((1 << div_exp) * 256)))
 			mantissa -= 1;
 	} else {
 		/* Calculate rate exponent and mantissa using
 		 * the following formula:
 		 *
-		 * value = (cclk_hz * ((256 + mantissa) << exponent)
-		 *		/ (cclk_ticks * 256)
+		 * value = (2E6 * ((256 + mantissa) << exponent)) / 256
 		 *
 		 */
 		div_exp = 0;
 		exponent = MAX_RATE_EXPONENT;
 		mantissa = MAX_RATE_MANTISSA;
 
-		while (value < (cclk_hz * (1 << exponent)) / cclk_ticks)
+		while (value < (NIX_SHAPER_RATE_CONST * (1 << exponent)))
 			exponent -= 1;
 
-		while (value < (cclk_hz * ((256 + mantissa) << exponent)) /
-		       (cclk_ticks * 256))
+		while (value < ((NIX_SHAPER_RATE_CONST *
+				((256 + mantissa) << exponent)) / 256))
 			mantissa -= 1;
 	}
@@ -155,20 +153,7 @@ shaper_rate_to_nix(uint64_t cclk_hz, uint64_t cclk_ticks,
 	*mantissa_p = mantissa;
 
 	/* Calculate real rate value */
-	return SHAPER_RATE(cclk_hz, cclk_ticks, exponent, mantissa, div_exp);
-}
-
-static inline uint64_t
-lx_shaper_rate_to_nix(uint64_t cclk_hz, uint32_t hw_lvl,
-		      uint64_t value, uint64_t *exponent,
-		      uint64_t *mantissa, uint64_t *div_exp)
-{
-	if (hw_lvl == NIX_TXSCH_LVL_TL1)
-		return shaper_rate_to_nix(cclk_hz, L1_TIME_WHEEL_CCLK_TICKS,
-					  value, exponent, mantissa, div_exp);
-	else
-		return shaper_rate_to_nix(cclk_hz, LX_TIME_WHEEL_CCLK_TICKS,
-					  value, exponent, mantissa, div_exp);
+	return SHAPER_RATE(exponent, mantissa, div_exp);
 }
 
 static inline uint64_t
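The conversion above inverts the rate formula against the single NIX_SHAPER_RATE_CONST
that replaces the per-level time-wheel parameters. As a quick sanity check, here is a
standalone sketch of the same math (the constant and field widths mirror otx2_tm.h;
the helper is a re-derivation for illustration, not the driver function):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NIX_SHAPER_RATE_CONST ((uint64_t)2E6)

/* rate(bps) = 2E6 * ((256 + mantissa) << exponent) / ((1 << div_exp) * 256) */
static uint64_t
shaper_rate(uint64_t exponent, uint64_t mantissa, uint64_t div_exp)
{
	return (NIX_SHAPER_RATE_CONST * ((256 + mantissa) << exponent)) /
	       ((1ull << div_exp) * 256);
}

int main(void)
{
	/* exponent = mantissa = div_exp = 0: the 2 Mbps base rate */
	printf("base: %" PRIu64 " bps\n", shaper_rate(0, 0, 0));
	/* MAX_RATE_EXPONENT (0xf), MAX_RATE_MANTISSA (0xff): ~130.8 Gbps */
	printf("max:  %" PRIu64 " bps\n", shaper_rate(0xf, 0xff, 0));
	return 0;
}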
@@ -207,329 +192,394 @@ shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p,
 	return SHAPER_BURST(exponent, mantissa);
 }
 
-static int
-configure_shaper_cir_pir_reg(struct otx2_eth_dev *dev,
-			     struct otx2_nix_tm_node *tm_node,
-			     struct shaper_params *cir,
-			     struct shaper_params *pir)
+static void
+shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile,
+		     struct shaper_params *cir,
+		     struct shaper_params *pir)
 {
-	uint32_t shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
-	struct otx2_nix_tm_shaper_profile *shaper_profile = NULL;
 	struct rte_tm_shaper_params *param;
 
-	shaper_profile_id = tm_node->params.shaper_profile_id;
+	if (!profile)
+		return;
 
-	shaper_profile = nix_tm_shaper_profile_search(dev, shaper_profile_id);
-	if (shaper_profile) {
-		param = &shaper_profile->profile;
-		/* Calculate CIR exponent and mantissa */
-		if (param->committed.rate)
-			cir->rate = lx_shaper_rate_to_nix(CCLK_HZ,
-							  tm_node->hw_lvl_id,
-							  param->committed.rate,
-							  &cir->exponent,
-							  &cir->mantissa,
-							  &cir->div_exp);
+	param = &profile->params;
 
-		/* Calculate PIR exponent and mantissa */
-		if (param->peak.rate)
-			pir->rate = lx_shaper_rate_to_nix(CCLK_HZ,
-							  tm_node->hw_lvl_id,
-							  param->peak.rate,
-							  &pir->exponent,
-							  &pir->mantissa,
-							  &pir->div_exp);
+	/* Calculate CIR exponent and mantissa */
+	if (param->committed.rate)
+		cir->rate = shaper_rate_to_nix(param->committed.rate,
+					       &cir->exponent,
+					       &cir->mantissa,
+					       &cir->div_exp);
 
-		/* Calculate CIR burst exponent and mantissa */
-		if (param->committed.size)
-			cir->burst = shaper_burst_to_nix(param->committed.size,
-							 &cir->burst_exponent,
-							 &cir->burst_mantissa);
+	/* Calculate PIR exponent and mantissa */
+	if (param->peak.rate)
+		pir->rate = shaper_rate_to_nix(param->peak.rate,
+					       &pir->exponent,
+					       &pir->mantissa,
+					       &pir->div_exp);
 
-		/* Calculate PIR burst exponent and mantissa */
-		if (param->peak.size)
-			pir->burst = shaper_burst_to_nix(param->peak.size,
-							 &pir->burst_exponent,
-							 &pir->burst_mantissa);
-	}
+	/* Calculate CIR burst exponent and mantissa */
+	if (param->committed.size)
+		cir->burst = shaper_burst_to_nix(param->committed.size,
+						 &cir->burst_exponent,
+						 &cir->burst_mantissa);
+
+	/* Calculate PIR burst exponent and mantissa */
+	if (param->peak.size)
+		pir->burst = shaper_burst_to_nix(param->peak.size,
+						 &pir->burst_exponent,
+						 &pir->burst_mantissa);
+}
+
+static int
+populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq)
+{
+	struct otx2_mbox *mbox = dev->mbox;
+	struct nix_txschq_config *req;
+
+	/*
+	 * Default config for TL1.
+	 * For VF this is always ignored.
+	 */
+
+	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+	req->lvl = NIX_TXSCH_LVL_TL1;
+
+	/* Set DWRR quantum */
+	req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
+	req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
+	req->num_regs++;
+
+	req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
+	req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+	req->num_regs++;
+
+	req->reg[2] = NIX_AF_TL1X_CIR(schq);
+	req->regval[2] = 0;
+	req->num_regs++;
+
+	return otx2_mbox_process(mbox);
+}
+
+static uint8_t
+prepare_tm_sched_reg(struct otx2_eth_dev *dev,
+		     struct otx2_nix_tm_node *tm_node,
+		     volatile uint64_t *reg, volatile uint64_t *regval)
+{
+	uint64_t strict_prio = tm_node->priority;
+	uint32_t hw_lvl = tm_node->hw_lvl;
+	uint32_t schq = tm_node->hw_id;
+	uint64_t rr_quantum;
+	uint8_t k = 0;
+
+	rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
+
+	/* For children to root, strict prio is default if either
+	 * device root is TL2 or TL1 Static Priority is disabled.
+	 */
+	if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
+	    (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
+	     dev->tm_flags & NIX_TM_TL1_NO_SP))
+		strict_prio = TXSCH_TL1_DFLT_RR_PRIO;
+
+	otx2_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
+		    "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
+		    nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
+		    tm_node->id, strict_prio, rr_quantum, tm_node);
+
+	switch (hw_lvl) {
+	case NIX_TXSCH_LVL_SMQ:
+		reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
+		regval[k] = (strict_prio << 24) | rr_quantum;
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL4:
+		reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
+		regval[k] = (strict_prio << 24) | rr_quantum;
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL3:
+		reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
+		regval[k] = (strict_prio << 24) | rr_quantum;
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL2:
+		reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
+		regval[k] = (strict_prio << 24) | rr_quantum;
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL1:
+		reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
+		regval[k] = rr_quantum;
+		k++;
+		break;
 	}
 
-	return 0;
+	return k;
 }
 
-static int
-send_tm_reqval(struct otx2_mbox *mbox, struct nix_txschq_config *req)
+static uint8_t
+prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
+		      struct otx2_nix_tm_shaper_profile *profile,
+		      volatile uint64_t *reg, volatile uint64_t *regval)
 {
-	int rc;
-
-	if (req->num_regs > MAX_REGS_PER_MBOX_MSG)
-		return -ERANGE;
-
-	rc = otx2_mbox_process(mbox);
-	if (rc)
-		return rc;
-
-	req->num_regs = 0;
-	return 0;
-}
-
-static int
-populate_tm_registers(struct otx2_eth_dev *dev,
-		      struct otx2_nix_tm_node *tm_node)
-{
-	uint64_t strict_schedul_prio, rr_prio;
-	struct otx2_mbox *mbox = dev->mbox;
-	volatile uint64_t *reg, *regval;
-	uint64_t parent = 0, child = 0;
 	struct shaper_params cir, pir;
-	struct nix_txschq_config *req;
-	uint64_t rr_quantum;
-	uint32_t hw_lvl;
-	uint32_t schq;
-	int rc;
+	uint32_t schq = tm_node->hw_id;
+	uint8_t k = 0;
 
 	memset(&cir, 0, sizeof(cir));
 	memset(&pir, 0, sizeof(pir));
+	shaper_config_to_nix(profile, &cir, &pir);
 
-	/* Skip leaf nodes */
-	if (tm_node->hw_lvl_id == NIX_TXSCH_LVL_CNT)
-		return 0;
+	otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
+		    "pir %" PRIu64 "(%" PRIu64 "B),"
+		    " cir %" PRIu64 "(%" PRIu64 "B) (%p)",
+		    nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
+		    tm_node->id, pir.rate, pir.burst,
+		    cir.rate, cir.burst, tm_node);
+
+	switch (tm_node->hw_lvl) {
+	case NIX_TXSCH_LVL_SMQ:
+		/* Configure PIR, CIR */
+		reg[k] = NIX_AF_MDQX_PIR(schq);
+		regval[k] = (pir.rate && pir.burst) ?
+				(shaper2regval(&pir) | 1) : 0;
+		k++;
+
+		reg[k] = NIX_AF_MDQX_CIR(schq);
+		regval[k] = (cir.rate && cir.burst) ?
+				(shaper2regval(&cir) | 1) : 0;
+		k++;
+
+		/* Configure RED ALG */
+		reg[k] = NIX_AF_MDQX_SHAPE(schq);
+		regval[k] = ((uint64_t)tm_node->red_algo << 9);
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL4:
+		/* Configure PIR, CIR */
+		reg[k] = NIX_AF_TL4X_PIR(schq);
+		regval[k] = (pir.rate && pir.burst) ?
+				(shaper2regval(&pir) | 1) : 0;
+		k++;
+
+		reg[k] = NIX_AF_TL4X_CIR(schq);
+		regval[k] = (cir.rate && cir.burst) ?
+				(shaper2regval(&cir) | 1) : 0;
+		k++;
+
+		/* Configure RED algo */
+		reg[k] = NIX_AF_TL4X_SHAPE(schq);
+		regval[k] = ((uint64_t)tm_node->red_algo << 9);
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL3:
+		/* Configure PIR, CIR */
+		reg[k] = NIX_AF_TL3X_PIR(schq);
+		regval[k] = (pir.rate && pir.burst) ?
+				(shaper2regval(&pir) | 1) : 0;
+		k++;
+
+		reg[k] = NIX_AF_TL3X_CIR(schq);
+		regval[k] = (cir.rate && cir.burst) ?
+				(shaper2regval(&cir) | 1) : 0;
+		k++;
+
+		/* Configure RED algo */
+		reg[k] = NIX_AF_TL3X_SHAPE(schq);
+		regval[k] = ((uint64_t)tm_node->red_algo << 9);
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL2:
+		/* Configure PIR, CIR */
+		reg[k] = NIX_AF_TL2X_PIR(schq);
+		regval[k] = (pir.rate && pir.burst) ?
+				(shaper2regval(&pir) | 1) : 0;
+		k++;
+
+		reg[k] = NIX_AF_TL2X_CIR(schq);
+		regval[k] = (cir.rate && cir.burst) ?
+				(shaper2regval(&cir) | 1) : 0;
+		k++;
+
+		/* Configure RED algo */
+		reg[k] = NIX_AF_TL2X_SHAPE(schq);
+		regval[k] = ((uint64_t)tm_node->red_algo << 9);
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL1:
+		/* Configure CIR */
+		reg[k] = NIX_AF_TL1X_CIR(schq);
+		regval[k] = (cir.rate && cir.burst) ?
+				(shaper2regval(&cir) | 1) : 0;
+		k++;
+		break;
+	}
+
+	return k;
+}
+
+static int
+populate_tm_reg(struct otx2_eth_dev *dev,
+		struct otx2_nix_tm_node *tm_node)
+{
+	struct otx2_nix_tm_shaper_profile *profile;
+	uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
+	uint64_t regval[MAX_REGS_PER_MBOX_MSG];
+	uint64_t reg[MAX_REGS_PER_MBOX_MSG];
+	struct otx2_mbox *mbox = dev->mbox;
+	uint64_t parent = 0, child = 0;
+	uint32_t hw_lvl, rr_prio, schq;
+	struct nix_txschq_config *req;
+	int rc = -EFAULT;
+	uint8_t k = 0;
+
+	memset(regval_mask, 0, sizeof(regval_mask));
+	profile = nix_tm_shaper_profile_search(dev,
+					tm_node->params.shaper_profile_id);
+	rr_prio = tm_node->rr_prio;
+	hw_lvl = tm_node->hw_lvl;
+	schq = tm_node->hw_id;
 
 	/* Root node will not have a parent node */
-	if (tm_node->hw_lvl_id == dev->otx2_tm_root_lvl)
+	if (hw_lvl == dev->otx2_tm_root_lvl)
 		parent = tm_node->parent_hw_id;
 	else
 		parent = tm_node->parent->hw_id;
 
 	/* Do we need this trigger to configure TL1 */
 	if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
-	    tm_node->hw_lvl_id == dev->otx2_tm_root_lvl) {
-		schq = parent;
-		/*
-		 * Default config for TL1.
-		 * For VF this is always ignored.
-		 */
-		req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
-		req->lvl = NIX_TXSCH_LVL_TL1;
-		/* Set DWRR quantum */
-		req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
-		req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
-		req->num_regs++;
-		req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
-		req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
-		req->num_regs++;
-		req->reg[2] = NIX_AF_TL1X_CIR(schq);
-		req->regval[2] = 0;
-		req->num_regs++;
-		rc = send_tm_reqval(mbox, req);
+	    hw_lvl == dev->otx2_tm_root_lvl) {
+		rc = populate_tm_tl1_default(dev, parent);
 		if (rc)
 			goto error;
 	}
 
-	if (tm_node->hw_lvl_id != NIX_TXSCH_LVL_SMQ)
+	if (hw_lvl != NIX_TXSCH_LVL_SMQ)
 		child = find_prio_anchor(dev, tm_node->id);
 
-	rr_prio = tm_node->rr_prio;
-	hw_lvl = tm_node->hw_lvl_id;
-	strict_schedul_prio = tm_node->priority;
-	schq = tm_node->hw_id;
-	rr_quantum = (tm_node->weight * NIX_TM_RR_QUANTUM_MAX) /
-		MAX_SCHED_WEIGHT;
+	/* Override default rr_prio when TL1
+	 * Static Priority is disabled
+	 */
+	if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
+	    dev->tm_flags & NIX_TM_TL1_NO_SP) {
+		rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
+		child = 0;
+	}
 
-	configure_shaper_cir_pir_reg(dev, tm_node, &cir, &pir);
-
-	otx2_tm_dbg("Configure node %p, lvl %u hw_lvl %u, id %u, hw_id %u,"
-		    "parent_hw_id %" PRIx64 ", pir %" PRIx64 ", cir %" PRIx64,
-		    tm_node, tm_node->level_id, hw_lvl,
-		    tm_node->id, schq, parent, pir.rate, cir.rate);
-
-	rc = -EFAULT;
+	otx2_tm_dbg("Topology config node %s(%u)->%s(%"PRIu64") lvl %u, id %u"
+		    " prio_anchor %"PRIu64" rr_prio %u (%p)",
+		    nix_hwlvl2str(hw_lvl), schq, nix_hwlvl2str(hw_lvl + 1),
+		    parent, tm_node->lvl, tm_node->id, child, rr_prio, tm_node);
 
+	/* Prepare Topology and Link config */
 	switch (hw_lvl) {
 	case NIX_TXSCH_LVL_SMQ:
-		req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
-		req->lvl = hw_lvl;
-		reg = req->reg;
-		regval = req->regval;
-		req->num_regs = 0;
-
 		/* Set xoff which will be cleared later */
-		*reg++ = NIX_AF_SMQX_CFG(schq);
-		*regval++ = BIT_ULL(50) | ((uint64_t)NIX_MAX_VTAG_INS << 36) |
-			    (NIX_MAX_HW_FRS << 8) | NIX_MIN_HW_FRS;
-		req->num_regs++;
-		*reg++ = NIX_AF_MDQX_PARENT(schq);
-		*regval++ = parent << 16;
-		req->num_regs++;
-		*reg++ = NIX_AF_MDQX_SCHEDULE(schq);
-		*regval++ = (strict_schedul_prio << 24) | rr_quantum;
-		req->num_regs++;
-		if (pir.rate && pir.burst) {
-			*reg++ = NIX_AF_MDQX_PIR(schq);
-			*regval++ = shaper2regval(&pir) | 1;
-			req->num_regs++;
-		}
+		reg[k] = NIX_AF_SMQX_CFG(schq);
+		regval[k] = BIT_ULL(50);
+		regval_mask[k] = ~BIT_ULL(50);
+		k++;
 
-		if (cir.rate && cir.burst) {
-			*reg++ = NIX_AF_MDQX_CIR(schq);
-			*regval++ = shaper2regval(&cir) | 1;
-			req->num_regs++;
-		}
-
-		rc = send_tm_reqval(mbox, req);
-		if (rc)
-			goto error;
+		/* Parent and schedule conf */
+		reg[k] = NIX_AF_MDQX_PARENT(schq);
+		regval[k] = parent << 16;
+		k++;
 		break;
 	case NIX_TXSCH_LVL_TL4:
-		req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
-		req->lvl = hw_lvl;
-		req->num_regs = 0;
-		reg = req->reg;
-		regval = req->regval;
+		/* Parent and schedule conf */
+		reg[k] = NIX_AF_TL4X_PARENT(schq);
+		regval[k] = parent << 16;
+		k++;
 
-		*reg++ = NIX_AF_TL4X_PARENT(schq);
-		*regval++ = parent << 16;
-		req->num_regs++;
-		*reg++ = NIX_AF_TL4X_TOPOLOGY(schq);
-		*regval++ = (child << 32) | (rr_prio << 1);
-		req->num_regs++;
-		*reg++ = NIX_AF_TL4X_SCHEDULE(schq);
-		*regval++ = (strict_schedul_prio << 24) | rr_quantum;
-		req->num_regs++;
-		if (pir.rate && pir.burst) {
-			*reg++ = NIX_AF_TL4X_PIR(schq);
-			*regval++ = shaper2regval(&pir) | 1;
-			req->num_regs++;
-		}
-		if (cir.rate && cir.burst) {
-			*reg++ = NIX_AF_TL4X_CIR(schq);
-			*regval++ = shaper2regval(&cir) | 1;
-			req->num_regs++;
-		}
+		reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
+		regval[k] = (child << 32) | (rr_prio << 1);
+		k++;
+
 		/* Configure TL4 to send to SDP channel instead of CGX/LBK */
 		if (otx2_dev_is_sdp(dev)) {
-			*reg++ = NIX_AF_TL4X_SDP_LINK_CFG(schq);
-			*regval++ = BIT_ULL(12);
-			req->num_regs++;
+			reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
+			regval[k] = BIT_ULL(12);
+			k++;
 		}
-
-		rc = send_tm_reqval(mbox, req);
-		if (rc)
-			goto error;
 		break;
 	case NIX_TXSCH_LVL_TL3:
-		req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
-		req->lvl = hw_lvl;
-		req->num_regs = 0;
-		reg = req->reg;
-		regval = req->regval;
+		/* Parent and schedule conf */
+		reg[k] = NIX_AF_TL3X_PARENT(schq);
+		regval[k] = parent << 16;
+		k++;
 
-		*reg++ = NIX_AF_TL3X_PARENT(schq);
-		*regval++ = parent << 16;
-		req->num_regs++;
-		*reg++ = NIX_AF_TL3X_TOPOLOGY(schq);
-		*regval++ = (child << 32) | (rr_prio << 1);
-		req->num_regs++;
-		*reg++ = NIX_AF_TL3X_SCHEDULE(schq);
-		*regval++ = (strict_schedul_prio << 24) | rr_quantum;
-		req->num_regs++;
+		reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
+		regval[k] = (child << 32) | (rr_prio << 1);
+		k++;
 
 		/* Link configuration */
 		if (!otx2_dev_is_sdp(dev) &&
 		    dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
-			*reg++ = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
 						nix_get_link(dev));
-			*regval++ = BIT_ULL(12) | nix_get_relchan(dev);
-			req->num_regs++;
+			regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
+			k++;
 		}
-		if (pir.rate && pir.burst) {
-			*reg++ = NIX_AF_TL3X_PIR(schq);
-			*regval++ = shaper2regval(&pir) | 1;
-			req->num_regs++;
-		}
-		if (cir.rate && cir.burst) {
-			*reg++ = NIX_AF_TL3X_CIR(schq);
-			*regval++ = shaper2regval(&cir) | 1;
-			req->num_regs++;
-		}
-
-		rc = send_tm_reqval(mbox, req);
-		if (rc)
-			goto error;
 		break;
 	case NIX_TXSCH_LVL_TL2:
-		req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
-		req->lvl = hw_lvl;
-		req->num_regs = 0;
-		reg = req->reg;
-		regval = req->regval;
+		/* Parent and schedule conf */
+		reg[k] = NIX_AF_TL2X_PARENT(schq);
+		regval[k] = parent << 16;
+		k++;
 
-		*reg++ = NIX_AF_TL2X_PARENT(schq);
-		*regval++ = parent << 16;
-		req->num_regs++;
-		*reg++ = NIX_AF_TL2X_TOPOLOGY(schq);
-		*regval++ = (child << 32) | (rr_prio << 1);
-		req->num_regs++;
-		*reg++ = NIX_AF_TL2X_SCHEDULE(schq);
-		if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2)
-			*regval++ = (1 << 24) | rr_quantum;
-		else
-			*regval++ = (strict_schedul_prio << 24) | rr_quantum;
-		req->num_regs++;
+		reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
+		regval[k] = (child << 32) | (rr_prio << 1);
+		k++;
 
 		/* Link configuration */
 		if (!otx2_dev_is_sdp(dev) &&
 		    dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
-			*reg++ = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+			reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
 						nix_get_link(dev));
-			*regval++ = BIT_ULL(12) | nix_get_relchan(dev);
-			req->num_regs++;
+			regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
+			k++;
 		}
-		if (pir.rate && pir.burst) {
-			*reg++ = NIX_AF_TL2X_PIR(schq);
-			*regval++ = shaper2regval(&pir) | 1;
-			req->num_regs++;
-		}
-		if (cir.rate && cir.burst) {
-			*reg++ = NIX_AF_TL2X_CIR(schq);
-			*regval++ = shaper2regval(&cir) | 1;
-			req->num_regs++;
-		}
-
-		rc = send_tm_reqval(mbox, req);
-		if (rc)
-			goto error;
 		break;
 	case NIX_TXSCH_LVL_TL1:
-		req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
-		req->lvl = hw_lvl;
-		req->num_regs = 0;
-		reg = req->reg;
-		regval = req->regval;
-
-		*reg++ = NIX_AF_TL1X_SCHEDULE(schq);
-		*regval++ = rr_quantum;
-		req->num_regs++;
-		*reg++ = NIX_AF_TL1X_TOPOLOGY(schq);
-		*regval++ = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
-		req->num_regs++;
-		if (cir.rate && cir.burst) {
-			*reg++ = NIX_AF_TL1X_CIR(schq);
-			*regval++ = shaper2regval(&cir) | 1;
-			req->num_regs++;
-		}
-
-		rc = send_tm_reqval(mbox, req);
-		if (rc)
-			goto error;
+		reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
+		regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
+		k++;
 		break;
 	}
 
+	/* Prepare schedule config */
+	k += prepare_tm_sched_reg(dev, tm_node, &reg[k], &regval[k]);
+
+	/* Prepare shaping config */
+	k += prepare_tm_shaper_reg(tm_node, profile, &reg[k], &regval[k]);
+
+	if (!k)
+		return 0;
+
+	/* Copy and send config mbox */
+	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+	req->lvl = hw_lvl;
+	req->num_regs = k;
+
+	otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
+	otx2_mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
+	otx2_mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);
+
+	rc = otx2_mbox_process(mbox);
+	if (rc)
+		goto error;
+
 	return 0;
 error:
 	otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc);
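The tail of populate_tm_reg() above is the heart of the restructure: topology,
schedule and shaping writes are accumulated into caller-owned reg/regval arrays
by the prepare_tm_*_reg() helpers and flushed in one mailbox request, instead of
one alloc-and-send per level. A minimal standalone sketch of that accumulation
pattern follows; the prep_* helpers, register numbers and the mbox_send() stub
are placeholders standing in for the driver's prepare_tm_*_reg() and otx2_mbox_*()
calls, not the real API:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_REGS 20	/* stands in for MAX_REGS_PER_MBOX_MSG */

/* each helper appends reg/value pairs and returns how many it wrote */
typedef uint8_t (*prepare_fn)(uint64_t *reg, uint64_t *regval);

static uint8_t prep_topology(uint64_t *reg, uint64_t *regval)
{
	reg[0] = 0x100; regval[0] = 1ull << 16;	/* fake parent write */
	return 1;
}

static uint8_t prep_schedule(uint64_t *reg, uint64_t *regval)
{
	reg[0] = 0x200; regval[0] = 0xffffff;	/* fake DWRR quantum */
	return 1;
}

/* stub transport; the driver allocates a nix_txschq_config message and
 * calls otx2_mbox_process() here
 */
static int mbox_send(const uint64_t *reg, const uint64_t *regval, uint8_t n)
{
	uint8_t i;

	for (i = 0; i < n; i++)
		printf("reg 0x%llx <- 0x%llx\n",
		       (unsigned long long)reg[i],
		       (unsigned long long)regval[i]);
	return 0;
}

int main(void)
{
	prepare_fn prep[] = { prep_topology, prep_schedule };
	uint64_t reg[MAX_REGS], regval[MAX_REGS];
	uint8_t k = 0;
	size_t i;

	/* accumulate, then send once: the shape of populate_tm_reg() */
	for (i = 0; i < sizeof(prep) / sizeof(prep[0]); i++)
		k += prep[i](&reg[k], &regval[k]);

	return k ? mbox_send(reg, regval, k) : 0;
}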
@@ -541,13 +591,14 @@ static int
 nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
 {
 	struct otx2_nix_tm_node *tm_node;
-	uint32_t lvl;
+	uint32_t hw_lvl;
 	int rc = 0;
 
-	for (lvl = 0; lvl < (uint32_t)dev->otx2_tm_root_lvl + 1; lvl++) {
+	for (hw_lvl = 0; hw_lvl <= dev->otx2_tm_root_lvl; hw_lvl++) {
 		TAILQ_FOREACH(tm_node, &dev->node_list, node) {
-			if (tm_node->hw_lvl_id == lvl) {
-				rc = populate_tm_registers(dev, tm_node);
+			if (tm_node->hw_lvl == hw_lvl &&
+			    tm_node->hw_lvl != NIX_TXSCH_LVL_CNT) {
+				rc = populate_tm_reg(dev, tm_node);
 				if (rc)
 					goto exit;
 			}
@@ -637,8 +688,8 @@ nix_tm_update_parent_info(struct otx2_eth_dev *dev)
 static int
 nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
 			uint32_t parent_node_id, uint32_t priority,
-			uint32_t weight, uint16_t hw_lvl_id,
-			uint16_t level_id, bool user,
+			uint32_t weight, uint16_t hw_lvl,
+			uint16_t lvl, bool user,
 			struct rte_tm_node_params *params)
 {
 	struct otx2_nix_tm_shaper_profile *shaper_profile;
@@ -655,8 +706,8 @@ nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
 	if (!tm_node)
 		return -ENOMEM;
 
-	tm_node->level_id = level_id;
-	tm_node->hw_lvl_id = hw_lvl_id;
+	tm_node->lvl = lvl;
+	tm_node->hw_lvl = hw_lvl;
 
 	tm_node->id = node_id;
 	tm_node->priority = priority;
@@ -935,18 +986,18 @@ nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,
 			continue;
 
 		if (nix_tm_have_tl1_access(dev) &&
-		    tm_node->hw_lvl_id == NIX_TXSCH_LVL_TL1)
+		    tm_node->hw_lvl == NIX_TXSCH_LVL_TL1)
 			skip_node = true;
 
 		otx2_tm_dbg("Free hwres for node %u, hwlvl %u, hw_id %u (%p)",
-			    tm_node->id, tm_node->hw_lvl_id,
+			    tm_node->id, tm_node->hw_lvl,
 			    tm_node->hw_id, tm_node);
 
 		/* Free specific HW resource if requested */
 		if (!skip_node && flags_mask &&
 		    tm_node->flags & NIX_TM_NODE_HWRES) {
 			req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
 			req->flags = 0;
-			req->schq_lvl = tm_node->hw_lvl_id;
+			req->schq_lvl = tm_node->hw_lvl;
 			req->schq = tm_node->hw_id;
 			rc = otx2_mbox_process(mbox);
 			if (rc)
@@ -1010,17 +1061,17 @@ nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
 	uint32_t l_id, schq_index;
 
 	otx2_tm_dbg("Assign hw id for child node %u, lvl %u, hw_lvl %u (%p)",
-		    child->id, child->level_id, child->hw_lvl_id, child);
+		    child->id, child->lvl, child->hw_lvl, child);
 
 	child->flags |= NIX_TM_NODE_HWRES;
 
 	/* Process root nodes */
 	if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
-	    child->hw_lvl_id == dev->otx2_tm_root_lvl && !parent) {
+	    child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
 		int idx = 0;
 		uint32_t tschq_con_index;
 
-		l_id = child->hw_lvl_id;
+		l_id = child->hw_lvl;
 		tschq_con_index = dev->txschq_contig_index[l_id];
 		hw_id = dev->txschq_contig_list[l_id][tschq_con_index];
 		child->hw_id = hw_id;
@@ -1032,10 +1083,10 @@ nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
 		return 0;
 	}
 	if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 &&
-	    child->hw_lvl_id == dev->otx2_tm_root_lvl && !parent) {
+	    child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
 		uint32_t tschq_con_index;
 
-		l_id = child->hw_lvl_id;
+		l_id = child->hw_lvl;
 		tschq_con_index = dev->txschq_index[l_id];
 		hw_id = dev->txschq_list[l_id][tschq_con_index];
 		child->hw_id = hw_id;
@@ -1044,7 +1095,7 @@ nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
 	}
 
 	/* Process children with parents */
-	l_id = child->hw_lvl_id;
+	l_id = child->hw_lvl;
 	schq_index = dev->txschq_index[l_id];
 	schq_con_index = dev->txschq_contig_index[l_id];
 
@@ -1069,8 +1120,8 @@ nix_tm_assign_hw_id(struct otx2_eth_dev *dev)
 	for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) {
 		TAILQ_FOREACH(parent, &dev->node_list, node) {
-			child_hw_lvl = parent->hw_lvl_id - 1;
-			if (parent->hw_lvl_id != i)
+			child_hw_lvl = parent->hw_lvl - 1;
+			if (parent->hw_lvl != i)
 				continue;
 			TAILQ_FOREACH(child, &dev->node_list, node) {
 				if (!child->parent)
@@ -1087,7 +1138,7 @@ nix_tm_assign_hw_id(struct otx2_eth_dev *dev)
 			 * Explicitly assign id to parent node if it
 			 * doesn't have a parent
 			 */
-			if (parent->hw_lvl_id == dev->otx2_tm_root_lvl)
+			if (parent->hw_lvl == dev->otx2_tm_root_lvl)
 				nix_tm_assign_id_to_node(dev, parent, NULL);
 		}
 	}
@@ -1102,7 +1153,7 @@ nix_tm_count_req_schq(struct otx2_eth_dev *dev,
 	uint8_t contig_count;
 
 	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
-		if (lvl == tm_node->hw_lvl_id) {
+		if (lvl == tm_node->hw_lvl) {
 			req->schq[lvl - 1] += tm_node->rr_num;
 			if (tm_node->max_prio != UINT32_MAX) {
 				contig_count = tm_node->max_prio + 1;
@@ -1111,7 +1162,7 @@ nix_tm_count_req_schq(struct otx2_eth_dev *dev,
 		}
 		if (lvl == dev->otx2_tm_root_lvl &&
 		    dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 &&
-		    tm_node->hw_lvl_id == dev->otx2_tm_root_lvl) {
+		    tm_node->hw_lvl == dev->otx2_tm_root_lvl) {
 			req->schq_contig[dev->otx2_tm_root_lvl]++;
 		}
 	}
@@ -1192,7 +1243,7 @@ nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
 			continue;
 
 		/* Enable xmit on sq */
-		if (tm_node->level_id != OTX2_TM_LVL_QUEUE) {
+		if (tm_node->lvl != OTX2_TM_LVL_QUEUE) {
 			tm_node->flags |= NIX_TM_NODE_ENABLED;
 			continue;
 		}
@@ -1210,8 +1261,7 @@ nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
 		txq = eth_dev->data->tx_queues[sq];
 
 		smq = tm_node->parent->hw_id;
-		rr_quantum = (tm_node->weight *
-			      NIX_TM_RR_QUANTUM_MAX) / MAX_SCHED_WEIGHT;
+		rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
 
 		rc = nix_tm_sw_xon(txq, smq, rr_quantum);
 		if (rc)
@@ -1332,6 +1382,7 @@ void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)
 
 int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
 {
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
 	uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
 	int rc;
@@ -1347,6 +1398,13 @@ int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
 	nix_tm_clear_shaper_profiles(dev);
 	dev->tm_flags = NIX_TM_DEFAULT_TREE;
 
+	/* Disable TL1 Static Priority when VF's are enabled
+	 * as otherwise VF's TL2 reallocation will be needed
+	 * runtime to support a specific topology of PF.
+	 */
+	if (pci_dev->max_vfs)
+		dev->tm_flags |= NIX_TM_TL1_NO_SP;
+
 	rc = nix_tm_prepare_default_tree(eth_dev);
 	if (rc != 0)
 		return rc;
@@ -1397,15 +1455,14 @@ otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
 	tm_node = nix_tm_node_search(dev, sq, true);
 
 	/* Check if we found a valid leaf node */
-	if (!tm_node || tm_node->level_id != OTX2_TM_LVL_QUEUE ||
+	if (!tm_node || tm_node->lvl != OTX2_TM_LVL_QUEUE ||
 	    !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) {
 		return -EIO;
 	}
 
 	/* Get SMQ Id of leaf node's parent */
 	*smq = tm_node->parent->hw_id;
-	*rr_quantum = (tm_node->weight * NIX_TM_RR_QUANTUM_MAX)
-		/ MAX_SCHED_WEIGHT;
+	*rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
 
 	rc = nix_smq_xoff(dev, *smq, false);
 	if (rc)

--- a/drivers/net/octeontx2/otx2_tm.h
+++ b/drivers/net/octeontx2/otx2_tm.h

@@ -10,6 +10,7 @@
 #include <rte_tm_driver.h>
 
 #define NIX_TM_DEFAULT_TREE	BIT_ULL(0)
+#define NIX_TM_TL1_NO_SP	BIT_ULL(3)
 
 struct otx2_eth_dev;
 
@@ -27,16 +28,18 @@ struct otx2_nix_tm_node {
 	uint32_t hw_id;
 	uint32_t priority;
 	uint32_t weight;
-	uint16_t level_id;
-	uint16_t hw_lvl_id;
+	uint16_t lvl;
+	uint16_t hw_lvl;
 	uint32_t rr_prio;
 	uint32_t rr_num;
 	uint32_t max_prio;
 	uint32_t parent_hw_id;
-	uint32_t flags;
+	uint32_t flags:16;
 #define NIX_TM_NODE_HWRES	BIT_ULL(0)
 #define NIX_TM_NODE_ENABLED	BIT_ULL(1)
 #define NIX_TM_NODE_USER	BIT_ULL(2)
+	/* Shaper algorithm for RED state @NIX_REDALG_E */
+	uint32_t red_algo:2;
+
 	struct otx2_nix_tm_node *parent;
 	struct rte_tm_node_params params;
 };
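Shrinking flags to a 16-bit bitfield makes room for the new 2-bit red_algo in the
same 32-bit word. A small illustration of the packing, assuming a typical ABI where
both bitfields share one storage unit (names are shortened; this is not the driver
struct):

#include <assert.h>
#include <stdint.h>

struct node_bits {
	uint32_t flags:16;	/* NIX_TM_NODE_* bits */
	uint32_t red_algo:2;	/* 0..3, per NIX_REDALG_E */
};

int main(void)
{
	struct node_bits n = { .flags = 0x7, .red_algo = 2 };

	/* on common ABIs both members fit in a single 32-bit storage unit */
	assert(sizeof(struct node_bits) == sizeof(uint32_t));
	assert(n.flags == 0x7 && n.red_algo == 2);
	return 0;
}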
@@ -45,7 +48,7 @@ struct otx2_nix_tm_shaper_profile {
 	TAILQ_ENTRY(otx2_nix_tm_shaper_profile) shaper;
 	uint32_t shaper_profile_id;
 	uint32_t reference_count;
-	struct rte_tm_shaper_params profile;
+	struct rte_tm_shaper_params params; /* Rate in bits/sec */
 };
 
 struct shaper_params {
@@ -63,6 +66,10 @@ TAILQ_HEAD(otx2_nix_tm_shaper_profile_list, otx2_nix_tm_shaper_profile);
 #define MAX_SCHED_WEIGHT ((uint8_t)~0)
 #define NIX_TM_RR_QUANTUM_MAX (BIT_ULL(24) - 1)
+#define NIX_TM_WEIGHT_TO_RR_QUANTUM(__weight)			\
+		((((__weight) & MAX_SCHED_WEIGHT) *		\
+		  NIX_TM_RR_QUANTUM_MAX) / MAX_SCHED_WEIGHT)
 
 /* DEFAULT_RR_WEIGHT * NIX_TM_RR_QUANTUM_MAX / MAX_SCHED_WEIGHT */
 /* = NIX_MAX_HW_MTU */
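NIX_TM_WEIGHT_TO_RR_QUANTUM() replaces the open-coded
(weight * NIX_TM_RR_QUANTUM_MAX) / MAX_SCHED_WEIGHT spread across otx2_tm.c:
a linear map from the 8-bit weight onto the 24-bit DWRR quantum. A quick
standalone check of the arithmetic (BIT_ULL is expanded by hand here):

#include <assert.h>
#include <stdint.h>

#define MAX_SCHED_WEIGHT ((uint8_t)~0)			/* 255 */
#define NIX_TM_RR_QUANTUM_MAX ((1ull << 24) - 1)	/* 16777215 */
#define NIX_TM_WEIGHT_TO_RR_QUANTUM(__weight)		\
		((((__weight) & MAX_SCHED_WEIGHT) *	\
		  NIX_TM_RR_QUANTUM_MAX) / MAX_SCHED_WEIGHT)

int main(void)
{
	/* full weight maps to the full 24-bit quantum */
	assert(NIX_TM_WEIGHT_TO_RR_QUANTUM(255) == 16777215);
	/* half weight: 128 * 16777215 / 255 == 8421504 exactly */
	assert(NIX_TM_WEIGHT_TO_RR_QUANTUM(128) == 8421504);
	return 0;
}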
@@ -73,52 +80,27 @@ TAILQ_HEAD(otx2_nix_tm_shaper_profile_list, otx2_nix_tm_shaper_profile);
 #define MAX_RATE_EXPONENT 0xf
 #define MAX_RATE_MANTISSA 0xff
 
-/** NIX rate limiter time-wheel resolution */
-#define L1_TIME_WHEEL_CCLK_TICKS 240
-#define LX_TIME_WHEEL_CCLK_TICKS 860
+#define NIX_SHAPER_RATE_CONST ((uint64_t)2E6)
 
-#define CCLK_HZ 1000000000
-
-/* NIX rate calculation
- *	CCLK = coprocessor-clock frequency in MHz
- *	CCLK_TICKS = rate limiter time-wheel resolution
- *
+/* NIX rate calculation in Bits/Sec
  *	PIR_ADD = ((256 + NIX_*_PIR[RATE_MANTISSA])
  *		<< NIX_*_PIR[RATE_EXPONENT]) / 256
- *	PIR = (CCLK / (CCLK_TICKS << NIX_*_PIR[RATE_DIVIDER_EXPONENT]))
- *		* PIR_ADD
+ *	PIR = (2E6 * PIR_ADD / (1 << NIX_*_PIR[RATE_DIVIDER_EXPONENT]))
  *
  *	CIR_ADD = ((256 + NIX_*_CIR[RATE_MANTISSA])
  *		<< NIX_*_CIR[RATE_EXPONENT]) / 256
- *	CIR = (CCLK / (CCLK_TICKS << NIX_*_CIR[RATE_DIVIDER_EXPONENT]))
- *		* CIR_ADD
+ *	CIR = (2E6 * CIR_ADD / (1 << NIX_*_CIR[RATE_DIVIDER_EXPONENT]))
  */
-#define SHAPER_RATE(cclk_hz, cclk_ticks, \
-			exponent, mantissa, div_exp) \
-	(((uint64_t)(cclk_hz) * ((256 + (mantissa)) << (exponent))) \
-		/ (((cclk_ticks) << (div_exp)) * 256))
+#define SHAPER_RATE(exponent, mantissa, div_exp) \
+	((NIX_SHAPER_RATE_CONST * ((256 + (mantissa)) << (exponent))) \
+		/ ((1ull << (div_exp)) * 256))
 
-#define L1_SHAPER_RATE(cclk_hz, exponent, mantissa, div_exp) \
-	SHAPER_RATE(cclk_hz, L1_TIME_WHEEL_CCLK_TICKS, \
-			exponent, mantissa, div_exp)
-
-#define LX_SHAPER_RATE(cclk_hz, exponent, mantissa, div_exp) \
-	SHAPER_RATE(cclk_hz, LX_TIME_WHEEL_CCLK_TICKS, \
-			exponent, mantissa, div_exp)
-
-/* Shaper rate limits */
-#define MIN_SHAPER_RATE(cclk_hz, cclk_ticks) \
-	SHAPER_RATE(cclk_hz, cclk_ticks, 0, 0, MAX_RATE_DIV_EXP)
-
-#define MAX_SHAPER_RATE(cclk_hz, cclk_ticks) \
-	SHAPER_RATE(cclk_hz, cclk_ticks, MAX_RATE_EXPONENT, \
-			MAX_RATE_MANTISSA, 0)
-
-#define MIN_L1_SHAPER_RATE(cclk_hz) \
-	MIN_SHAPER_RATE(cclk_hz, L1_TIME_WHEEL_CCLK_TICKS)
-
-#define MAX_L1_SHAPER_RATE(cclk_hz) \
-	MAX_SHAPER_RATE(cclk_hz, L1_TIME_WHEEL_CCLK_TICKS)
+/* 96xx rate limits in Bits/Sec */
+#define MIN_SHAPER_RATE \
+	SHAPER_RATE(0, 0, MAX_RATE_DIV_EXP)
+
+#define MAX_SHAPER_RATE \
+	SHAPER_RATE(MAX_RATE_EXPONENT, MAX_RATE_MANTISSA, 0)
 
 /** TM Shaper - low level operations */
@@ -150,4 +132,25 @@ TAILQ_HEAD(otx2_nix_tm_shaper_profile_list, otx2_nix_tm_shaper_profile);
 #define TXSCH_TL1_DFLT_RR_QTM	((1 << 24) - 1)
 #define TXSCH_TL1_DFLT_RR_PRIO	1
 
+static inline const char *
+nix_hwlvl2str(uint32_t hw_lvl)
+{
+	switch (hw_lvl) {
+	case NIX_TXSCH_LVL_MDQ:
+		return "SMQ/MDQ";
+	case NIX_TXSCH_LVL_TL4:
+		return "TL4";
+	case NIX_TXSCH_LVL_TL3:
+		return "TL3";
+	case NIX_TXSCH_LVL_TL2:
+		return "TL2";
+	case NIX_TXSCH_LVL_TL1:
+		return "TL1";
+	default:
+		break;
+	}
+
+	return "???";
+}
+
 #endif /* __OTX2_TM_H__ */