net/octeontx2: add Tx queue rate limit

Add Tx queue rate limiting support. This support is mutually
exclusive with TM support, i.e., when a TM hierarchy is configured,
the Tx queue rate limit configuration is no longer valid.

Signed-off-by: Krzysztof Kanas <kkanas@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Author:    Krzysztof Kanas
Date:      2020-04-03 14:22:15 +05:30
Committer: Ferruh Yigit
Parent:    c3f733efd4
Commit:    a3147ae9af

6 changed files with 246 additions and 2 deletions
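For context, the new PMD callback is reached through the generic ethdev API
rte_eth_set_queue_rate_limit(), which takes the rate in Mbps. A minimal usage
sketch (not part of this commit; port/queue ids are illustrative):

#include <errno.h>
#include <rte_ethdev.h>

/* Cap Tx queue 0 of a port to 100 Mbps. */
static int cap_tx_queue(uint16_t port_id)
{
	uint16_t queue_idx = 0, tx_rate_mbps = 100;
	int rc = rte_eth_set_queue_rate_limit(port_id, queue_idx, tx_rate_mbps);

	/* With more than one Tx queue configured, this PMD replaces its
	 * default scheduler tree on the first call and returns -EBUSY if
	 * the port is still started; stop the port and retry then.
	 */
	if (rc == -EBUSY) {
		rte_eth_dev_stop(port_id);
		rc = rte_eth_set_queue_rate_limit(port_id, queue_idx,
						  tx_rate_mbps);
	}
	return rc;
}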

doc/guides/nics/features/octeontx2.ini

@@ -31,6 +31,7 @@ Inline protocol = Y
 VLAN filter = Y
 Flow control = Y
 Flow API = Y
+Rate limitation = Y
 Jumbo frame = Y
 Scattered Rx = Y
 VLAN offload = Y

doc/guides/nics/features/octeontx2_vec.ini

@@ -28,6 +28,7 @@ Inner RSS = Y
 VLAN filter = Y
 Flow control = Y
 Flow API = Y
+Rate limitation = Y
 Jumbo frame = Y
 VLAN offload = Y
 QinQ offload = Y

doc/guides/nics/features/octeontx2_vf.ini

@@ -24,6 +24,7 @@ Inner RSS = Y
 Inline protocol = Y
 VLAN filter = Y
 Flow API = Y
+Rate limitation = Y
 Jumbo frame = Y
 Scattered Rx = Y
 VLAN offload = Y

drivers/net/octeontx2/otx2_ethdev.c

@@ -2082,6 +2082,7 @@ static const struct eth_dev_ops otx2_eth_dev_ops = {
 	.rx_descriptor_status = otx2_nix_rx_descriptor_status,
 	.tx_descriptor_status = otx2_nix_tx_descriptor_status,
 	.tx_done_cleanup = otx2_nix_tx_done_cleanup,
+	.set_queue_rate_limit = otx2_nix_tm_set_queue_rate_limit,
 	.pool_ops_supported = otx2_nix_pool_ops_supported,
 	.filter_ctrl = otx2_nix_dev_filter_ctrl,
 	.get_module_info = otx2_nix_get_module_info,

drivers/net/octeontx2/otx2_tm.c

@@ -2204,14 +2204,15 @@ otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Delete default/ratelimit tree */
-	if (dev->tm_flags & (NIX_TM_DEFAULT_TREE)) {
+	if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) {
 		rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
 		if (rc) {
 			error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 			error->message = "failed to free default resources";
 			return rc;
 		}
-		dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE);
+		dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE |
+				   NIX_TM_RATE_LIMIT_TREE);
 	}
 
 	/* Free up user alloc'ed resources */
@@ -2673,6 +2674,242 @@ int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
+static int
+nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	uint32_t def = eth_dev->data->nb_tx_queues;
+	struct rte_tm_node_params params;
+	uint32_t leaf_parent, i, rc = 0;
+
+	memset(&params, 0, sizeof(params));
+
+	if (nix_tm_have_tl1_access(dev)) {
+		dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
+		rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_TL1,
+					     OTX2_TM_LVL_ROOT, false, &params);
+		if (rc)
+			goto error;
+		rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_TL2,
+					     OTX2_TM_LVL_SCH1, false, &params);
+		if (rc)
+			goto error;
+		rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_TL3,
+					     OTX2_TM_LVL_SCH2, false, &params);
+		if (rc)
+			goto error;
+		rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_TL4,
+					     OTX2_TM_LVL_SCH3, false, &params);
+		if (rc)
+			goto error;
+		leaf_parent = def + 3;
+
+		/* Add per queue SMQ nodes */
+		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+			rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
+						     leaf_parent,
+						     0, DEFAULT_RR_WEIGHT,
+						     NIX_TXSCH_LVL_SMQ,
+						     OTX2_TM_LVL_SCH4,
+						     false, &params);
+			if (rc)
+				goto error;
+		}
+
+		/* Add leaf nodes */
+		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+			rc = nix_tm_node_add_to_list(dev, i,
+						     leaf_parent + 1 + i, 0,
+						     DEFAULT_RR_WEIGHT,
+						     NIX_TXSCH_LVL_CNT,
+						     OTX2_TM_LVL_QUEUE,
+						     false, &params);
+			if (rc)
+				goto error;
+		}
+
+		return 0;
+	}
+
+	dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
+	rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
+				     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2,
+				     OTX2_TM_LVL_ROOT, false, &params);
+	if (rc)
+		goto error;
+	rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
+				     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3,
+				     OTX2_TM_LVL_SCH1, false, &params);
+	if (rc)
+		goto error;
+	rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
+				     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4,
+				     OTX2_TM_LVL_SCH2, false, &params);
+	if (rc)
+		goto error;
+	leaf_parent = def + 2;
+
+	/* Add per queue SMQ nodes */
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
+					     leaf_parent,
+					     0, DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_SMQ,
+					     OTX2_TM_LVL_SCH3,
+					     false, &params);
+		if (rc)
+			goto error;
+	}
+
+	/* Add leaf nodes */
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_CNT,
+					     OTX2_TM_LVL_SCH4,
+					     false, &params);
+		if (rc)
+			break;
+	}
+error:
+	return rc;
+}
+
+static int
+otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev,
+			   struct otx2_nix_tm_node *tm_node,
+			   uint64_t tx_rate)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_nix_tm_shaper_profile profile;
+	struct otx2_mbox *mbox = dev->mbox;
+	volatile uint64_t *reg, *regval;
+	struct nix_txschq_config *req;
+	uint16_t flags;
+	uint8_t k = 0;
+	int rc;
+
+	flags = tm_node->flags;
+
+	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+	req->lvl = NIX_TXSCH_LVL_MDQ;
+	reg = req->reg;
+	regval = req->regval;
+
+	if (tx_rate == 0) {
+		k += prepare_tm_sw_xoff(tm_node, true, &reg[k], &regval[k]);
+		flags &= ~NIX_TM_NODE_ENABLED;
+		goto exit;
+	}
+
+	if (!(flags & NIX_TM_NODE_ENABLED)) {
+		k += prepare_tm_sw_xoff(tm_node, false, &reg[k], &regval[k]);
+		flags |= NIX_TM_NODE_ENABLED;
+	}
+
+	/* Use only PIR for rate limit */
+	memset(&profile, 0, sizeof(profile));
+	profile.params.peak.rate = tx_rate;
+	/* Minimum burst of ~4us Bytes of Tx */
+	profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS,
+					   (4ull * tx_rate) / (1E6 * 8));
+	if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate)
+		dev->tm_rate_min = tx_rate;
+
+	k += prepare_tm_shaper_reg(tm_node, &profile, &reg[k], &regval[k]);
+exit:
+	req->num_regs = k;
+	rc = otx2_mbox_process(mbox);
+	if (rc)
+		return rc;
+
+	tm_node->flags = flags;
+	return 0;
+}
+
+int
+otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
+				 uint16_t queue_idx, uint16_t tx_rate_mbps)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
+	struct otx2_nix_tm_node *tm_node;
+	int rc;
+
+	/* Check for supported revisions */
+	if (otx2_dev_is_95xx_Ax(dev) ||
+	    otx2_dev_is_96xx_Ax(dev))
+		return -EINVAL;
+
+	if (queue_idx >= eth_dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
+	    !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE))
+		goto error;
+
+	if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
+	    eth_dev->data->nb_tx_queues > 1) {
+		/* For TM topology change ethdev needs to be stopped */
+		if (eth_dev->data->dev_started)
+			return -EBUSY;
+
+		/*
+		 * Disable xmit will be enabled when
+		 * new topology is available.
+		 */
+		rc = nix_xmit_disable(eth_dev);
+		if (rc) {
+			otx2_err("failed to disable TX, rc=%d", rc);
+			return -EIO;
+		}
+
+		rc = nix_tm_free_resources(dev, 0, 0, false);
+		if (rc < 0) {
+			otx2_tm_dbg("failed to free default resources, rc %d",
+				    rc);
+			return -EIO;
+		}
+
+		rc = nix_tm_prepare_rate_limited_tree(eth_dev);
+		if (rc < 0) {
+			otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc);
+			return rc;
+		}
+
+		rc = nix_tm_alloc_resources(eth_dev, true);
+		if (rc != 0) {
+			otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc);
+			return rc;
+		}
+
+		dev->tm_flags &= ~NIX_TM_DEFAULT_TREE;
+		dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE;
+	}
+
+	tm_node = nix_tm_node_search(dev, queue_idx, false);
+
+	/* check if we found a valid leaf node */
+	if (!tm_node ||
+	    !nix_tm_is_leaf(dev, tm_node->lvl) ||
+	    !tm_node->parent ||
+	    tm_node->parent->hw_id == UINT32_MAX)
+		return -EIO;
+
+	return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate);
+error:
+	otx2_tm_dbg("Unsupported TM tree 0x%0x", dev->tm_flags);
+	return -EINVAL;
+}
+
 int
 otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
 {

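For intuition on the burst sizing in otx2_nix_tm_rate_limit_mdq() above: the
PIR burst is clamped to at least one maximum HW frame (NIX_MAX_HW_FRS) and
otherwise to roughly 4 us worth of bytes at the requested rate. A standalone
sketch of the same arithmetic (the helper name and the 9212-byte frame floor
are illustrative assumptions):

#include <stdint.h>
#include <stdio.h>

/* ~4 us of traffic at rate_bps bits/sec, in bytes:
 * bytes = rate_bps * 4e-6 / 8 = (4 * rate_bps) / (1e6 * 8)
 */
static uint64_t burst_bytes(uint64_t rate_bps, uint64_t max_frame)
{
	uint64_t b = (4 * rate_bps) / (1000000 * 8);

	return b > max_frame ? b : max_frame;
}

int main(void)
{
	/* At 10 Gbps, 4 us is only 5000 bytes, so the max-frame floor
	 * dominates; at 40 Gbps the 4 us term (20000 bytes) wins.
	 */
	printf("%lu\n", (unsigned long)burst_bytes(10ULL * 1000 * 1000 * 1000, 9212));
	printf("%lu\n", (unsigned long)burst_bytes(40ULL * 1000 * 1000 * 1000, 9212));
	return 0;
}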
drivers/net/octeontx2/otx2_tm.h

@@ -11,6 +11,7 @@
 
 #define NIX_TM_DEFAULT_TREE	BIT_ULL(0)
 #define NIX_TM_COMMITTED	BIT_ULL(1)
+#define NIX_TM_RATE_LIMIT_TREE	BIT_ULL(2)
 #define NIX_TM_TL1_NO_SP	BIT_ULL(3)
 
 struct otx2_eth_dev;
@@ -20,6 +21,8 @@ int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev);
 int otx2_nix_tm_fini(struct rte_eth_dev *eth_dev);
 int otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
 			      uint32_t *rr_quantum, uint16_t *smq);
+int otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
+				     uint16_t queue_idx, uint16_t tx_rate);
 int otx2_nix_sq_flush_pre(void *_txq, bool dev_started);
 int otx2_nix_sq_flush_post(void *_txq);
 int otx2_nix_sq_enable(void *_txq);
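The goto error path in otx2_nix_tm_set_queue_rate_limit() is what enforces the
mutual exclusion stated in the commit message: after an application commits its
own rte_tm hierarchy, neither NIX_TM_DEFAULT_TREE nor NIX_TM_RATE_LIMIT_TREE
remains set (see the otx2_nix_tm_hierarchy_commit hunk above), so rate-limit
requests fail. A sketch of the expected behavior (error handling elided, ids
illustrative):

#include <rte_ethdev.h>
#include <rte_tm.h>

static void tm_excludes_rate_limit(uint16_t port_id)
{
	struct rte_tm_error tm_err;
	int rc;

	/* Application-owned TM hierarchy takes over the scheduler tree. */
	rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &tm_err);

	/* Queue rate limiting is now rejected; the PMD logs
	 * "Unsupported TM tree" and returns -EINVAL.
	 */
	rc = rte_eth_set_queue_rate_limit(port_id, 0, 100);
	(void)rc; /* expected: -EINVAL */
}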