net/dpaa2: support level 2 in traffic management

This patch adds support for level 2 in QoS shaping: a channel level is introduced between the port (LNI) and its TX queues, each channel carrying its own private dual-rate shaper and SP/WFQ scheduling among its traffic classes.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Gagandeep Singh authored on 2022-01-03 15:31:21 +05:30, committed by Ferruh Yigit
parent ed1cdbed6a
commit 72100f0dee
8 changed files with 794 additions and 355 deletions

doc/guides/nics/dpaa2.rst

@@ -588,7 +588,7 @@ Supported Features
The following capabilities are supported:
- Level0 (root node) and Level1 are supported.
- Level0 (root node), Level1 and Level2 are supported.
- 1 private shaper at root node (port level) is supported.
- 8 TX queues per port supported (1 channel per port)
- Both SP and WFQ scheduling mechanisms are supported on all 8 queues.
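For reference, here is a minimal application-side sketch of building this three-level hierarchy (port/LNI -> channel -> TX queues) through the generic rte_tm API. The node ids, rates, and two-queue layout are illustrative assumptions, not values required by the driver; level ids 0/1/2 correspond to the driver's LNI/CHANNEL/QUEUE levels:

#include <string.h>
#include <rte_tm.h>

static int
dpaa2_tm_example(uint16_t port_id)
{
	struct rte_tm_error err;
	struct rte_tm_node_params np;
	struct rte_tm_shaper_profile_params sp = {
		/* rte_tm rates are in bytes per second */
		.committed = { .rate = 100 * 1000 * 1000 / 8, .size = 4096 },
		.peak = { .rate = 200 * 1000 * 1000 / 8, .size = 4096 },
		.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
	};
	int ret;

	ret = rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
	if (ret)
		return ret;

	/* level 0: root (LNI) node; non-leaf ids must be above the TX queue ids */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = 1;
	ret = rte_tm_node_add(port_id, 100, RTE_TM_NODE_ID_NULL, 0, 1,
			      0 /* LNI_LEVEL */, &np, &err);
	if (ret)
		return ret;

	/* level 1: one channel node under the root, reusing the same profile */
	ret = rte_tm_node_add(port_id, 99, 100, 0, 1,
			      1 /* CHANNEL_LEVEL */, &np, &err);
	if (ret)
		return ret;

	/* level 2: leaf nodes, one per TX queue (node id == TX queue id) */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
	np.leaf.cman = RTE_TM_CMAN_TAIL_DROP;
	ret = rte_tm_node_add(port_id, 0, 99, 0, 1, 2 /* QUEUE_LEVEL */, &np, &err);
	if (ret)
		return ret;
	ret = rte_tm_node_add(port_id, 1, 99, 1, 1, 2 /* QUEUE_LEVEL */, &np, &err);
	if (ret)
		return ret;

	return rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &err);
}

At commit time the driver walks this tree: dpni_set_tx_shaping() programs the LNI and per-channel shapers, and dpni_set_tx_priorities() programs SP/WFQ among each channel's traffic classes, as the dpaa2_hierarchy_commit() changes below show.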

drivers/net/dpaa2/dpaa2_ethdev.c

@@ -852,6 +852,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
struct dpni_queue tx_conf_cfg;
struct dpni_queue tx_flow_cfg;
uint8_t options = 0, flow_id;
uint16_t channel_id;
struct dpni_queue_id qid;
uint32_t tc_id;
int ret;
@@ -877,20 +878,6 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
tc_id = tx_queue_id;
flow_id = 0;
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
tc_id, flow_id, options, &tx_flow_cfg);
if (ret) {
DPAA2_PMD_ERR("Error in setting the tx flow: "
"tc_id=%d, flow=%d err=%d",
tc_id, flow_id, ret);
return -1;
}
dpaa2_q->flow_id = flow_id;
if (tx_queue_id == 0) {
/*Set tx-conf and error configuration*/
if (priv->flags & DPAA2_TX_CONF_ENABLE)
@@ -907,10 +894,26 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
return -1;
}
}
tc_id = tx_queue_id % priv->num_tx_tc;
channel_id = (uint8_t)(tx_queue_id / priv->num_tx_tc) % priv->num_channels;
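/* Illustrative mapping: with num_tx_tc = 8 and num_channels = 2, tx_queue_id 9
 * yields tc_id = 1 on channel_id = 1; the MC firmware takes the pair packed
 * as ((channel_id << 8) | tc_id), i.e. 0x101 here.
 */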
flow_id = 0;
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
((channel_id << 8) | tc_id), flow_id, options, &tx_flow_cfg);
if (ret) {
DPAA2_PMD_ERR("Error in setting the tx flow: "
"tc_id=%d, flow=%d err=%d",
tc_id, flow_id, ret);
return -1;
}
dpaa2_q->flow_id = flow_id;
dpaa2_q->tc_index = tc_id;
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX, dpaa2_q->tc_index,
DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index),
dpaa2_q->flow_id, &tx_flow_cfg, &qid);
if (ret) {
DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
@@ -942,7 +945,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
priv->token,
DPNI_QUEUE_TX,
tc_id,
((channel_id << 8) | tc_id),
&cong_notif_cfg);
if (ret) {
DPAA2_PMD_ERR(
@@ -959,7 +962,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
options = options | DPNI_QUEUE_OPT_USER_CTX;
tx_conf_cfg.user_context = (size_t)(dpaa2_q);
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
if (ret) {
DPAA2_PMD_ERR("Error in setting the tx conf flow: "
@@ -970,7 +973,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
}
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
if (ret) {
DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
@@ -1152,7 +1155,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
struct dpni_queue cfg;
struct dpni_error_cfg err_cfg;
uint16_t qdid;
struct dpni_queue_id qid;
struct dpaa2_queue *dpaa2_q;
int ret, i;
@@ -1162,7 +1164,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
intr_handle = dpaa2_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
@@ -1173,14 +1174,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
/* Power up the phy. Needed to make the link go UP */
dpaa2_dev_set_link_up(dev);
ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX, &qdid);
if (ret) {
DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
return ret;
}
priv->qdid = qdid;
for (i = 0; i < data->nb_rx_queues; i++) {
dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
@@ -2619,9 +2612,12 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
}
priv->num_rx_tc = attr.num_rx_tcs;
priv->num_tx_tc = attr.num_tx_tcs;
priv->qos_entries = attr.qos_entries;
priv->fs_entries = attr.fs_entries;
priv->dist_queues = attr.num_queues;
priv->num_channels = attr.num_channels;
priv->channel_inuse = 0;
/* only if the custom CG is enabled */
if (attr.options & DPNI_OPT_CUSTOM_CG)
@@ -2635,8 +2631,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
for (i = 0; i < attr.num_rx_tcs; i++)
priv->nb_rx_queues += attr.num_queues;
/* Using number of TX queues as number of TX TCs */
priv->nb_tx_queues = attr.num_tx_tcs;
priv->nb_tx_queues = attr.num_tx_tcs * attr.num_channels;
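/* e.g. 8 TX TCs x 2 channels = 16 TX queues exposed to the application */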
DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
priv->num_rx_tc, priv->nb_rx_queues,

drivers/net/dpaa2/dpaa2_ethdev.h

@@ -25,6 +25,7 @@
#define MAX_RX_QUEUES 128
#define MAX_TX_QUEUES 16
#define MAX_DPNI 8
#define DPAA2_MAX_CHANNELS 16
#define DPAA2_RX_DEFAULT_NBDESC 512
@@ -160,15 +161,17 @@ struct dpaa2_dev_priv {
void *rx_vq[MAX_RX_QUEUES];
void *tx_vq[MAX_TX_QUEUES];
struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
void *tx_conf_vq[MAX_TX_QUEUES];
void *tx_conf_vq[MAX_TX_QUEUES * DPAA2_MAX_CHANNELS];
void *rx_err_vq;
uint8_t flags; /*dpaa2 config flags */
uint8_t max_mac_filters;
uint8_t max_vlan_filters;
uint8_t num_rx_tc;
uint8_t num_tx_tc;
uint16_t qos_entries;
uint16_t fs_entries;
uint8_t dist_queues;
uint8_t num_channels;
uint8_t en_ordered;
uint8_t en_loose_ordered;
uint8_t max_cgs;
@@ -190,6 +193,7 @@ struct dpaa2_dev_priv {
struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
uint8_t channel_inuse;
LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
LIST_HEAD(nodes, dpaa2_tm_node) nodes;
LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles;

drivers/net/dpaa2/dpaa2_tm.c

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2020 NXP
* Copyright 2020-2021 NXP
*/
#include <rte_ethdev.h>
@@ -7,12 +7,16 @@
#include <rte_tm_driver.h>
#include "dpaa2_ethdev.h"
#include "dpaa2_pmd_logs.h"
#include <dpaa2_hw_dpio.h>
#define DPAA2_BURST_MAX (64 * 1024)
#define DPAA2_SHAPER_MIN_RATE 0
#define DPAA2_SHAPER_MAX_RATE 107374182400ull
#define DPAA2_WEIGHT_MAX 24701
#define DPAA2_PKT_ADJUST_LEN_MIN 0
#define DPAA2_PKT_ADJUST_LEN_MAX 0x7ff
int
dpaa2_tm_init(struct rte_eth_dev *dev)
@@ -66,6 +70,8 @@ dpaa2_capabilities_get(struct rte_eth_dev *dev,
struct rte_tm_capabilities *cap,
struct rte_tm_error *error)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
if (!cap)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED,
@@ -73,27 +79,31 @@ dpaa2_capabilities_get(struct rte_eth_dev *dev,
memset(cap, 0, sizeof(*cap));
/* root node(port) + txqs number, assuming each TX
/* root node(port) + channels + txqs number, assuming each TX
* Queue is mapped to each TC
*/
cap->n_nodes_max = 1 + dev->data->nb_tx_queues;
cap->n_levels_max = 2; /* port level + txqs level */
cap->n_nodes_max = 1 + priv->num_channels + dev->data->nb_tx_queues;
cap->n_levels_max = MAX_LEVEL;
cap->non_leaf_nodes_identical = 1;
cap->leaf_nodes_identical = 1;
cap->shaper_n_max = 1;
cap->shaper_private_n_max = 1;
cap->shaper_private_dual_rate_n_max = 1;
cap->shaper_n_max = 1 + priv->num_channels; /* LNI + channels */
cap->shaper_private_n_max = 1 + priv->num_channels;
cap->shaper_private_dual_rate_n_max = 1 + priv->num_channels;
cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
cap->shaper_pkt_length_adjust_min = DPAA2_PKT_ADJUST_LEN_MIN;
cap->shaper_pkt_length_adjust_max = DPAA2_PKT_ADJUST_LEN_MAX;
cap->sched_n_children_max = dev->data->nb_tx_queues;
cap->sched_sp_n_priorities_max = dev->data->nb_tx_queues;
cap->sched_wfq_n_children_per_group_max = dev->data->nb_tx_queues;
if (priv->num_channels > DPNI_MAX_TC)
cap->sched_n_children_max = priv->num_channels;
else
cap->sched_n_children_max = DPNI_MAX_TC;
cap->sched_sp_n_priorities_max = DPNI_MAX_TC;
cap->sched_wfq_n_children_per_group_max = DPNI_MAX_TC;
cap->sched_wfq_n_groups_max = 2;
cap->sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_STATS;
cap->sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
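/* Hardware weights are programmed in units of 100 (weight * 100 at hierarchy
 * commit), so the maximum weight exposed through rte_tm is 24701 / 100 = 247.
 */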
cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
return 0;
@@ -105,6 +115,8 @@ dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
struct rte_tm_level_capabilities *cap,
struct rte_tm_error *error)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
if (!cap)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED,
@@ -112,12 +124,12 @@ dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
memset(cap, 0, sizeof(*cap));
if (level_id > 1)
if (level_id > QUEUE_LEVEL)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_LEVEL_ID,
NULL, "Wrong level id\n");
if (level_id == 0) { /* Root node */
if (level_id == LNI_LEVEL) { /* Root node (LNI) */
cap->n_nodes_max = 1;
cap->n_nodes_nonleaf_max = 1;
cap->non_leaf_nodes_identical = 1;
@@ -127,20 +139,39 @@ dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
cap->nonleaf.shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
cap->nonleaf.shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
cap->nonleaf.sched_n_children_max = priv->num_channels; /* no. of channels */
cap->nonleaf.sched_sp_n_priorities_max = 1;
cap->nonleaf.sched_wfq_n_children_per_group_max =
dev->data->nb_tx_queues;
cap->nonleaf.sched_wfq_n_groups_max = 2;
cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
cap->nonleaf.sched_wfq_n_groups_max = 1;
cap->nonleaf.sched_wfq_weight_max = 1;
cap->nonleaf.stats_mask = RTE_TM_STATS_N_PKTS |
RTE_TM_STATS_N_BYTES;
} else if (level_id == CHANNEL_LEVEL) { /* channels */
cap->n_nodes_max = priv->num_channels;
cap->n_nodes_nonleaf_max = priv->num_channels;
cap->n_nodes_leaf_max = 0;
cap->non_leaf_nodes_identical = 1;
cap->nonleaf.shaper_private_supported = 1;
cap->nonleaf.shaper_private_dual_rate_supported = 1;
cap->nonleaf.shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
cap->nonleaf.shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
/* no. of class queues per channel */
cap->nonleaf.sched_n_children_max = priv->num_tx_tc;
cap->nonleaf.sched_sp_n_priorities_max = priv->num_tx_tc;
cap->nonleaf.sched_wfq_n_children_per_group_max = priv->num_tx_tc;
cap->nonleaf.sched_wfq_n_groups_max = 2;
cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
} else { /* leaf nodes */
cap->n_nodes_max = dev->data->nb_tx_queues;
cap->n_nodes_leaf_max = dev->data->nb_tx_queues;
/* queues per channel * number of channels */
cap->n_nodes_max = priv->num_tx_tc * priv->num_channels;
cap->n_nodes_leaf_max = priv->num_tx_tc * priv->num_channels;
cap->leaf_nodes_identical = 1;
cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS;
cap->leaf.shaper_private_supported = 0;
cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS |
RTE_TM_STATS_N_BYTES;
}
return 0;
@@ -167,18 +198,33 @@ dpaa2_node_capabilities_get(struct rte_eth_dev *dev, uint32_t node_id,
RTE_TM_ERROR_TYPE_NODE_ID,
NULL, "Node id does not exist\n");
if (node->type == 0) {
if (node->level_id == LNI_LEVEL) {
cap->shaper_private_supported = 1;
cap->shaper_private_dual_rate_supported = 1;
cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
cap->nonleaf.sched_n_children_max = priv->num_channels;
cap->nonleaf.sched_sp_n_priorities_max = 1;
cap->nonleaf.sched_wfq_n_children_per_group_max =
dev->data->nb_tx_queues;
cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
cap->nonleaf.sched_wfq_n_groups_max = 1;
cap->nonleaf.sched_wfq_weight_max = 1;
cap->stats_mask = RTE_TM_STATS_N_PKTS |
RTE_TM_STATS_N_BYTES;
} else if (node->level_id == CHANNEL_LEVEL) {
cap->shaper_private_supported = 1;
cap->shaper_private_dual_rate_supported = 1;
cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
cap->nonleaf.sched_n_children_max = priv->num_tx_tc;
cap->nonleaf.sched_sp_n_priorities_max = priv->num_tx_tc;
cap->nonleaf.sched_wfq_n_children_per_group_max = priv->num_tx_tc;
cap->nonleaf.sched_wfq_n_groups_max = 2;
cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
} else {
cap->stats_mask = RTE_TM_STATS_N_PKTS;
cap->stats_mask = RTE_TM_STATS_N_PKTS |
RTE_TM_STATS_N_BYTES;
}
return 0;
@@ -202,7 +248,7 @@ dpaa2_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, int *is_leaf,
RTE_TM_ERROR_TYPE_NODE_ID,
NULL, "Node id does not exist\n");
*is_leaf = node->type == 1/*NODE_QUEUE*/ ? 1 : 0;
*is_leaf = node->type == LEAF_NODE ? 1 : 0;
return 0;
}
@@ -257,6 +303,13 @@ dpaa2_shaper_profile_add(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
NULL, "Wrong shaper profile id\n");
if (params->pkt_length_adjust > DPAA2_PKT_ADJUST_LEN_MAX ||
params->pkt_length_adjust < DPAA2_PKT_ADJUST_LEN_MIN)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_CAPABILITIES,
NULL,
"Not supported pkt adjust length\n");
profile = dpaa2_shaper_profile_from_id(priv, shaper_profile_id);
if (profile)
return -rte_tm_error_set(error, EEXIST,
@@ -318,7 +371,7 @@ dpaa2_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
RTE_TM_ERROR_TYPE_NODE_WEIGHT,
NULL, "Weight is out of range\n");
if (level_id != 0 && level_id != 1)
if (level_id > QUEUE_LEVEL)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_LEVEL_ID,
NULL, "Wrong level id\n");
@@ -338,39 +391,38 @@ dpaa2_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
NULL, "Shared shaper is not supported\n");
/* verify port (root node) settings */
/* verify non leaf nodes settings */
if (node_id >= dev->data->nb_tx_queues) {
if (params->nonleaf.wfq_weight_mode)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
NULL, "WFQ weight mode is not supported\n");
} else {
if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
NULL, "Private shaper not supported on leaf\n");
}
/* check leaf node */
if (level_id == QUEUE_LEVEL) {
if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP)
return -rte_tm_error_set(error, ENODEV,
RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
NULL, "Only taildrop is supported\n");
if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
RTE_TM_STATS_N_BYTES))
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
NULL,
"Requested port stats are not supported\n");
} else if (level_id == LNI_LEVEL) {
if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
RTE_TM_STATS_N_BYTES))
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
NULL,
"Requested port stats are not supported\n");
return 0;
}
if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
NULL, "Private shaper not supported on leaf\n");
if (params->stats_mask & ~RTE_TM_STATS_N_PKTS)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
NULL,
"Requested stats are not supported\n");
/* check leaf node */
if (level_id == 1) {
if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP)
return -rte_tm_error_set(error, ENODEV,
RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
NULL, "Only taildrop is supported\n");
}
return 0;
@@ -407,7 +459,7 @@ dpaa2_node_add(struct rte_eth_dev *dev, uint32_t node_id,
}
if (parent_node_id == RTE_TM_NODE_ID_NULL) {
LIST_FOREACH(node, &priv->nodes, next) {
if (node->type != 0 /*root node*/)
if (node->level_id != LNI_LEVEL)
continue;
return -rte_tm_error_set(error, EINVAL,
@@ -435,14 +487,29 @@ dpaa2_node_add(struct rte_eth_dev *dev, uint32_t node_id,
NULL, NULL);
node->id = node_id;
node->type = parent_node_id == RTE_TM_NODE_ID_NULL ? 0/*NODE_PORT*/ :
1/*NODE_QUEUE*/;
if (node_id > dev->data->nb_tx_queues)
node->type = NON_LEAF_NODE;
else
node->type = LEAF_NODE;
node->level_id = level_id;
if (node->level_id == CHANNEL_LEVEL) {
if (priv->channel_inuse < priv->num_channels) {
node->channel_id = priv->channel_inuse;
priv->channel_inuse++;
} else {
DPAA2_PMD_ERR("No free channel id available");
}
}
if (parent) {
node->parent = parent;
parent->refcnt++;
}
/* TODO: add check if refcnt is more than supported children */
if (profile) {
node->profile = profile;
profile->refcnt++;
@@ -464,6 +531,7 @@ dpaa2_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct dpaa2_tm_node *node;
/* XXX: update it */
if (0) {
return -rte_tm_error_set(error, EPERM,
RTE_TM_ERROR_TYPE_UNSPECIFIED,
@@ -493,119 +561,326 @@ dpaa2_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
return 0;
}
static int
dpaa2_tm_configure_queue(struct rte_eth_dev *dev, struct dpaa2_tm_node *node)
{
int ret = 0;
uint32_t tc_id;
uint8_t flow_id, options = 0;
struct dpni_queue tx_flow_cfg;
struct dpni_queue_id qid;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct dpaa2_queue *dpaa2_q;
memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id];
tc_id = node->parent->tc_id;
node->parent->tc_id++;
flow_id = 0;
if (dpaa2_q == NULL) {
printf("Queue is not configured for node = %d\n", node->id);
return -1;
}
DPAA2_PMD_DEBUG("tc_id = %d, channel = %d\n\n", tc_id,
node->parent->channel_id);
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
((node->parent->channel_id << 8) | tc_id),
flow_id, options, &tx_flow_cfg);
if (ret) {
printf("Error in setting the tx flow: "
"channel id = %d tc_id= %d, param = 0x%x "
"flow=%d err=%d\n", node->parent->channel_id, tc_id,
((node->parent->channel_id << 8) | tc_id), flow_id,
ret);
return -1;
}
dpaa2_q->flow_id = flow_id;
dpaa2_q->tc_index = tc_id;
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX, ((node->parent->channel_id << 8) | dpaa2_q->tc_index),
dpaa2_q->flow_id, &tx_flow_cfg, &qid);
if (ret) {
printf("Error in getting LFQID err=%d", ret);
return -1;
}
dpaa2_q->fqid = qid.fqid;
/* setting congestion notification */
if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
struct dpni_congestion_notification_cfg cong_notif_cfg = {0};
cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
cong_notif_cfg.threshold_entry = dpaa2_q->nb_desc;
/* Notify that the queue is not congested when the data in
* the queue is below this threshold (90% of the entry threshold).
*/
cong_notif_cfg.threshold_exit = (dpaa2_q->nb_desc * 9) / 10;
cong_notif_cfg.message_ctx = 0;
cong_notif_cfg.message_iova =
(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
cong_notif_cfg.notification_mode =
DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
DPNI_CONG_OPT_COHERENT_WRITE;
cong_notif_cfg.cg_point = DPNI_CP_QUEUE;
ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
priv->token,
DPNI_QUEUE_TX,
((node->parent->channel_id << 8) | tc_id),
&cong_notif_cfg);
if (ret) {
printf("Error in setting tx congestion notification: "
"err=%d", ret);
return -ret;
}
}
return 0;
}
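/* Sorting note: each recursion level runs one bubble pass that sinks the
 * numerically largest priority to nodes[n - 1] and then recurses on the first
 * n - 1 entries, so queues are configured in ascending priority order; every
 * dpaa2_tm_configure_queue() call then consumes the parent channel's next
 * tc_id.
 */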
static void
dpaa2_tm_sort_and_configure(struct rte_eth_dev *dev,
struct dpaa2_tm_node **nodes, int n)
{
struct dpaa2_tm_node *temp_node;
int i;
if (n == 1) {
DPAA2_PMD_DEBUG("node id = %d\n, priority = %d, index = %d\n",
nodes[n - 1]->id, nodes[n - 1]->priority,
n - 1);
dpaa2_tm_configure_queue(dev, nodes[n - 1]);
return;
}
for (i = 0; i < n - 1; i++) {
if (nodes[i]->priority > nodes[i + 1]->priority) {
temp_node = nodes[i];
nodes[i] = nodes[i + 1];
nodes[i + 1] = temp_node;
}
}
dpaa2_tm_sort_and_configure(dev, nodes, n - 1);
DPAA2_PMD_DEBUG("node id = %d\n, priority = %d, index = %d\n",
nodes[n - 1]->id, nodes[n - 1]->priority,
n - 1);
dpaa2_tm_configure_queue(dev, nodes[n - 1]);
}
static int
dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
struct rte_tm_error *error)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct dpaa2_tm_node *node, *temp_node;
struct dpaa2_tm_node *node;
struct dpaa2_tm_node *leaf_node, *temp_leaf_node, *channel_node;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
int ret;
int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
struct dpni_tx_priorities_cfg prio_cfg;
int ret, t;
memset(&prio_cfg, 0, sizeof(prio_cfg));
memset(conf, 0, sizeof(conf));
/* Populate TCs */
LIST_FOREACH(channel_node, &priv->nodes, next) {
struct dpaa2_tm_node *nodes[DPNI_MAX_TC];
int i = 0;
LIST_FOREACH(node, &priv->nodes, next) {
if (node->type == 0/*root node*/) {
if (!node->profile)
if (channel_node->level_id != CHANNEL_LEVEL)
continue;
LIST_FOREACH(leaf_node, &priv->nodes, next) {
if (leaf_node->level_id == LNI_LEVEL ||
leaf_node->level_id == CHANNEL_LEVEL)
continue;
if (leaf_node->parent == channel_node) {
if (i >= DPNI_MAX_TC) {
ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
"More children than supported\n");
goto out;
}
nodes[i++] = leaf_node;
}
}
if (i > 0) {
DPAA2_PMD_DEBUG("Configure queues\n");
dpaa2_tm_sort_and_configure(dev, nodes, i);
}
}
/* Shaping */
LIST_FOREACH(node, &priv->nodes, next) {
if (node->type == NON_LEAF_NODE) {
if (!node->profile)
continue;
struct dpni_tx_shaping_cfg tx_cr_shaper, tx_er_shaper;
uint32_t param = 0;
tx_cr_shaper.max_burst_size =
node->profile->params.committed.size;
tx_cr_shaper.rate_limit =
node->profile->params.committed.rate / (1024 * 1024);
node->profile->params.committed.rate /
(1024 * 1024);
tx_er_shaper.max_burst_size =
node->profile->params.peak.size;
tx_er_shaper.rate_limit =
node->profile->params.peak.rate / (1024 * 1024);
/* root node */
if (node->parent == NULL) {
DPAA2_PMD_DEBUG("LNI S.rate = %u, burst =%u\n",
tx_cr_shaper.rate_limit,
tx_cr_shaper.max_burst_size);
param = 0x2;
param |= node->profile->params.pkt_length_adjust << 16;
} else {
DPAA2_PMD_DEBUG("Channel = %d S.rate = %u\n",
node->channel_id,
tx_cr_shaper.rate_limit);
param = (node->channel_id << 8);
}
ret = dpni_set_tx_shaping(dpni, 0, priv->token,
&tx_cr_shaper, &tx_er_shaper, 0);
&tx_cr_shaper, &tx_er_shaper, param);
if (ret) {
ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_SHAPER_PROFILE, NULL,
"Error in setting Shaping\n");
goto out;
}
continue;
} else { /* level 1, all leaf nodes */
if (node->id >= dev->data->nb_tx_queues) {
}
}
LIST_FOREACH(channel_node, &priv->nodes, next) {
int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
struct dpni_tx_priorities_cfg prio_cfg;
memset(&prio_cfg, 0, sizeof(prio_cfg));
memset(conf, 0, sizeof(conf));
/* Process for each channel */
if (channel_node->level_id != CHANNEL_LEVEL)
continue;
LIST_FOREACH(leaf_node, &priv->nodes, next) {
struct dpaa2_queue *leaf_dpaa2_q;
uint8_t leaf_tc_id;
if (leaf_node->level_id == LNI_LEVEL ||
leaf_node->level_id == CHANNEL_LEVEL)
continue;
/* level 2, all leaf nodes */
if (leaf_node->id >= dev->data->nb_tx_queues) {
ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID, NULL,
"Not enough txqs configured\n");
goto out;
}
if (conf[node->id])
if (conf[leaf_node->id])
continue;
LIST_FOREACH(temp_node, &priv->nodes, next) {
if (temp_node->id == node->id ||
temp_node->type == 0)
if (leaf_node->parent != channel_node)
continue;
leaf_dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[leaf_node->id];
leaf_tc_id = leaf_dpaa2_q->tc_index;
/* Process sibling leaf nodes */
LIST_FOREACH(temp_leaf_node, &priv->nodes, next) {
if (temp_leaf_node->id == leaf_node->id ||
temp_leaf_node->level_id == LNI_LEVEL ||
temp_leaf_node->level_id == CHANNEL_LEVEL)
continue;
if (conf[temp_node->id])
if (temp_leaf_node->parent != channel_node)
continue;
if (node->priority == temp_node->priority) {
if (conf[temp_leaf_node->id])
continue;
if (leaf_node->priority == temp_leaf_node->priority) {
struct dpaa2_queue *temp_leaf_dpaa2_q;
uint8_t temp_leaf_tc_id;
temp_leaf_dpaa2_q = (struct dpaa2_queue *)
dev->data->tx_queues[temp_leaf_node->id];
temp_leaf_tc_id = temp_leaf_dpaa2_q->tc_index;
if (wfq_grp == 0) {
prio_cfg.tc_sched[temp_node->id].mode =
DPNI_TX_SCHED_WEIGHTED_A;
/* DPDK support lowest weight 1
* and DPAA2 platform 100
*/
prio_cfg.tc_sched[temp_node->id].delta_bandwidth =
temp_node->weight + 99;
prio_cfg.tc_sched[temp_leaf_tc_id].mode =
DPNI_TX_SCHED_WEIGHTED_A;
/* DPAA2 supports weights in multiples of 100 */
prio_cfg.tc_sched[temp_leaf_tc_id].delta_bandwidth =
temp_leaf_node->weight * 100;
} else if (wfq_grp == 1) {
prio_cfg.tc_sched[temp_node->id].mode =
DPNI_TX_SCHED_WEIGHTED_B;
prio_cfg.tc_sched[temp_node->id].delta_bandwidth =
temp_node->weight + 99;
prio_cfg.tc_sched[temp_leaf_tc_id].mode =
DPNI_TX_SCHED_WEIGHTED_B;
prio_cfg.tc_sched[temp_leaf_tc_id].delta_bandwidth =
temp_leaf_node->weight * 100;
} else {
/*TODO: add one more check for
* number of nodes in a group
*/
ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
"Only 2 WFQ Groups are supported\n");
goto out;
}
conf[temp_node->id] = 1;
is_wfq_grp = 1;
conf[temp_leaf_node->id] = 1;
}
}
if (is_wfq_grp) {
if (wfq_grp == 0) {
prio_cfg.tc_sched[node->id].mode =
DPNI_TX_SCHED_WEIGHTED_A;
prio_cfg.tc_sched[node->id].delta_bandwidth =
node->weight + 99;
prio_cfg.prio_group_A = node->priority;
prio_cfg.tc_sched[leaf_tc_id].mode =
DPNI_TX_SCHED_WEIGHTED_A;
prio_cfg.tc_sched[leaf_tc_id].delta_bandwidth =
leaf_node->weight * 100;
prio_cfg.prio_group_A = leaf_node->priority;
} else if (wfq_grp == 1) {
prio_cfg.tc_sched[node->id].mode =
DPNI_TX_SCHED_WEIGHTED_B;
prio_cfg.tc_sched[node->id].delta_bandwidth =
node->weight + 99;
prio_cfg.prio_group_B = node->priority;
prio_cfg.tc_sched[leaf_tc_id].mode =
DPNI_TX_SCHED_WEIGHTED_B;
prio_cfg.tc_sched[leaf_tc_id].delta_bandwidth =
leaf_node->weight * 100;
prio_cfg.prio_group_B = leaf_node->priority;
}
wfq_grp++;
is_wfq_grp = 0;
}
conf[node->id] = 1;
conf[leaf_node->id] = 1;
}
if (wfq_grp)
if (wfq_grp > 1) {
prio_cfg.separate_groups = 1;
}
ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
if (ret) {
ret = -rte_tm_error_set(error, EINVAL,
if (prio_cfg.prio_group_B < prio_cfg.prio_group_A) {
prio_cfg.prio_group_A = 0;
prio_cfg.prio_group_B = 1;
} else {
prio_cfg.prio_group_A = 1;
prio_cfg.prio_group_B = 0;
}
}
prio_cfg.prio_group_A = 1;
prio_cfg.channel_idx = channel_node->channel_id;
ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
if (ret) {
ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
"Scheduling Failed\n");
goto out;
goto out;
}
DPAA2_PMD_DEBUG("########################################\n");
DPAA2_PMD_DEBUG("Channel idx = %d\n", prio_cfg.channel_idx);
for (t = 0; t < DPNI_MAX_TC; t++) {
DPAA2_PMD_DEBUG("tc = %d mode = %d ", t, prio_cfg.tc_sched[t].mode);
DPAA2_PMD_DEBUG("delta = %d\n", prio_cfg.tc_sched[t].delta_bandwidth);
}
DPAA2_PMD_DEBUG("prioritya = %d\n", prio_cfg.prio_group_A);
DPAA2_PMD_DEBUG("priorityb = %d\n", prio_cfg.prio_group_B);
DPAA2_PMD_DEBUG("separate grps = %d\n\n", prio_cfg.separate_groups);
}
return 0;
out:
@@ -617,6 +892,81 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
return ret;
}
static int
dpaa2_node_stats_read(struct rte_eth_dev *dev, uint32_t node_id,
struct rte_tm_node_stats *stats, uint64_t *stats_mask,
int clear, struct rte_tm_error *error)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct dpaa2_tm_node *node;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
union dpni_statistics value;
int ret = 0;
node = dpaa2_node_from_id(priv, node_id);
if (!node)
return -rte_tm_error_set(error, ENODEV,
RTE_TM_ERROR_TYPE_NODE_ID,
NULL, "Node id does not exist\n");
if (stats_mask)
*stats_mask = node->stats_mask;
if (!stats)
return 0;
memset(stats, 0, sizeof(*stats));
memset(&value, 0, sizeof(union dpni_statistics));
if (node->level_id == LNI_LEVEL) {
uint8_t page1 = 1;
ret = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
page1, 0, &value);
if (ret)
return -rte_tm_error_set(error, -ret,
RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
"Failed to read port statistics\n");
if (node->stats_mask & RTE_TM_STATS_N_PKTS)
stats->n_pkts = value.page_1.egress_all_frames;
if (node->stats_mask & RTE_TM_STATS_N_BYTES)
stats->n_bytes = value.page_1.egress_all_bytes;
if (clear) {
ret = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
if (ret)
return -rte_tm_error_set(error, -ret,
RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
"Failed to reset port statistics\n");
}
} else if (node->level_id == QUEUE_LEVEL) {
uint8_t page3 = 3;
struct dpaa2_queue *dpaa2_q;
dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id];
ret = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
page3,
(node->parent->channel_id << 8 |
dpaa2_q->tc_index), &value);
if (ret)
return -rte_tm_error_set(error, -ret,
RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
"Failed to read queue statistics\n");
if (node->stats_mask & RTE_TM_STATS_N_PKTS)
stats->n_pkts = value.page_3.ceetm_dequeue_frames;
if (node->stats_mask & RTE_TM_STATS_N_BYTES)
stats->n_bytes = value.page_3.ceetm_dequeue_bytes;
} else {
return -rte_tm_error_set(error, -1,
RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
"Failed to read channel statistics\n");
}
return 0;
}
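/* Application-side view, a sketch: this handler backs rte_tm_node_stats_read();
 * node id 1 is an assumed leaf id.
 *
 *	struct rte_tm_node_stats stats;
 *	uint64_t mask;
 *	struct rte_tm_error err;
 *	int ret = rte_tm_node_stats_read(port_id, 1, &stats, &mask, 0, &err);
 */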
const struct rte_tm_ops dpaa2_tm_ops = {
.node_type_get = dpaa2_node_type_get,
.capabilities_get = dpaa2_capabilities_get,
@@ -627,4 +977,5 @@ const struct rte_tm_ops dpaa2_tm_ops = {
.node_add = dpaa2_node_add,
.node_delete = dpaa2_node_delete,
.hierarchy_commit = dpaa2_hierarchy_commit,
.node_stats_read = dpaa2_node_stats_read,
};

drivers/net/dpaa2/dpaa2_tm.h

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2020 NXP
* Copyright 2020-2021 NXP
*/
#ifndef _DPAA2_TM_H_
@@ -7,6 +7,18 @@
#include <rte_tm.h>
enum node_type {
NON_LEAF_NODE = 0,
LEAF_NODE
};
enum level_type {
LNI_LEVEL = 0,
CHANNEL_LEVEL,
QUEUE_LEVEL,
MAX_LEVEL
};
struct dpaa2_tm_shaper_profile {
LIST_ENTRY(dpaa2_tm_shaper_profile) next;
uint32_t id;
@@ -18,6 +30,9 @@ struct dpaa2_tm_node {
LIST_ENTRY(dpaa2_tm_node) next;
uint32_t id;
uint32_t type;
uint32_t level_id;
uint16_t channel_id; /* Only for level 1 nodes */
uint16_t tc_id; /* Only for level 1 nodes */
int refcnt;
struct dpaa2_tm_node *parent;
struct dpaa2_tm_shaper_profile *profile;

drivers/net/dpaa2/mc/dpni.c

@@ -916,6 +916,44 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
/**
* dpni_get_link_cfg() - return the link configuration configured by
* dpni_set_link_cfg().
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @cfg: Link configuration from dpni object
*
* Return: '0' on Success; Error code otherwise.
*/
int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
struct dpni_link_cfg *cfg)
{
struct mc_command cmd = { 0 };
struct dpni_cmd_set_link_cfg *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_CFG,
cmd_flags,
token);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
rsp_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
cfg->advertising = le64_to_cpu(rsp_params->advertising);
cfg->options = le64_to_cpu(rsp_params->options);
cfg->rate = le32_to_cpu(rsp_params->rate);
return err;
}
/**
* dpni_get_link_state() - Return the link state (either up or down)
* @mc_io: Pointer to MC portal's I/O object
@@ -1678,6 +1716,38 @@ int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
/**
* dpni_get_tx_confirmation_mode() - Get Tx confirmation mode
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @mode: Tx confirmation mode
*
* Return: '0' on Success; Error code otherwise.
*/
int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_confirmation_mode *mode)
{
struct dpni_tx_confirmation_mode *rsp_params;
struct mc_command cmd = { 0 };
int err;
cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONFIRMATION_MODE,
cmd_flags,
token);
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
rsp_params = (struct dpni_tx_confirmation_mode *)cmd.params;
*mode = rsp_params->confirmation_mode;
return 0;
}
/**
* dpni_set_qos_table() - Set QoS mapping table
* @mc_io: Pointer to MC portal's I/O object
@@ -2733,6 +2803,122 @@ int dpni_get_opr(struct fsl_mc_io *mc_io,
return 0;
}
int dpni_load_sw_sequence(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
struct dpni_load_ss_cfg *cfg)
{
struct dpni_load_sw_sequence *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_LOAD_SW_SEQUENCE,
cmd_flags,
token);
cmd_params = (struct dpni_load_sw_sequence *)cmd.params;
cmd_params->dest = cfg->dest;
cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
cmd_params->ss_size = cpu_to_le16(cfg->ss_size);
cmd_params->ss_iova = cpu_to_le64(cfg->ss_iova);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
int dpni_enable_sw_sequence(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
struct dpni_enable_ss_cfg *cfg)
{
struct dpni_enable_sw_sequence *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_SW_SEQUENCE,
cmd_flags,
token);
cmd_params = (struct dpni_enable_sw_sequence *)cmd.params;
cmd_params->dest = cfg->dest;
cmd_params->set_start = cfg->set_start;
cmd_params->hxs = cpu_to_le16(cfg->hxs);
cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
cmd_params->param_offset = cfg->param_offset;
cmd_params->param_size = cfg->param_size;
cmd_params->param_iova = cpu_to_le64(cfg->param_iova);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dpni_get_sw_sequence_layout() - Get the soft sequence layout
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @src: Source of the layout (WRIOP Rx or Tx)
* @ss_layout_iova: I/O virtual address of 264 bytes DMA-able memory
*
* warning: After calling this function, call dpni_extract_sw_sequence_layout()
* to get the layout.
*
* Return: '0' on Success; error code otherwise.
*/
int dpni_get_sw_sequence_layout(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_soft_sequence_dest src,
uint64_t ss_layout_iova)
{
struct dpni_get_sw_sequence_layout *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT,
cmd_flags,
token);
cmd_params = (struct dpni_get_sw_sequence_layout *)cmd.params;
cmd_params->src = src;
cmd_params->layout_iova = cpu_to_le64(ss_layout_iova);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dpni_extract_sw_sequence_layout() - extract the software sequence layout
* @layout: software sequence layout
* @sw_sequence_layout_buf: Zeroed 264 bytes of memory before mapping it
* to DMA
*
* This function has to be called after dpni_get_sw_sequence_layout
*
*/
void dpni_extract_sw_sequence_layout(struct dpni_sw_sequence_layout *layout,
const uint8_t *sw_sequence_layout_buf)
{
const struct dpni_sw_sequence_layout_entry *ext_params;
int i;
uint16_t ss_size, ss_offset;
ext_params = (const struct dpni_sw_sequence_layout_entry *)
sw_sequence_layout_buf;
for (i = 0; i < DPNI_SW_SEQUENCE_LAYOUT_SIZE; i++) {
ss_offset = le16_to_cpu(ext_params[i].ss_offset);
ss_size = le16_to_cpu(ext_params[i].ss_size);
if (ss_offset == 0 && ss_size == 0) {
layout->num_ss = i;
return;
}
layout->ss[i].ss_offset = ss_offset;
layout->ss[i].ss_size = ss_size;
layout->ss[i].param_offset = ext_params[i].param_offset;
layout->ss[i].param_size = ext_params[i].param_size;
}
}
/**
* dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
* @mc_io: Pointer to MC portal's I/O object
@@ -2901,119 +3087,3 @@ int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
return err;
}
int dpni_load_sw_sequence(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
struct dpni_load_ss_cfg *cfg)
{
struct dpni_load_sw_sequence *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_LOAD_SW_SEQUENCE,
cmd_flags,
token);
cmd_params = (struct dpni_load_sw_sequence *)cmd.params;
cmd_params->dest = cfg->dest;
cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
cmd_params->ss_size = cpu_to_le16(cfg->ss_size);
cmd_params->ss_iova = cpu_to_le64(cfg->ss_iova);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
int dpni_enable_sw_sequence(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
struct dpni_enable_ss_cfg *cfg)
{
struct dpni_enable_sw_sequence *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_SW_SEQUENCE,
cmd_flags,
token);
cmd_params = (struct dpni_enable_sw_sequence *)cmd.params;
cmd_params->dest = cfg->dest;
cmd_params->set_start = cfg->set_start;
cmd_params->hxs = cpu_to_le16(cfg->hxs);
cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
cmd_params->param_offset = cfg->param_offset;
cmd_params->param_size = cfg->param_size;
cmd_params->param_iova = cpu_to_le64(cfg->param_iova);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dpni_get_sw_sequence_layout() - Get the soft sequence layout
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @src: Source of the layout (WRIOP Rx or Tx)
* @ss_layout_iova: I/O virtual address of 264 bytes DMA-able memory
*
* warning: After calling this function, call dpni_extract_sw_sequence_layout()
* to get the layout.
*
* Return: '0' on Success; error code otherwise.
*/
int dpni_get_sw_sequence_layout(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_soft_sequence_dest src,
uint64_t ss_layout_iova)
{
struct dpni_get_sw_sequence_layout *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT,
cmd_flags,
token);
cmd_params = (struct dpni_get_sw_sequence_layout *)cmd.params;
cmd_params->src = src;
cmd_params->layout_iova = cpu_to_le64(ss_layout_iova);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dpni_extract_sw_sequence_layout() - extract the software sequence layout
* @layout: software sequence layout
* @sw_sequence_layout_buf: Zeroed 264 bytes of memory before mapping it
* to DMA
*
* This function has to be called after dpni_get_sw_sequence_layout
*
*/
void dpni_extract_sw_sequence_layout(struct dpni_sw_sequence_layout *layout,
const uint8_t *sw_sequence_layout_buf)
{
const struct dpni_sw_sequence_layout_entry *ext_params;
int i;
uint16_t ss_size, ss_offset;
ext_params = (const struct dpni_sw_sequence_layout_entry *)
sw_sequence_layout_buf;
for (i = 0; i < DPNI_SW_SEQUENCE_LAYOUT_SIZE; i++) {
ss_offset = le16_to_cpu(ext_params[i].ss_offset);
ss_size = le16_to_cpu(ext_params[i].ss_size);
if (ss_offset == 0 && ss_size == 0) {
layout->num_ss = i;
return;
}
layout->ss[i].ss_offset = ss_offset;
layout->ss[i].ss_size = ss_size;
layout->ss[i].param_offset = ext_params[i].param_offset;
layout->ss[i].param_size = ext_params[i].param_size;
}
}

drivers/net/dpaa2/mc/fsl_dpni.h

@@ -761,6 +761,11 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
uint16_t token,
const struct dpni_link_cfg *cfg);
int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
struct dpni_link_cfg *cfg);
/**
* struct dpni_link_state - Structure representing DPNI link state
* @rate: Rate
@@ -1709,63 +1714,6 @@ int dpni_get_opr(struct fsl_mc_io *mc_io,
uint8_t flags,
uint8_t opr_id);
/**
* When used for queue_idx in function dpni_set_rx_dist_default_queue will
* signal to dpni to drop all unclassified frames
*/
#define DPNI_FS_MISS_DROP ((uint16_t)-1)
/**
* struct dpni_rx_dist_cfg - distribution configuration
* @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
* 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
* 512,768,896,1024
* @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
* the extractions to be used for the distribution key by calling
* dpkg_prepare_key_cfg() relevant only when enable!=0 otherwise
* it can be '0'
* @enable: enable/disable the distribution.
* @tc: TC id for which distribution is set
* @fs_miss_flow_id: when packet misses all rules from flow steering table and
* hash is disabled it will be put into this queue id; use
* DPNI_FS_MISS_DROP to drop frames. The value of this field is
* used only when flow steering distribution is enabled and hash
* distribution is disabled
*/
struct dpni_rx_dist_cfg {
uint16_t dist_size;
uint64_t key_cfg_iova;
uint8_t enable;
uint8_t tc;
uint16_t fs_miss_flow_id;
};
int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
uint16_t token, const struct dpni_rx_dist_cfg *cfg);
int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
uint16_t token, const struct dpni_rx_dist_cfg *cfg);
int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
uint16_t token, uint16_t tpid);
int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
uint16_t token, uint16_t tpid);
/**
* struct dpni_custom_tpid_cfg - custom TPID configuration. Contains custom TPID
* values used in current dpni object to detect 802.1q frames.
* @tpid1: first tag. Not used if zero.
* @tpid2: second tag. Not used if zero.
*/
struct dpni_custom_tpid_cfg {
uint16_t tpid1;
uint16_t tpid2;
};
int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
uint16_t token, struct dpni_custom_tpid_cfg *tpid);
/**
* enum dpni_soft_sequence_dest - Enumeration of WRIOP software sequence
* destinations
@@ -1936,4 +1884,61 @@ int dpni_set_port_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
int dpni_get_port_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
uint16_t token, struct dpni_port_cfg *port_cfg);
/**
* When used for queue_idx in function dpni_set_rx_dist_default_queue will
* signal to dpni to drop all unclassified frames
*/
#define DPNI_FS_MISS_DROP ((uint16_t)-1)
/**
* struct dpni_rx_dist_cfg - distribution configuration
* @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
* 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
* 512,768,896,1024
* @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
* the extractions to be used for the distribution key by calling
* dpkg_prepare_key_cfg() relevant only when enable!=0 otherwise
* it can be '0'
* @enable: enable/disable the distribution.
* @tc: TC id for which distribution is set
* @fs_miss_flow_id: when packet misses all rules from flow steering table and
* hash is disabled it will be put into this queue id; use
* DPNI_FS_MISS_DROP to drop frames. The value of this field is
* used only when flow steering distribution is enabled and hash
* distribution is disabled
*/
struct dpni_rx_dist_cfg {
uint16_t dist_size;
uint64_t key_cfg_iova;
uint8_t enable;
uint8_t tc;
uint16_t fs_miss_flow_id;
};
int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
uint16_t token, const struct dpni_rx_dist_cfg *cfg);
int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
uint16_t token, const struct dpni_rx_dist_cfg *cfg);
int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
uint16_t token, uint16_t tpid);
int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
uint16_t token, uint16_t tpid);
/**
* struct dpni_custom_tpid_cfg - custom TPID configuration. Contains custom TPID
* values used in current dpni object to detect 802.1q frames.
* @tpid1: first tag. Not used if zero.
* @tpid2: second tag. Not used if zero.
*/
struct dpni_custom_tpid_cfg {
uint16_t tpid1;
uint16_t tpid2;
};
int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
uint16_t token, struct dpni_custom_tpid_cfg *tpid);
#endif /* __FSL_DPNI_H */

drivers/net/dpaa2/mc/fsl_dpni_cmd.h

@@ -108,16 +108,17 @@
#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE DPNI_CMD(0x26D)
#define DPNI_CMDID_SET_OPR DPNI_CMD(0x26e)
#define DPNI_CMDID_GET_OPR DPNI_CMD(0x26f)
#define DPNI_CMDID_LOAD_SW_SEQUENCE DPNI_CMD(0x270)
#define DPNI_CMDID_ENABLE_SW_SEQUENCE DPNI_CMD(0x271)
#define DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT DPNI_CMD(0x272)
#define DPNI_CMDID_SET_OPR DPNI_CMD(0x26e)
#define DPNI_CMDID_GET_OPR DPNI_CMD(0x26f)
#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
#define DPNI_CMDID_ADD_CUSTOM_TPID DPNI_CMD(0x275)
#define DPNI_CMDID_REMOVE_CUSTOM_TPID DPNI_CMD(0x276)
#define DPNI_CMDID_GET_CUSTOM_TPID DPNI_CMD(0x277)
#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
@@ -451,8 +452,6 @@ struct dpni_cmd_enable_vlan_filter {
uint8_t en;
};
#define DPNI_VLAN_SET_QUEUE_ACTION 1
struct dpni_cmd_vlan_id {
uint8_t flags;
uint8_t tc_id;
@@ -854,42 +853,6 @@ struct dpni_rsp_get_opr {
uint16_t opr_id;
};
struct dpni_cmd_add_custom_tpid {
uint16_t pad;
uint16_t tpid;
};
struct dpni_cmd_remove_custom_tpid {
uint16_t pad;
uint16_t tpid;
};
struct dpni_rsp_get_custom_tpid {
uint16_t tpid1;
uint16_t tpid2;
};
#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
struct dpni_cmd_set_rx_fs_dist {
uint16_t dist_size;
uint8_t enable;
uint8_t tc;
uint16_t miss_flow_id;
uint16_t pad1;
uint64_t key_cfg_iova;
};
#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
struct dpni_cmd_set_rx_hash_dist {
uint16_t dist_size;
uint8_t enable;
uint8_t tc_id;
uint32_t pad;
uint64_t key_cfg_iova;
};
struct dpni_load_sw_sequence {
uint8_t dest;
uint8_t pad0[7];
@@ -957,5 +920,41 @@ struct dpni_rsp_get_port_cfg {
uint32_t bit_params;
};
#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
struct dpni_cmd_set_rx_fs_dist {
uint16_t dist_size;
uint8_t enable;
uint8_t tc;
uint16_t miss_flow_id;
uint16_t pad1;
uint64_t key_cfg_iova;
};
#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
struct dpni_cmd_set_rx_hash_dist {
uint16_t dist_size;
uint8_t enable;
uint8_t tc_id;
uint32_t pad;
uint64_t key_cfg_iova;
};
struct dpni_cmd_add_custom_tpid {
uint16_t pad;
uint16_t tpid;
};
struct dpni_cmd_remove_custom_tpid {
uint16_t pad;
uint16_t tpid;
};
struct dpni_rsp_get_custom_tpid {
uint16_t tpid1;
uint16_t tpid2;
};
#pragma pack(pop)
#endif /* _FSL_DPNI_CMD_H */