net/mlx5: add hardware steering item translation

This provides shared item translation code for hardware steering
root table flows, as they still work under FW steering mode.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
commit 75a00812b1 (parent cd4ab74206)
Author: Suanming Mou <suanmingm@nvidia.com>
Date: 2022-10-20 18:57:33 +03:00
Committed by: Raslan Darawsheh
3 changed files with 165 additions and 47 deletions
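For orientation, the sketch below shows roughly how the new shared entry point is meant to be driven from an HWS root-table caller. It is a minimal illustration, not code from this commit: the caller function is hypothetical, and MLX5_ST_SZ_BYTES(fte_match_param) plus the MLX5_SET_MATCHER_HS_V key-type constant are assumed from the surrounding mlx5 code of this patch series.

/* Hypothetical caller (illustration only, not part of this commit).
 * Builds the DV match value for a root-table HWS flow using the new
 * shared translation helper declared in mlx5_flow.h.
 */
static int
hws_root_translate_pattern(uint16_t port_id,
                           const struct rte_flow_item items[],
                           struct rte_flow_error *error)
{
        /* fte_match_param-sized key buffer; macro assumed from mlx5_prm.h. */
        uint8_t match_value[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
        uint64_t item_flags = 0;
        uint8_t match_criteria = 0;
        struct mlx5_flow_attr attr = {
                .port_id = port_id,
                .group = 0,             /* root table */
                .priority = 0,
                .rss_level = 0,
                .act_flags = 0,
                .tbl_type = MLX5DR_TABLE_TYPE_NIC_RX,   /* ingress */
        };

        /* MLX5_SET_MATCHER_HS_V (HWS value key) is assumed from this series. */
        return flow_dv_translate_items_hws(items, &attr, match_value,
                                           MLX5_SET_MATCHER_HS_V, &item_flags,
                                           &match_criteria, error);
}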

drivers/net/mlx5/mlx5_flow.c

@@ -63,10 +63,6 @@ tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
                                uint32_t group, uint32_t *table,
                                struct rte_flow_error *error);
-static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
-static void mlx5_flow_pop_thread_workspace(void);
 /** Device flow drivers. */
 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
@@ -7108,7 +7104,7 @@ mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)
        struct rte_flow_item_port_id port_spec = {
                .id = MLX5_PORT_ESW_MGR,
        };
-       struct mlx5_rte_flow_item_tx_queue txq_spec = {
+       struct mlx5_rte_flow_item_sq txq_spec = {
                .queue = txq,
        };
        struct rte_flow_item pattern[] = {
@@ -7118,7 +7114,7 @@ mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t txq)
                },
                {
                        .type = (enum rte_flow_item_type)
-                               MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+                               MLX5_RTE_FLOW_ITEM_TYPE_SQ,
                        .spec = &txq_spec,
                },
                {
@@ -7404,7 +7400,7 @@ flow_alloc_thread_workspace(void)
  *
  * @return pointer to thread specific flow workspace data, NULL on error.
  */
-static struct mlx5_flow_workspace*
+struct mlx5_flow_workspace*
 mlx5_flow_push_thread_workspace(void)
 {
        struct mlx5_flow_workspace *curr;
@@ -7441,7 +7437,7 @@ mlx5_flow_push_thread_workspace(void)
  *
  * @return pointer to thread specific flow workspace data, NULL on error.
  */
-static void
+void
 mlx5_flow_pop_thread_workspace(void)
 {
        struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace();
@@ -7504,16 +7500,16 @@ mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
                .egress = 1,
                .priority = 0,
        };
-       struct mlx5_rte_flow_item_tx_queue queue_spec = {
+       struct mlx5_rte_flow_item_sq queue_spec = {
                .queue = queue,
        };
-       struct mlx5_rte_flow_item_tx_queue queue_mask = {
+       struct mlx5_rte_flow_item_sq queue_mask = {
                .queue = UINT32_MAX,
        };
        struct rte_flow_item items[] = {
                {
                        .type = (enum rte_flow_item_type)
-                               MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+                               MLX5_RTE_FLOW_ITEM_TYPE_SQ,
                        .spec = &queue_spec,
                        .last = NULL,
                        .mask = &queue_mask,

drivers/net/mlx5/mlx5_flow.h

@@ -28,7 +28,7 @@
 enum mlx5_rte_flow_item_type {
        MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
        MLX5_RTE_FLOW_ITEM_TYPE_TAG,
-       MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
+       MLX5_RTE_FLOW_ITEM_TYPE_SQ,
        MLX5_RTE_FLOW_ITEM_TYPE_VLAN,
        MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL,
 };
@@ -95,7 +95,7 @@ struct mlx5_flow_action_copy_mreg {
 };

 /* Matches on source queue. */
-struct mlx5_rte_flow_item_tx_queue {
+struct mlx5_rte_flow_item_sq {
        uint32_t queue;
 };
@@ -159,7 +159,7 @@ enum mlx5_feature_name {
 #define MLX5_FLOW_LAYER_GENEVE (1u << 26)

 /* Queue items. */
-#define MLX5_FLOW_ITEM_TX_QUEUE (1u << 27)
+#define MLX5_FLOW_ITEM_SQ (1u << 27)

 /* Pattern tunnel Layer bits (continued). */
 #define MLX5_FLOW_LAYER_GTP (1u << 28)
@@ -196,6 +196,9 @@ enum mlx5_feature_name {
 #define MLX5_FLOW_ITEM_PORT_REPRESENTOR (UINT64_C(1) << 41)
 #define MLX5_FLOW_ITEM_REPRESENTED_PORT (UINT64_C(1) << 42)

+/* Meter color item */
+#define MLX5_FLOW_ITEM_METER_COLOR (UINT64_C(1) << 44)
+
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
        (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -1009,6 +1012,18 @@ flow_items_to_tunnel(const struct rte_flow_item items[])
        return items[0].spec;
 }

+/* HW steering flow attributes. */
+struct mlx5_flow_attr {
+       uint32_t port_id; /* Port index. */
+       uint32_t group; /* Flow group. */
+       uint32_t priority; /* Original Priority. */
+       /* rss level, used by priority adjustment. */
+       uint32_t rss_level;
+       /* Action flags, used by priority adjustment. */
+       uint32_t act_flags;
+       uint32_t tbl_type; /* Flow table type. */
+};
+
 /* Flow structure. */
 struct rte_flow {
        uint32_t dev_handles;
@@ -1601,6 +1616,8 @@ struct mlx5_flow_driver_ops {

 /* mlx5_flow.c */

+struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
+void mlx5_flow_pop_thread_workspace(void);
 struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
 __extension__
 struct flow_grp_info {
@@ -1769,6 +1786,32 @@ mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
 int flow_hw_q_flow_flush(struct rte_eth_dev *dev,
                         struct rte_flow_error *error);

+/*
+ * Convert rte_mtr_color to mlx5 color.
+ *
+ * @param[in] rcol
+ *   rte_mtr_color.
+ *
+ * @return
+ *   mlx5 color.
+ */
+static inline int
+rte_col_2_mlx5_col(enum rte_color rcol)
+{
+       switch (rcol) {
+       case RTE_COLOR_GREEN:
+               return MLX5_FLOW_COLOR_GREEN;
+       case RTE_COLOR_YELLOW:
+               return MLX5_FLOW_COLOR_YELLOW;
+       case RTE_COLOR_RED:
+               return MLX5_FLOW_COLOR_RED;
+       default:
+               break;
+       }
+       return MLX5_FLOW_COLOR_UNDEFINED;
+}
+
 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
                             const struct mlx5_flow_tunnel *tunnel,
                             uint32_t group, uint32_t *table,
@@ -2128,4 +2171,9 @@ int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,
                                bool *all_ports,
                                struct rte_flow_error *error);

+int flow_dv_translate_items_hws(const struct rte_flow_item *items,
+                               struct mlx5_flow_attr *attr, void *key,
+                               uint32_t key_type, uint64_t *item_flags,
+                               uint8_t *match_criteria,
+                               struct rte_flow_error *error);
 #endif /* RTE_PMD_MLX5_FLOW_H_ */
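Since rte_col_2_mlx5_col() now lives in mlx5_flow.h, both the legacy DV path and the new HWS code can share the color mapping. A tiny hypothetical example of the intended use (the struct rte_flow_item_meter_color spec type comes from the rte_flow API of the same release, not from this commit):

/* Hypothetical: map a meter-color match spec to the device color
 * encoding; MLX5_FLOW_COLOR_UNDEFINED signals an unsupported color. */
static int
example_translate_meter_color(const struct rte_flow_item_meter_color *spec)
{
        int color = rte_col_2_mlx5_col(spec->color);

        return (color == MLX5_FLOW_COLOR_UNDEFINED) ? -EINVAL : color;
}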

drivers/net/mlx5/mlx5_flow_dv.c

@@ -212,31 +212,6 @@ flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
        attr->valid = 1;
 }

-/*
- * Convert rte_mtr_color to mlx5 color.
- *
- * @param[in] rcol
- *   rte_mtr_color.
- *
- * @return
- *   mlx5 color.
- */
-static inline int
-rte_col_2_mlx5_col(enum rte_color rcol)
-{
-       switch (rcol) {
-       case RTE_COLOR_GREEN:
-               return MLX5_FLOW_COLOR_GREEN;
-       case RTE_COLOR_YELLOW:
-               return MLX5_FLOW_COLOR_YELLOW;
-       case RTE_COLOR_RED:
-               return MLX5_FLOW_COLOR_RED;
-       default:
-               break;
-       }
-       return MLX5_FLOW_COLOR_UNDEFINED;
-}
-
 struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
@@ -7338,8 +7313,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                return ret;
                        last_item = MLX5_FLOW_ITEM_TAG;
                        break;
-               case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
-                       last_item = MLX5_FLOW_ITEM_TX_QUEUE;
+               case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
+                       last_item = MLX5_FLOW_ITEM_SQ;
                        break;
                case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
                        break;
@@ -8225,7 +8200,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
         * work due to metadata regC0 mismatch.
         */
        if ((!attr->transfer && attr->egress) && priv->representor &&
-           !(item_flags & MLX5_FLOW_ITEM_TX_QUEUE))
+           !(item_flags & MLX5_FLOW_ITEM_SQ))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL,
@@ -11244,9 +11219,9 @@ flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
                                const struct rte_flow_item *item,
                                uint32_t key_type)
 {
-       const struct mlx5_rte_flow_item_tx_queue *queue_m;
-       const struct mlx5_rte_flow_item_tx_queue *queue_v;
-       const struct mlx5_rte_flow_item_tx_queue queue_mask = {
+       const struct mlx5_rte_flow_item_sq *queue_m;
+       const struct mlx5_rte_flow_item_sq *queue_v;
+       const struct mlx5_rte_flow_item_sq queue_mask = {
                .queue = UINT32_MAX,
        };
        void *misc_v =
@@ -13231,9 +13206,9 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
                flow_dv_translate_mlx5_item_tag(dev, key, items, key_type);
                last_item = MLX5_FLOW_ITEM_TAG;
                break;
-       case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+       case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
                flow_dv_translate_item_tx_queue(dev, key, items, key_type);
-               last_item = MLX5_FLOW_ITEM_TX_QUEUE;
+               last_item = MLX5_FLOW_ITEM_SQ;
                break;
        case RTE_FLOW_ITEM_TYPE_GTP:
                flow_dv_translate_item_gtp(key, items, tunnel, key_type);
@@ -13273,6 +13248,105 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
        return 0;
 }

+/**
+ * Fill the HW steering flow with DV spec.
+ *
+ * @param[in] items
+ *   Pointer to the list of items.
+ * @param[in] attr
+ *   Pointer to the flow attributes.
+ * @param[in] key
+ *   Pointer to the flow matcher key.
+ * @param[in] key_type
+ *   Key type.
+ * @param[in, out] item_flags
+ *   Pointer to the flow item flags.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+flow_dv_translate_items_hws(const struct rte_flow_item *items,
+                           struct mlx5_flow_attr *attr, void *key,
+                           uint32_t key_type, uint64_t *item_flags,
+                           uint8_t *match_criteria,
+                           struct rte_flow_error *error)
+{
+       struct mlx5_flow_workspace *flow_wks = mlx5_flow_push_thread_workspace();
+       struct mlx5_flow_rss_desc rss_desc = { .level = attr->rss_level };
+       struct rte_flow_attr rattr = {
+               .group = attr->group,
+               .priority = attr->priority,
+               .ingress = !!(attr->tbl_type == MLX5DR_TABLE_TYPE_NIC_RX),
+               .egress = !!(attr->tbl_type == MLX5DR_TABLE_TYPE_NIC_TX),
+               .transfer = !!(attr->tbl_type == MLX5DR_TABLE_TYPE_FDB),
+       };
+       struct mlx5_dv_matcher_workspace wks = {
+               .action_flags = attr->act_flags,
+               .item_flags = item_flags ? *item_flags : 0,
+               .external = 0,
+               .next_protocol = 0xff,
+               .attr = &rattr,
+               .rss_desc = &rss_desc,
+       };
+       int ret = 0;
+
+       RTE_SET_USED(flow_wks);
+       for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+               if (!mlx5_flow_os_item_supported(items->type)) {
+                       ret = rte_flow_error_set(error, ENOTSUP,
+                                                RTE_FLOW_ERROR_TYPE_ITEM,
+                                                NULL, "item not supported");
+                       goto exit;
+               }
+               ret = flow_dv_translate_items(&rte_eth_devices[attr->port_id],
+                                             items, &wks, key, key_type, NULL);
+               if (ret)
+                       goto exit;
+       }
+       if (wks.item_flags & MLX5_FLOW_LAYER_VXLAN_GPE) {
+               flow_dv_translate_item_vxlan_gpe(key,
+                                                wks.tunnel_item,
+                                                wks.item_flags,
+                                                key_type);
+       } else if (wks.item_flags & MLX5_FLOW_LAYER_GENEVE) {
+               flow_dv_translate_item_geneve(key,
+                                             wks.tunnel_item,
+                                             wks.item_flags,
+                                             key_type);
+       } else if (wks.item_flags & MLX5_FLOW_LAYER_GRE) {
+               if (wks.tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE) {
+                       flow_dv_translate_item_gre(key,
+                                                  wks.tunnel_item,
+                                                  wks.item_flags,
+                                                  key_type);
+               } else if (wks.tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE_OPTION) {
+                       flow_dv_translate_item_gre_option(key,
+                                                         wks.tunnel_item,
+                                                         wks.gre_item,
+                                                         wks.item_flags,
+                                                         key_type);
+               } else if (wks.tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
+                       flow_dv_translate_item_nvgre(key,
+                                                    wks.tunnel_item,
+                                                    wks.item_flags,
+                                                    key_type);
+               } else {
+                       MLX5_ASSERT(false);
+               }
+       }
+       if (match_criteria)
+               *match_criteria = flow_dv_matcher_enable(key);
+       if (item_flags)
+               *item_flags = wks.item_flags;
+exit:
+       mlx5_flow_pop_thread_workspace();
+       return ret;
+}
+
 /**
  * Fill the SW steering flow with DV spec.
  *