net/mlx5: add default flows for hairpin

When using hairpin, all traffic from TX hairpin queues should jump
to a dedicated table where matching can be done using registers.

Signed-off-by: Ori Kam <orika@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
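
In rte_flow terms, the default rule installed per hairpin Tx queue amounts
to "match the sending Tx queue, then jump to the dedicated egress table".
The following minimal sketch shows the equivalent construction; it is
illustrative only, not part of the diff, and assumes the driver-internal
names added by this patch (struct mlx5_rte_flow_item_tx_queue,
MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE, MLX5_HAIRPIN_TX_TABLE) are in scope;
tx_queue_index is a placeholder.

#include <stdint.h>
#include <rte_flow.h>

/* Sketch of the default hairpin egress rule for one Tx queue. */
static void
sketch_hairpin_default_rule(uint32_t tx_queue_index)
{
	const struct rte_flow_attr attr = { .egress = 1, .priority = 0 };
	struct mlx5_rte_flow_item_tx_queue spec = { .queue = tx_queue_index };
	struct mlx5_rte_flow_item_tx_queue mask = { .queue = UINT32_MAX };
	const struct rte_flow_item pattern[] = {
		{
			/* Private mlx5 item: matches the sending Tx queue. */
			.type = (enum rte_flow_item_type)
				MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
			.spec = &spec,
			.mask = &mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_jump jump = {
		.group = MLX5_HAIRPIN_TX_TABLE, /* dedicated egress table */
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	/* The driver feeds these into its internal flow_list_create()
	 * path (see mlx5_ctrl_flow_source_queue() below); shown here
	 * only to make the rule's shape concrete. */
	(void)attr; (void)pattern; (void)actions;
}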

drivers/net/mlx5/mlx5.h

@@ -560,6 +560,7 @@ struct mlx5_flow_tbl_resource {
};
#define MLX5_MAX_TABLES UINT16_MAX
#define MLX5_HAIRPIN_TX_TABLE (UINT16_MAX - 1)
#define MLX5_MAX_TABLES_FDB UINT16_MAX
#define MLX5_DBR_PAGE_SIZE 4096 /* Must be >= 512. */
@@ -883,6 +884,7 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list);
void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
int mlx5_flow_verify(struct rte_eth_dev *dev);
int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue);
int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
			struct rte_flow_item_eth *eth_spec,
			struct rte_flow_item_eth *eth_mask,

drivers/net/mlx5/mlx5_flow.c

@@ -2819,6 +2819,66 @@ mlx5_flow_verify(struct rte_eth_dev *dev)
	return ret;
}

/**
 * Enable default hairpin egress flow.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param queue
 *   The queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
			    uint32_t queue)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_attr attr = {
		.egress = 1,
		.priority = 0,
	};
	struct mlx5_rte_flow_item_tx_queue queue_spec = {
		.queue = queue,
	};
	struct mlx5_rte_flow_item_tx_queue queue_mask = {
		.queue = UINT32_MAX,
	};
	struct rte_flow_item items[] = {
		{
			.type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
			.spec = &queue_spec,
			.last = NULL,
			.mask = &queue_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	struct rte_flow_action_jump jump = {
		.group = MLX5_HAIRPIN_TX_TABLE,
	};
	struct rte_flow_action actions[2];
	struct rte_flow *flow;
	struct rte_flow_error error;

	actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
	actions[0].conf = &jump;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
	flow = flow_list_create(dev, &priv->ctrl_flows,
				&attr, items, actions, false, &error);
	if (!flow) {
		DRV_LOG(DEBUG,
			"Failed to create ctrl flow: rte_errno(%d),"
			" type(%d), message(%s)",
			rte_errno, error.type,
			error.message ? error.message : " (no stated reason)");
		return -rte_errno;
	}
	return 0;
}

/**
 * Enable a control flow configured from the control plane.
 *

drivers/net/mlx5/mlx5_flow.h

@@ -44,6 +44,7 @@ enum modify_reg {
enum mlx5_rte_flow_item_type {
	MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
	MLX5_RTE_FLOW_ITEM_TYPE_TAG,
	MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
};

/* Private rte flow actions. */
@@ -64,6 +65,11 @@ struct mlx5_rte_flow_action_set_tag {
	rte_be32_t data;
};

/* Matches on source queue. */
struct mlx5_rte_flow_item_tx_queue {
	uint32_t queue;
};

/* Pattern outer Layer bits. */
#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
@@ -103,6 +109,9 @@ struct mlx5_rte_flow_action_set_tag {
#define MLX5_FLOW_LAYER_NVGRE (1u << 23)
#define MLX5_FLOW_LAYER_GENEVE (1u << 24)

/* Queue items. */
#define MLX5_FLOW_ITEM_TX_QUEUE (1u << 25)

/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
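
Like the layer bits above, MLX5_FLOW_ITEM_TX_QUEUE is ORed into the flow's
item_flags during validation/translation so later stages know the pattern
matched on the sending queue. A minimal sketch of that bookkeeping,
illustrative only (assumes the definitions above are visible):

#include <stdint.h>

static void
sketch_item_flags(void)
{
	uint64_t item_flags = 0;

	/* On seeing MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE in the pattern: */
	item_flags |= MLX5_FLOW_ITEM_TX_QUEUE;
	/* Later stages can then test for it: */
	if (item_flags & MLX5_FLOW_ITEM_TX_QUEUE) {
		/* The flow matches on the Tx queue. */
	}
}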

drivers/net/mlx5/mlx5_flow_dv.c

@@ -3358,7 +3358,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		switch (items->type) {
		int type = items->type;

		switch (type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_PORT_ID:
@@ -3527,6 +3529,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
				return ret;
			last_item = MLX5_FLOW_LAYER_ICMP6;
			break;
		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -3535,11 +3540,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		item_flags |= last_item;
	}
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int type = actions->type;
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions, "too many actions");
		switch (actions->type) {
		switch (type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
@@ -3805,6 +3811,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
					MLX5_FLOW_ACTION_INC_TCP_ACK :
					MLX5_FLOW_ACTION_DEC_TCP_ACK;
			break;
		case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -5370,6 +5378,51 @@ flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
	return 0;
}

/**
 * Add Tx queue matcher.
 *
 * @param[in] dev
 *   Pointer to the dev struct.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
				void *matcher, void *key,
				const struct rte_flow_item *item)
{
	const struct mlx5_rte_flow_item_tx_queue *queue_m;
	const struct mlx5_rte_flow_item_tx_queue *queue_v;
	void *misc_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	struct mlx5_txq_ctrl *txq;
	uint32_t queue;

	queue_m = (const void *)item->mask;
	if (!queue_m)
		return;
	queue_v = (const void *)item->spec;
	if (!queue_v)
		return;
	txq = mlx5_txq_get(dev, queue_v->queue);
	if (!txq)
		return;
	queue = txq->obj->sq->id;
	MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
	MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
		 queue & queue_m->queue);
	mlx5_txq_release(dev, queue_v->queue);
}

/**
 * Fill the flow with DV spec.
 *
@@ -5951,6 +6004,12 @@ cnt_err:
					    items);
			last_item = MLX5_FLOW_ITEM_TAG;
			break;
		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
			flow_dv_translate_item_tx_queue(dev, match_mask,
							match_value,
							items);
			last_item = MLX5_FLOW_ITEM_TX_QUEUE;
			break;
		default:
			break;
		}
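
The translation above follows the usual DV spec/mask convention: the
matcher side receives the mask (queue_m->queue) and the key side receives
the SQ number pre-ANDed with that mask, so hardware compares only the
masked bits. A tiny illustration of that masked comparison, not driver
code:

#include <stdbool.h>
#include <stdint.h>

/* A packet sent from SQ number 'field' hits the rule when its masked
 * bits equal the key value, mirroring the
 * MLX5_SET(..., source_sqn, queue & queue_m->queue) line above. */
static inline bool
masked_match(uint32_t field, uint32_t mask, uint32_t value)
{
	return (field & mask) == value;
}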

drivers/net/mlx5/mlx5_trigger.c

@@ -402,6 +402,24 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
	unsigned int j;
	int ret;

	/*
	 * The hairpin Tx queue default flow must be created whether or not
	 * isolated mode is enabled. Otherwise packets would be sent out
	 * directly, skipping the Tx flow actions (e.g. encapsulation).
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			ret = mlx5_ctrl_flow_source_queue(dev, i);
			if (ret) {
				mlx5_txq_release(dev, i);
				goto error;
			}
		}
		mlx5_txq_release(dev, i);
	}
	if (priv->config.dv_esw_en && !priv->config.vf)
		if (!mlx5_flow_create_esw_table_zero_flow(dev))
			goto error;