net/mlx5: allow implicit LRO flow

When a user configures LRO in the port offloads, he probably wants every
TCP packet to have a chance to open an LRO session.

The PMD did not configure LRO in the flow TIR if the flow did not
explicitly include a TCP item, even when the flow could match TCP
traffic.

For example, the following flows were not LRO offloaded:
pattern eth / end, pattern eth / ipv4 / end, pattern eth / ipv6 / end.
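
As a concrete illustration, such a rule could be created in testpmd as
follows (a sketch; the port id, queue list and RSS action are
illustrative and assume the TCP LRO Rx offload was already enabled on
the port):

    testpmd> flow create 0 ingress pattern eth / ipv4 / end \
             actions rss queues 0 1 end / end

Before this patch, the TIR created for such a rule had LRO disabled
because no TCP item appears in the pattern; after it, the TIR inherits
the port-level LRO configuration.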

Enable LRO configuration for all the TIRs if LRO is configured in the
port.
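
"Configured in the port" here means the DEV_RX_OFFLOAD_TCP_LRO bit is
set in the port's Rx offload configuration, which is what the
MLX5_LRO_ENABLED() macro kept by this patch (and the mlx5_lro_on()
helper used in the diff below) tests. A minimal sketch of that check,
with an illustrative helper name:

    #include <rte_ethdev.h>

    /* Port-level LRO test, mirroring MLX5_LRO_ENABLED(dev):
     * true when the application enabled the TCP LRO Rx offload. */
    static inline int
    port_lro_enabled(const struct rte_eth_dev *dev)
    {
    	return !!(dev->data->dev_conf.rxmode.offloads &
    		  DEV_RX_OFFLOAD_TCP_LRO);
    }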

No performance impact for non-LRO traffic in these TIRs.

Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>

@@ -198,9 +198,6 @@ TAILQ_HEAD(mlx5_flows, rte_flow);
 #define MLX5_LRO_ENABLED(dev) \
 	((dev)->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
 
-#define MLX5_FLOW_IPV4_LRO (1 << 0)
-#define MLX5_FLOW_IPV6_LRO (1 << 1)
-
 /* LRO configurations structure. */
 struct mlx5_lro_config {
 	uint32_t supported:1; /* Whether LRO is supported. */

@@ -62,9 +62,6 @@ union flow_dv_attr {
 	uint32_t attr;
 };
 
-#define MLX5_FLOW_IPV4_LRO (1 << 0)
-#define MLX5_FLOW_IPV6_LRO (1 << 1)
-
 /**
  * Initialize flow attributes structure according to flow items' types.
  *
@@ -5186,26 +5183,13 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 					     (*flow->queue),
 					     flow->rss.queue_num);
 			if (!hrxq) {
-				int lro = 0;
-
-				if (mlx5_lro_on(dev)) {
-					if ((dev_flow->layers &
-					     MLX5_FLOW_LAYER_IPV4_LRO)
-					    == MLX5_FLOW_LAYER_IPV4_LRO)
-						lro = MLX5_FLOW_IPV4_LRO;
-					else if ((dev_flow->layers &
-						  MLX5_FLOW_LAYER_IPV6_LRO)
-						 == MLX5_FLOW_LAYER_IPV6_LRO)
-						lro = MLX5_FLOW_IPV6_LRO;
-				}
 				hrxq = mlx5_hrxq_new
 					(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
 					 dv->hash_fields, (*flow->queue),
 					 flow->rss.queue_num,
 					 !!(dev_flow->layers &
-					 MLX5_FLOW_LAYER_TUNNEL), lro);
+					 MLX5_FLOW_LAYER_TUNNEL));
 			}
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,

@@ -1669,8 +1669,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 					     (*flow->queue),
 					     flow->rss.queue_num,
 					     !!(dev_flow->layers &
-					     MLX5_FLOW_LAYER_TUNNEL),
-					     0);
+					     MLX5_FLOW_LAYER_TUNNEL));
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,

@@ -2100,8 +2100,6 @@ mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
  *   Number of queues.
  * @param tunnel
  *   Tunnel type.
- * @param lro
- *   Flow rule is relevant for LRO, i.e. contains IPv4/IPv6 and TCP.
  *
  * @return
  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
@@ -2111,7 +2109,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 	      const uint8_t *rss_key, uint32_t rss_key_len,
 	      uint64_t hash_fields,
 	      const uint16_t *queues, uint32_t queues_n,
-	      int tunnel __rte_unused, int lro)
+	      int tunnel __rte_unused)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
@@ -2218,11 +2216,13 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 	if (dev->data->dev_conf.lpbk_mode)
 		tir_attr.self_lb_block =
 			MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
-	if (lro) {
+	if (mlx5_lro_on(dev)) {
 		tir_attr.lro_timeout_period_usecs =
 					priv->config.lro.timeout;
 		tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
-		tir_attr.lro_enable_mask = lro;
+		tir_attr.lro_enable_mask =
+				MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+				MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
 	}
 	tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
 	if (!tir) {

@@ -358,7 +358,7 @@ struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
 				const uint8_t *rss_key, uint32_t rss_key_len,
 				uint64_t hash_fields,
 				const uint16_t *queues, uint32_t queues_n,
-				int tunnel __rte_unused, int lro);
+				int tunnel __rte_unused);
 struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
 				const uint8_t *rss_key, uint32_t rss_key_len,
 				uint64_t hash_fields,