net/mlx5: fix LRO requirements check

One of the conditions for allowing the LRO offload is the DV configuration (DV flow must be enabled).

The mlx5_os_capabilities_prepare() function incorrectly checks the DV
configuration before it has been initialized from the user devargs;
hence, LRO can never be allowed.

This patch moves the check to mlx5_shared_dev_ctx_args_config(), where
the DV configuration is initialized.

Fixes: c4b8620135 ("net/mlx5: refactor to detect operation by DevX")
Cc: stable@dpdk.org

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Reported-by: Gal Shalom <galshalom@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>

commit 593f913a8e (parent 2d8dde8d63)
Author:    Michael Baum <michaelba@nvidia.com>
Date:      2022-07-27 15:24:06 +03:00
Committer: Raslan Darawsheh

 5 files changed, 15 insertions(+), 18 deletions(-)

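For context: with the check relocated, the PMD again advertises RTE_ETH_RX_OFFLOAD_TCP_LRO whenever the device capability and the DV configuration allow it. The snippet below is an illustrative sketch of how an application would probe for and request the offload through the generic ethdev API; it is not part of the patch, and the helper name enable_lro() is invented for the example.

#include <rte_ethdev.h>

/* Illustrative helper (not part of this patch): request TCP LRO on a port. */
static int
enable_lro(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = {0};
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	/* The PMD reports TCP_LRO only when LRO is allowed by the device. */
	if (!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO))
		return -ENOTSUP;
	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
	/* Bound the size of coalesced packets to what the device supports. */
	conf.rxmode.max_lro_pkt_size = dev_info.max_lro_pkt_size;
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
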
@@ -370,15 +370,6 @@ mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
 		"DevX does not provide UAR offset, can't create queues for packet pacing.");
 	sh->dev_cap.txpp_en = 0;
 #endif
-	/* Check for LRO support. */
-	if (mlx5_devx_obj_ops_en(sh) && hca_attr->lro_cap) {
-		/* TBD check tunnel lro caps. */
-		sh->dev_cap.lro_supported = 1;
-		DRV_LOG(DEBUG, "Device supports LRO.");
-		DRV_LOG(DEBUG,
-			"LRO minimal size of TCP segment required for coalescing is %d bytes.",
-			hca_attr->lro_min_mss_size);
-	}
 	sh->dev_cap.scatter_fcs_w_decap_disable =
 		hca_attr->scatter_fcs_w_decap_disable;
 	sh->dev_cap.rq_delay_drop_en = hca_attr->rq_delay_drop;

@@ -1327,6 +1327,15 @@ mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,
 		DRV_LOG(WARNING,
 			"\"tx_skew\" doesn't affect without \"tx_pp\".");
 	}
+	/* Check for LRO support. */
+	if (mlx5_devx_obj_ops_en(sh) && sh->cdev->config.hca_attr.lro_cap) {
+		/* TBD check tunnel lro caps. */
+		config->lro_allowed = 1;
+		DRV_LOG(DEBUG, "LRO is allowed.");
+		DRV_LOG(DEBUG,
+			"LRO minimal size of TCP segment required for coalescing is %d bytes.",
+			sh->cdev->config.hca_attr.lro_min_mss_size);
+	}
 	/*
 	 * If HW has bug working with tunnel packet decapsulation and scatter
 	 * FCS, and decapsulation is needed, clear the hw_fcs_strip bit.
@@ -2392,10 +2401,7 @@ mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist,
 		config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
 		config->mps == MLX5_MPW ? "legacy " : "",
 		config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
-	/* LRO is supported only when DV flow enabled. */
-	if (dev_cap->lro_supported && !priv->sh->config.dv_flow_en)
-		dev_cap->lro_supported = 0;
-	if (dev_cap->lro_supported) {
+	if (priv->sh->config.lro_allowed) {
 		/*
 		 * If LRO timeout is not configured by application,
 		 * use the minimal supported value.
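
As the comment above notes, the LRO session timeout falls back to the minimal supported value when the application does not configure one; with mlx5 it is normally supplied through the "lro_timeout_usec" device argument. An illustrative EAL allow-list entry (the PCI address and the 32 us value are placeholders; dv_flow_en=1 is the driver default and is shown only for emphasis):

 -a 0000:03:00.0,dv_flow_en=1,lro_timeout_usec=32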

@@ -151,7 +151,6 @@ struct mlx5_dev_cap {
 	/* HW has bug working with tunnel packet decap and scatter FCS. */
 	uint32_t hw_fcs_strip:1; /* FCS stripping is supported. */
 	uint32_t rt_timestamp:1; /* Realtime timestamp format. */
-	uint32_t lro_supported:1; /* Whether LRO is supported. */
 	uint32_t rq_delay_drop_en:1; /* Enable RxQ delay drop. */
 	uint32_t tunnel_en:3;
 	/* Whether tunnel stateless offloads are supported. */
@@ -308,6 +307,7 @@ struct mlx5_sh_config {
 	uint32_t decap_en:1; /* Whether decap will be used or not. */
 	uint32_t hw_fcs_strip:1; /* FCS stripping is supported. */
 	uint32_t allow_duplicate_pattern:1;
+	uint32_t lro_allowed:1; /* Whether LRO is allowed. */
 	/* Allow/Prevent the duplicate rules pattern. */
 };

@@ -835,7 +835,7 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
 	if (dev->data->dev_conf.lpbk_mode)
 		tir_attr->self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
 	if (lro) {
-		MLX5_ASSERT(priv->sh->dev_cap.lro_supported);
+		MLX5_ASSERT(priv->sh->config.lro_allowed);
 		tir_attr->lro_timeout_period_usecs = priv->config.lro_timeout;
 		tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
 		tir_attr->lro_enable_mask =

@@ -374,7 +374,7 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 			     RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
 	if (priv->sh->dev_cap.hw_vlan_strip)
 		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
-	if (priv->sh->dev_cap.lro_supported)
+	if (priv->sh->config.lro_allowed)
 		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
 	return offloads;
 }
@@ -843,9 +843,9 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	bool is_extmem = false;
 
 	if ((offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
-	    !priv->sh->dev_cap.lro_supported) {
+	    !priv->sh->config.lro_allowed) {
 		DRV_LOG(ERR,
-			"Port %u queue %u LRO is configured but not supported.",
+			"Port %u queue %u LRO is configured but not allowed.",
 			dev->data->port_id, idx);
 		rte_errno = EINVAL;
 		return -rte_errno;