net/mlx5: fix port initialization with small LRO
If the application-provided maximal LRO size was less than the PMD's expected
minimum, the PMD either crashed on an assert (when asserts were enabled) or
proceeded with port initialization and set the port's private maximal LRO
size below the supported minimum.
The patch terminates port start if the LRO size does not match PMD
requirements and TCP LRO offload was requested for at least one Rx queue
(see the application-side sketch below).
Fixes: 50c00baff7 ("net/mlx5: limit LRO size to maximum Rx packet")
Cc: stable@dpdk.org
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
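
For context, a minimal application-side sketch of the setup this check now guards:
TCP LRO is requested through the ethdev Rx offload flags and the maximal aggregated
size through rxmode.max_lro_pkt_size. The function name, queue and descriptor counts,
and the 4096-byte size are illustrative assumptions, not values taken from the patch.

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Configure one Rx/Tx queue pair with TCP LRO and start the port.
 * With this fix, a max LRO size below the mlx5 minimum makes
 * rte_eth_dev_start() fail with -EINVAL instead of hitting an
 * assert inside the PMD or being silently accepted. */
static int
configure_lro_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = { 0 };
	int ret;

	conf.rxmode.offloads = RTE_ETH_RX_OFFLOAD_TCP_LRO;
	conf.rxmode.max_lro_pkt_size = 4096; /* illustrative value */
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;
	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mb_pool);
	if (ret < 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;
	return rte_eth_dev_start(port_id);
}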
parent 7869d60380
commit b9f1f4c239
@@ -1533,7 +1533,6 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
 		   MLX5_MAX_TCP_HDR_OFFSET)
 		max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
 	max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
-	MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
 	max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
 	if (priv->max_lro_msg_size)
 		priv->max_lro_msg_size =
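
For illustration only, a standalone sketch of the size-adjustment flow this hunk
touches, assuming it subtracts the TCP header offset, clamps to the device maximum
and converts to segment-chunk units. The constants and the helper name below are
placeholders, not the mlx5 values, and the real code also conditions the subtraction
on the HCA LRO mode.

#include <stdint.h>
#include <rte_common.h>

/* Placeholder constants standing in for MLX5_MAX_TCP_HDR_OFFSET,
 * MLX5_MAX_LRO_SIZE and MLX5_LRO_SEG_CHUNK_SIZE; the real values differ. */
#define DEMO_TCP_HDR_OFFSET 192U
#define DEMO_MAX_LRO_SIZE   65280U
#define DEMO_LRO_CHUNK_SIZE 256U

/* Hypothetical helper mirroring the adjustment: after the clamp and the
 * division, a request smaller than one chunk yields 0, which is the state
 * the removed assert used to trap and the new start-time check rejects. */
static inline uint32_t
demo_adjust_lro_size(uint32_t max_lro_size)
{
	if (max_lro_size > DEMO_TCP_HDR_OFFSET)
		max_lro_size -= DEMO_TCP_HDR_OFFSET;
	max_lro_size = RTE_MIN(max_lro_size, DEMO_MAX_LRO_SIZE);
	return max_lro_size / DEMO_LRO_CHUNK_SIZE;
}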
@@ -1167,6 +1167,22 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	else
 		rte_net_mlx5_dynf_inline_mask = 0;
+	if (dev->data->nb_rx_queues > 0) {
+		uint32_t max_lro_msg_size = priv->max_lro_msg_size;
+
+		if (max_lro_msg_size < MLX5_LRO_SEG_CHUNK_SIZE) {
+			uint32_t i;
+			struct mlx5_rxq_priv *rxq;
+
+			for (i = 0; i != priv->rxqs_n; ++i) {
+				rxq = mlx5_rxq_get(dev, i);
+				if (rxq && rxq->ctrl && rxq->ctrl->rxq.lro) {
+					DRV_LOG(ERR, "port %u invalid max LRO size",
+						dev->data->port_id);
+					rte_errno = EINVAL;
+					return -rte_errno;
+				}
+			}
+		}
+	}
 	ret = mlx5_dev_configure_rss_reta(dev);
 	if (ret) {
 		DRV_LOG(ERR, "port %u reta config failed: %s",
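
A short application-side sketch, assuming the caller wants to stay within the bounds
the ethdev layer reports: rte_eth_dev_info_get() exposes the device's max_lro_pkt_size
upper bound, while the lower bound enforced by the new check is mlx5-internal, so the
floor used below is a hypothetical application policy, not a PMD constant.

#include <rte_ethdev.h>

/* Clamp a requested LRO size to the device-reported maximum; the 1024-byte
 * floor is an assumed application policy, not the mlx5 minimum. */
static uint32_t
pick_lro_size(uint16_t port_id, uint32_t wanted)
{
	struct rte_eth_dev_info info;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return 0;
	if (info.max_lro_pkt_size != 0 && wanted > info.max_lro_pkt_size)
		wanted = info.max_lro_pkt_size;
	if (wanted < 1024)
		wanted = 1024;
	return wanted;
}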