mlx5en(4): Don't wait for the receive queue to fill up with mbufs when opening channels.

Failure to get mbufs may be transient, so don't permanently fail to open
the channels due to a lack of mbufs.
This also makes modifying channel parameters faster.

MFC after:	1 week
Sponsored by:	NVIDIA Networking
Author:	Hans Petter Selasky
Date:	2023-04-18 13:42:17 +02:00
parent 6bd4bb9bdb
commit 1943c40cd6
3 changed files with 0 additions and 33 deletions
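
For context, the deleted mlx5e_wait_for_min_rx_wqes() in the diff below polled up to
1000 times, 4 ms apart, and returned -ETIMEDOUT if the RX work queue had not reached
priv->params.min_rx_wqes. The stand-alone sketch below (hypothetical names and a
simulated shortage; not mlx5en(4) code) contrasts that bounded blocking wait with the
behaviour after this change, where the channel opens immediately and the ring is
simply refilled once mbufs become available again:

/*
 * Hypothetical stand-alone sketch (not driver code): contrast a bounded
 * polling wait at open time with opening immediately and refilling the
 * RX ring opportunistically.  A transient buffer shortage fails the
 * first approach outright but only delays the second one.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE	256	/* pretend RX ring size */
#define MIN_FILL	0x80	/* like MLX5E_PARAMS_DEFAULT_MIN_RX_WQES */

static bool mbufs_available;	/* false = simulated mbuf shortage */
static unsigned int ring_fill;	/* stands in for wq->cur_sz */

/* Post one RX buffer; fails while the simulated shortage lasts. */
static int
try_post_rx_buffer(void)
{
	if (!mbufs_available)
		return (-ENOBUFS);
	ring_fill++;
	return (0);
}

/* Removed pattern: block until the ring reaches a minimum fill level. */
static int
wait_for_min_fill(void)
{
	for (int i = 0; i < 1000; i++) {
		(void)try_post_rx_buffer();
		if (ring_fill >= MIN_FILL)
			return (0);
		/* the deleted driver code slept here: msleep(4) */
	}
	return (-ETIMEDOUT);
}

/* New behaviour: post what we can now and stop on the first failure. */
static void
opportunistic_refill(void)
{
	while (ring_fill < RING_SIZE && try_post_rx_buffer() == 0)
		continue;
}

int
main(void)
{
	if (wait_for_min_fill() != 0)
		printf("blocking open: fails with ETIMEDOUT during the shortage\n");

	opportunistic_refill();		/* at open time: may post nothing */
	printf("non-blocking open: proceeds with %u buffers posted\n", ring_fill);

	mbufs_available = true;		/* shortage clears later */
	opportunistic_refill();		/* e.g. retried from the RX path */
	printf("after the shortage clears: %u buffers posted\n", ring_fill);
	return (0);
}

With the wait removed, a transient shortage only delays RX ring replenishment instead
of failing mlx5e_open_channels() and, with it, the interface-open or parameter-change
path that called it.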

View File

@@ -107,7 +107,6 @@
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
-#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
 #define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7
 #define MLX5E_CACHELINE_SIZE CACHE_LINE_SIZE
 #define MLX5E_HW2SW_MTU(hwmtu) \
@@ -676,7 +675,6 @@ struct mlx5e_params {
 	u16 rx_cq_moderation_pkts;
 	u16 tx_cq_moderation_usec;
 	u16 tx_cq_moderation_pkts;
-	u16 min_rx_wqes;
 	bool hw_lro_en;
 	bool cqe_zipping_en;
 	u32 lro_wqe_sz;

View File

@@ -1035,11 +1035,6 @@ mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
 	priv->params_ethtool.rx_queue_size =
 	    1 << priv->params.log_rq_size;
-	/* update least number of RX WQEs */
-	priv->params.min_rx_wqes = min(
-	    priv->params_ethtool.rx_queue_size - 1,
-	    MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
 	/* restart network interface, if any */
 	if (was_opened)
 		mlx5e_open_locked(priv->ifp);

View File

@@ -1416,23 +1416,6 @@ mlx5e_disable_rq(struct mlx5e_rq *rq)
 	mlx5_core_destroy_rq(mdev, rq->rqn);
 }
-static int
-mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
-{
-	struct mlx5e_channel *c = rq->channel;
-	struct mlx5e_priv *priv = c->priv;
-	struct mlx5_wq_ll *wq = &rq->wq;
-	int i;
-	for (i = 0; i < 1000; i++) {
-		if (wq->cur_sz >= priv->params.min_rx_wqes)
-			return (0);
-		msleep(4);
-	}
-	return (-ETIMEDOUT);
-}
 static int
 mlx5e_open_rq(struct mlx5e_channel *c,
     struct mlx5e_rq_param *param,
@@ -2518,7 +2501,6 @@ mlx5e_open_channels(struct mlx5e_priv *priv)
 	struct mlx5e_channel_param *cparam;
 	int err;
 	int i;
-	int j;
 	cparam = malloc(sizeof(*cparam), M_MLX5EN, M_WAITOK);
@@ -2552,12 +2534,6 @@ mlx5e_open_channels(struct mlx5e_priv *priv)
 			intr_setaffinity(irq, CPU_WHICH_INTRHANDLER, &cpuset);
 		}
 	}
-	for (j = 0; j < priv->params.num_channels; j++) {
-		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j].rq);
-		if (err)
-			goto err_close_channels;
-	}
 	free(cparam, M_MLX5EN);
 	return (0);
@@ -3849,8 +3825,6 @@ mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
 	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
 	priv->params.tx_cq_moderation_pkts =
 	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
-	priv->params.min_rx_wqes =
-	    MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
 	priv->params.rx_hash_log_tbl_sz =
 	    (order_base_2(num_comp_vectors) >
 	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?