net/mlx5: fix Rx buffer replenishment threshold

The threshold of buffer replenishment for vectorized Rx burst is a constant
value (64). If the size of Rx queue is comparatively small, device could
run out of buffers. For example, if the size of Rx queue is 128, buffers
are replenished only twice per wraparound. This can cause jitter in
receiving packets and the jitter can cause unnecessary retransmission for
TCP connections.

Fixes: 6cb559d67b ("net/mlx5: add vectorized Rx/Tx burst for x86")
Fixes: 570acdb1da ("net/mlx5: add vectorized Rx/Tx burst for ARM")
Cc: stable@dpdk.org

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
This commit is contained in:
Yongseok Koh 2018-06-26 04:33:35 -07:00 committed by Ferruh Yigit
parent 342a7bdd6e
commit e10245a13b
4 changed files with 7 additions and 6 deletions

View File

@@ -64,10 +64,11 @@
 #define MLX5_VPMD_MIN_TXQS 4
 
 /* Threshold of buffer replenishment for vectorized Rx. */
-#define MLX5_VPMD_RXQ_RPLNSH_THRESH 64U
+#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \
+	(RTE_MIN(MLX5_VPMD_RX_MAX_BURST, (unsigned int)(n) >> 2))
 
 /* Maximum size of burst for vectorized Rx. */
-#define MLX5_VPMD_RX_MAX_BURST MLX5_VPMD_RXQ_RPLNSH_THRESH
+#define MLX5_VPMD_RX_MAX_BURST 64U
 
 /*
  * Maximum size of burst for vectorized Tx. This is related to the maximum size

View File

@@ -91,9 +91,9 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
 		&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
 	unsigned int i;
 
-	assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
+	assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
 	assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
-	assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
+	assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP);
 	/* Not to cross queue end. */
 	n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
 	if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {

View File

@@ -739,7 +739,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
 	 * N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
 	 */
 	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
-	if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
+	if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
 		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
 	/* See if there're unreturned mbufs from compressed CQE. */
 	rcvd_pkt = rxq->cq_ci - rxq->rq_pi;

View File

@@ -724,7 +724,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
 	 * N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
 	 */
 	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
-	if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
+	if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
 		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
 	/* See if there're unreturned mbufs from compressed CQE. */
 	rcvd_pkt = rxq->cq_ci - rxq->rq_pi;