net/mlx5: fix Tx doorbell

Too much data is needlessly written to the Tx doorbell; since v16.11 this
may also cause Tx queues to behave erratically and crash applications.

This regression was seen on VF devices, where the BlueFlame buffer size
(txq->bf_buf_size) is zero, due to the following change:

 -       txq->bf_offset ^= txq->bf_buf_size;
 +       txq->bf_offset ^= (1 << txq->bf_buf_size);
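As an aside for reviewers (not part of the patch): a minimal, self-contained
C sketch of the arithmetic behind the regression. Once bf_buf_size holds a
log2 value, a zero BlueFlame buffer size makes the XOR toggle the offset
between 0 and 1 instead of leaving it at 0, shifting every doorbell write to
a bogus address:

  /* Illustration only; hypothetical variables mirroring the txq fields. */
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
  	uint16_t bf_offset = 0;
  	const uint16_t bf_buf_size = 0; /* VF: BlueFlame disabled */

  	bf_offset ^= bf_buf_size;        /* old code: offset stays 0 */
  	printf("raw size:  offset = %u\n", bf_offset);
  	bf_offset ^= (1 << bf_buf_size); /* log2 size: 1 << 0 == 1 */
  	printf("log2 size: offset = %u\n", bf_offset);
  	return 0;
  }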

Fixes: 1d88ba1719 ("net/mlx5: refactor Tx data path")
Fixes: d5793daefe ("net/mlx5: reduce memory overhead for BF handling")

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>

--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c

@@ -82,7 +82,8 @@ txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
 	__attribute__((always_inline));
 
 static inline void
-mlx5_tx_dbrec(struct txq *txq) __attribute__((always_inline));
+mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
+	__attribute__((always_inline));
 
 static inline uint32_t
 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
@@ -326,23 +327,20 @@ txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
  *
  * @param txq
  *   Pointer to TX queue structure.
+ * @param wqe
+ *   Pointer to the last WQE posted in the NIC.
  */
 static inline void
-mlx5_tx_dbrec(struct txq *txq)
+mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
 {
-	uint8_t *dst = (uint8_t *)((uintptr_t)txq->bf_reg + txq->bf_offset);
-	uint32_t data[4] = {
-		htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
-		htonl(txq->qp_num_8s),
-		0,
-		0,
-	};
+	uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
+	volatile uint64_t *src = ((volatile uint64_t *)wqe);
 
 	rte_wmb();
 	*txq->qp_db = htonl(txq->wqe_ci);
+	/* Ensure ordering between DB record and BF copy. */
 	rte_wmb();
-	memcpy(dst, (uint8_t *)data, 16);
-	txq->bf_offset ^= (1 << txq->bf_buf_size);
+	*dst = *src;
 }
 
 /**
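As a side note (not part of the patch), the rewritten mlx5_tx_dbrec() above
boils down to: update the doorbell record with the WQE counter, then copy
only the first 8 bytes of the last WQE to the doorbell register, with a
write barrier between the two stores. A minimal sketch of that sequence,
assuming simplified stand-in types and using __sync_synchronize() where the
driver uses rte_wmb():

  /* Sketch only; types and names are simplified stand-ins. */
  #include <stdint.h>
  #include <arpa/inet.h> /* htonl() */

  struct fake_txq {
  	volatile uint32_t *qp_db;  /* doorbell record in host memory */
  	volatile uint64_t *bf_reg; /* mapped doorbell register */
  	uint16_t wqe_ci;           /* WQE consumer index */
  };

  static inline void
  ring_doorbell(struct fake_txq *txq, const volatile uint64_t *wqe)
  {
  	__sync_synchronize();             /* WQEs visible before DB record */
  	*txq->qp_db = htonl(txq->wqe_ci);
  	__sync_synchronize();             /* DB record before register copy */
  	*txq->bf_reg = *wqe;              /* only the first 64 bits are sent */
  }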
@@ -610,7 +608,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	txq->stats.opackets += i;
 #endif
 	/* Ring QP doorbell. */
-	mlx5_tx_dbrec(txq);
+	mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)wqe);
 	txq->elts_head = elts_head;
 	return i;
 }
@@ -817,7 +815,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	/* Ring QP doorbell. */
 	if (mpw.state == MLX5_MPW_STATE_OPENED)
 		mlx5_mpw_close(txq, &mpw);
-	mlx5_tx_dbrec(txq);
+	mlx5_tx_dbrec(txq, mpw.wqe);
 	txq->elts_head = elts_head;
 	return i;
 }
@@ -1085,7 +1083,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 		mlx5_mpw_inline_close(txq, &mpw);
 	else if (mpw.state == MLX5_MPW_STATE_OPENED)
 		mlx5_mpw_close(txq, &mpw);
-	mlx5_tx_dbrec(txq);
+	mlx5_tx_dbrec(txq, mpw.wqe);
 	txq->elts_head = elts_head;
 	return i;
 }

--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h

@@ -252,8 +252,6 @@ struct txq {
 	uint16_t elts_n:4; /* (*elts)[] length (in log2). */
 	uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
 	uint16_t wqe_n:4; /* Number of of WQ elements (in log2). */
-	uint16_t bf_buf_size:4; /* Log2 Blueflame size. */
-	uint16_t bf_offset; /* Blueflame offset. */
 	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
 	uint32_t qp_num_8s; /* QP number shifted by 8. */
 	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */

--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c

@@ -220,8 +220,6 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
 	tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
 	tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
 	tmpl->txq.bf_reg = qp->gen_data.bf->reg;
-	tmpl->txq.bf_offset = qp->gen_data.bf->offset;
-	tmpl->txq.bf_buf_size = log2above(qp->gen_data.bf->buf_size);
 	tmpl->txq.cq_db = cq->dbrec;
 	tmpl->txq.cqes =
 		(volatile struct mlx5_cqe (*)[])