net/mlx5: implement descriptor status API
Since there is no "descriptor done" flag like on Intel drivers, the approach is different on the mlx5 driver:
- for Tx, we call txq_complete() to free descriptors processed by the hw, then we check whether the descriptor is between tail and head;
- for Rx, we browse the cqes, managing compressed ones, to get the number of used descriptors.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
commit 8788fec1f2 (parent 68a43d1bb0)
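As a usage illustration (not part of the patch), an application reaches these callbacks through the generic ethdev helpers. The sketch below assumes the rte_eth_rx_descriptor_status()/rte_eth_tx_descriptor_status() wrappers introduced together with this API; the port, queue and offset values are arbitrary examples and error handling is omitted.

#include <stdio.h>
#include <rte_ethdev.h>

/* Hedged usage sketch: query ring slot 32 on (port 0, queue 0) without
 * enqueuing or dequeuing anything. */
static void
show_descriptor_status(void)
{
	int st;

	/* Rx: has the HW delivered a packet into this slot yet? */
	st = rte_eth_rx_descriptor_status(0, 0, 32);
	if (st == RTE_ETH_RX_DESC_DONE)
		printf("Rx slot 32: packet ready to be retrieved\n");
	else if (st == RTE_ETH_RX_DESC_AVAIL)
		printf("Rx slot 32: still empty, waiting for the HW\n");

	/* Tx: has the HW finished transmitting from this slot? */
	st = rte_eth_tx_descriptor_status(0, 0, 32);
	if (st == RTE_ETH_TX_DESC_FULL)
		printf("Tx slot 32: still in flight\n");
	else if (st == RTE_ETH_TX_DESC_DONE)
		printf("Tx slot 32: transmitted, slot reusable\n");
}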
@@ -27,6 +27,8 @@ VLAN offload = Y
 L3 checksum offload = Y
 L4 checksum offload = Y
 Packet type parsing = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
 Basic stats = Y
 Stats per queue = Y
 Multiprocess aware = Y
@@ -222,6 +222,8 @@ static const struct eth_dev_ops mlx5_dev_ops = {
 	.rss_hash_update = mlx5_rss_hash_update,
 	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
 	.filter_ctrl = mlx5_dev_filter_ctrl,
+	.rx_descriptor_status = mlx5_rx_descriptor_status,
+	.tx_descriptor_status = mlx5_tx_descriptor_status,
 };
 
 static struct {
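For context, a simplified model of the dispatch path (not the actual rte_ethdev code): the generic helpers resolve (port_id, queue_id) to the queue object the PMD created, then call the callback registered above with that queue pointer and the descriptor offset, which matches the (void *, uint16_t) callback signature. All "demo_" names below are hypothetical.

#include <stdint.h>

typedef int (*demo_descriptor_status_t)(void *queue, uint16_t offset);

struct demo_eth_dev {
	void *rx_queues[8];	/* queue objects created by the PMD */
	void *tx_queues[8];
	demo_descriptor_status_t rx_descriptor_status;	/* e.g. mlx5_rx_descriptor_status */
	demo_descriptor_status_t tx_descriptor_status;	/* e.g. mlx5_tx_descriptor_status */
};

/* What a generic rx_descriptor_status(port, queue, offset) boils down to:
 * look up the queue object, then call the driver callback with it. */
static int
demo_rx_descriptor_status(struct demo_eth_dev *dev, uint16_t queue_id,
			  uint16_t offset)
{
	return dev->rx_descriptor_status(dev->rx_queues[queue_id], offset);
}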
@@ -344,6 +344,82 @@ mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
 	*dst = *src;
 }
 
+/**
+ * DPDK callback to check the status of a tx descriptor.
+ *
+ * @param tx_queue
+ *   The tx queue.
+ * @param[in] offset
+ *   The index of the descriptor in the ring.
+ *
+ * @return
+ *   The status of the tx descriptor.
+ */
+int
+mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+	struct txq *txq = tx_queue;
+	const unsigned int elts_n = 1 << txq->elts_n;
+	const unsigned int elts_cnt = elts_n - 1;
+	unsigned int used;
+
+	txq_complete(txq);
+	used = (txq->elts_head - txq->elts_tail) & elts_cnt;
+	if (offset < used)
+		return RTE_ETH_TX_DESC_FULL;
+	return RTE_ETH_TX_DESC_DONE;
+}
+
+/**
+ * DPDK callback to check the status of a rx descriptor.
+ *
+ * @param rx_queue
+ *   The rx queue.
+ * @param[in] offset
+ *   The index of the descriptor in the ring.
+ *
+ * @return
+ *   The status of the rx descriptor.
+ */
+int
+mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+	struct rxq *rxq = rx_queue;
+	struct rxq_zip *zip = &rxq->zip;
+	volatile struct mlx5_cqe *cqe;
+	const unsigned int cqe_n = (1 << rxq->cqe_n);
+	const unsigned int cqe_cnt = cqe_n - 1;
+	unsigned int cq_ci;
+	unsigned int used;
+
+	/* if we are processing a compressed cqe */
+	if (zip->ai) {
+		used = zip->cqe_cnt - zip->ca;
+		cq_ci = zip->cq_ci;
+	} else {
+		used = 0;
+		cq_ci = rxq->cq_ci;
+	}
+	cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
+	while (check_cqe(cqe, cqe_n, cq_ci) == 0) {
+		int8_t op_own;
+		unsigned int n;
+
+		op_own = cqe->op_own;
+		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
+			n = ntohl(cqe->byte_cnt);
+		else
+			n = 1;
+		cq_ci += n;
+		used += n;
+		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
+	}
+	used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
+	if (offset < used)
+		return RTE_ETH_RX_DESC_DONE;
+	return RTE_ETH_RX_DESC_AVAIL;
+}
+
 /**
  * DPDK callback for TX.
  *
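The two counting schemes above can be illustrated with a self-contained sketch; the structures and values below are made up for the example and are not the driver's. On the Tx side, free-running 16-bit head/tail indices masked by the power-of-two ring size give the number of in-flight descriptors even across wrap-around. On the Rx side, completion entries are walked until an unowned one is found, with a compressed entry contributing its whole batch count instead of 1.

#include <stdio.h>
#include <stdint.h>

/* Tx-style occupancy: head and tail run freely modulo 65536; masking the
 * difference with (size - 1) works because size is a power of two. */
static unsigned int
ring_used(uint16_t head, uint16_t tail, unsigned int size)
{
	return (unsigned int)((uint16_t)(head - tail)) & (size - 1);
}

/* Rx-style count: mock completion queue where a compressed entry stands
 * for a whole batch of completions. */
struct mock_cqe {
	int owned;          /* written by the HW and not consumed yet */
	int compressed;     /* summarizes several completions */
	unsigned int count; /* batch size when compressed */
};

static unsigned int
count_done(const struct mock_cqe *cq, unsigned int cq_size, unsigned int ci)
{
	unsigned int used = 0;

	while (cq[ci & (cq_size - 1)].owned) {
		const struct mock_cqe *c = &cq[ci & (cq_size - 1)];
		unsigned int n = c->compressed ? c->count : 1;

		ci += n;
		used += n;
	}
	return used;
}

int
main(void)
{
	/* Head wrapped past 0 while tail did not: 8 descriptors in flight. */
	printf("tx used = %u\n", ring_used(5, 65533, 512));

	/* One plain completion plus one compressed batch of 3: 4 done. */
	struct mock_cqe cq[8] = {
		[0] = { .owned = 1 },
		[1] = { .owned = 1, .compressed = 1, .count = 3 },
	};
	printf("rx used = %u\n", count_done(cq, 8, 0));
	return 0;
}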
@@ -323,6 +323,8 @@ uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
 uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t);
 uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
 uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
+int mlx5_rx_descriptor_status(void *, uint16_t);
+int mlx5_tx_descriptor_status(void *, uint16_t);
 
 /* mlx5_mr.c */
 