net/qede: support Rx descriptor status

This patch implements the eth_dev_ops->rx_descriptor_status
callback.
Walk through the receive completion ring to calculate the number
of receive descriptors used by firmware, then report the status
of the descriptor at the given offset accordingly.
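
For illustration, a minimal usage sketch of the generic API this
callback backs. port_id, queue_id, and offset are hypothetical
application values; rte_eth_rx_descriptor_status() is the existing
ethdev wrapper that dispatches to the PMD callback:

#include <rte_ethdev.h>

/* Query the state of the Rx descriptor 'offset' entries past the
 * software consumer index of the given queue.
 */
static void
check_rx_descriptor(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	int status = rte_eth_rx_descriptor_status(port_id, queue_id, offset);

	switch (status) {
	case RTE_ETH_RX_DESC_DONE:
		/* descriptor has been filled by HW; a packet is pending */
		break;
	case RTE_ETH_RX_DESC_AVAIL:
		/* descriptor is posted and available for HW to fill */
		break;
	case RTE_ETH_RX_DESC_UNAVAIL:
		/* offset is beyond the descriptors currently in use */
		break;
	default:
		/* negative errno, e.g. -EINVAL for an out-of-range offset */
		break;
	}
}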

Signed-off-by: Shahed Shaikh <shahed.shaikh@cavium.com>

drivers/net/qede/qede_ethdev.c

@@ -2299,6 +2299,7 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
@@ -2340,6 +2341,7 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
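
For orientation, a simplified sketch of how the generic API reaches
the callback registered above. This is an approximation of the
ethdev-layer dispatch, not verbatim library source, and
dispatch_rx_descriptor_status is a made-up name:

/* The ethdev wrapper resolves the port's ops table and hands the PMD
 * the raw queue pointer rather than the port/queue ids.
 */
static int
dispatch_rx_descriptor_status(struct rte_eth_dev *dev, uint16_t queue_id,
			      uint16_t offset)
{
	void *rxq;

	if (dev->dev_ops->rx_descriptor_status == NULL)
		return -ENOTSUP;

	rxq = dev->data->rx_queues[queue_id];
	return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
}

With this patch, both the PF and VF ops tables resolve the call to
qede_rx_descriptor_status().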

drivers/net/qede/qede_rxtx.c

@@ -2151,3 +2151,84 @@ qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
{
	return 0;
}

/* This function does a fake walk through the completion queue to
 * calculate the number of BDs used by HW, then restores the
 * completion queue to its original state.
 */
static uint16_t
qede_parse_fp_cqe(struct qede_rx_queue *rxq)
{
	uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0;
	union eth_rx_cqe *cqe, *orig_cqe = NULL;

	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);

	if (hw_comp_cons == sw_comp_cons)
		return 0;

	/* Get the CQE from the completion ring */
	cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
	orig_cqe = cqe;

	while (sw_comp_cons != hw_comp_cons) {
		switch (cqe->fast_path_regular.type) {
		case ETH_RX_CQE_TYPE_REGULAR:
			bd_count += cqe->fast_path_regular.bd_num;
			break;
		case ETH_RX_CQE_TYPE_TPA_END:
			bd_count += cqe->fast_path_tpa_end.num_of_bds;
			break;
		default:
			break;
		}

		cqe = (union eth_rx_cqe *)
		      ecore_chain_consume(&rxq->rx_comp_ring);
		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
	}

	/* revert comp_ring to original state */
	ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe);

	return bd_count;
}

int
qede_rx_descriptor_status(void *p_rxq, uint16_t offset)
{
	uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod;
	uint16_t produced, consumed;
	struct qede_rx_queue *rxq = p_rxq;

	if (offset > rxq->nb_rx_desc)
		return -EINVAL;

	sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring);
	sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);

	/* find BDs used by HW from completion queue elements */
	hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq);

	if (hw_bd_cons < sw_bd_cons)
		/* wraparound case */
		consumed = (0xffff - sw_bd_cons) + hw_bd_cons;
	else
		consumed = hw_bd_cons - sw_bd_cons;

	if (offset <= consumed)
		return RTE_ETH_RX_DESC_DONE;

	if (sw_bd_prod < sw_bd_cons)
		/* wraparound case */
		produced = (0xffff - sw_bd_cons) + sw_bd_prod;
	else
		produced = sw_bd_prod - sw_bd_cons;

	if (offset <= produced)
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_UNAVAIL;
}
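
A note on the wraparound branches above: the producer and consumer
indices are free-running 16-bit counters, so once the leading index
wraps past 0xffff a plain subtraction would underflow. Below is a
standalone sketch of the same arithmetic with hypothetical names
(ring_distance is not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Distance from 'cons' to 'prod' on a free-running 16-bit ring index,
 * mirroring the wraparound handling in qede_rx_descriptor_status().
 */
static uint16_t
ring_distance(uint16_t cons, uint16_t prod)
{
	if (prod < cons)
		/* wraparound case */
		return (0xffff - cons) + prod;
	return prod - cons;
}

int
main(void)
{
	printf("%u\n", ring_distance(500, 600));       /* no wrap: 100 */
	printf("%u\n", ring_distance(0xfff0, 0x0010)); /* wrapped */
	return 0;
}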

drivers/net/qede/qede_rxtx.h

@@ -276,6 +276,8 @@ int qede_start_queues(struct rte_eth_dev *eth_dev);
void qede_stop_queues(struct rte_eth_dev *eth_dev);
int qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
			  uint16_t max_frame_size);
int
qede_rx_descriptor_status(void *rxq, uint16_t offset);
/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);