net/avf: enable ops to check queue info and status

- rxq_info_get
- txq_info_get
- rx_queue_count
- rx_descriptor_status
- tx_descriptor_status

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
commit ca01bc1ab7 (parent 3fd7a3719c)
Author: Jingjing Wu <jingjing.wu@intel.com>
Date: 2018-01-10 21:02:02 +08:00
Committed-by: Ferruh Yigit
4 changed files, 134 insertions(+), 0 deletions(-)
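
For context, the sketch below is not part of this commit; it shows how an application would reach the new ops through the generic ethdev API. The port/queue setup and the dump_queue_state() helper name are assumed for illustration.

#include <stdio.h>
#include <rte_ethdev.h>

/* Query queue configuration and descriptor state on a started port;
 * each call below is serviced by one of the ops this commit adds.
 */
static void
dump_queue_state(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info rx_qinfo;
	struct rte_eth_txq_info tx_qinfo;

	/* Serviced by .rxq_info_get / .txq_info_get */
	if (rte_eth_rx_queue_info_get(port_id, queue_id, &rx_qinfo) == 0)
		printf("rxq %u: %u descriptors\n", queue_id, rx_qinfo.nb_desc);
	if (rte_eth_tx_queue_info_get(port_id, queue_id, &tx_qinfo) == 0)
		printf("txq %u: %u descriptors\n", queue_id, tx_qinfo.nb_desc);

	/* Serviced by .rx_queue_count */
	printf("used rx descriptors: %d\n",
	       rte_eth_rx_queue_count(port_id, queue_id));

	/* Serviced by .rx_descriptor_status / .tx_descriptor_status */
	if (rte_eth_rx_descriptor_status(port_id, queue_id, 0) ==
	    RTE_ETH_RX_DESC_DONE)
		printf("first rx descriptor has a packet ready\n");
	if (rte_eth_tx_descriptor_status(port_id, queue_id, 0) ==
	    RTE_ETH_TX_DESC_DONE)
		printf("first tx slot can be reused\n");
}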

doc/guides/nics/features/avf.ini

@@ -25,6 +25,8 @@ VLAN offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
Packet type parsing = Y
Rx descriptor status = Y
Tx descriptor status = Y
Basic stats = Y
Multiprocess aware = Y
BSD nic_uio = Y

drivers/net/avf/avf_ethdev.c

@@ -105,6 +105,11 @@ static const struct eth_dev_ops avf_eth_dev_ops = {
	.reta_query = avf_dev_rss_reta_query,
	.rss_hash_update = avf_dev_rss_hash_update,
	.rss_hash_conf_get = avf_dev_rss_hash_conf_get,
	.rxq_info_get = avf_dev_rxq_info_get,
	.txq_info_get = avf_dev_txq_info_get,
	.rx_queue_count = avf_dev_rxq_count,
	.rx_descriptor_status = avf_dev_rx_desc_status,
	.tx_descriptor_status = avf_dev_tx_desc_status,
	.mtu_set = avf_dev_mtu_set,
};

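These entries are only function pointers; the sketch below paraphrases how the ethdev inline wrappers dispatch to them. It is a simplified rendering of the rte_ethdev layer, not code from this commit, and it omits the port/queue validation the real wrapper performs.

#include <errno.h>
#include <rte_ethdev.h>

/* Simplified paraphrase of rte_eth_rx_descriptor_status(): look up the
 * port's dev_ops table and invoke the driver hook registered above
 * (avf_dev_rx_desc_status here), passing the raw queue pointer.
 */
static inline int
rx_descriptor_status_sketch(uint16_t port_id, uint16_t queue_id,
			    uint16_t offset)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (dev->dev_ops->rx_descriptor_status == NULL)
		return -ENOTSUP;
	return (*dev->dev_ops->rx_descriptor_status)(
			dev->data->rx_queues[queue_id], offset);
}
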
drivers/net/avf/avf_rxtx.c

@@ -1385,3 +1385,123 @@ avf_set_tx_function(struct rte_eth_dev *dev)
	dev->tx_pkt_burst = avf_xmit_pkts;
	dev->tx_pkt_prepare = avf_prep_pkts;
}

void
avf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		     struct rte_eth_rxq_info *qinfo)
{
	struct avf_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mp;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;

	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = TRUE;
	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
}

void
avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		     struct rte_eth_txq_info *qinfo)
{
	struct avf_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;

	qinfo->conf.tx_free_thresh = txq->free_thresh;
	qinfo->conf.tx_rs_thresh = txq->rs_thresh;
	qinfo->conf.txq_flags = txq->txq_flags;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}

/* Get the number of used descriptors of a rx queue */
uint32_t
avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
{
#define AVF_RXQ_SCAN_INTERVAL 4
	volatile union avf_rx_desc *rxdp;
	struct avf_rx_queue *rxq;
	uint16_t desc = 0;

	rxq = dev->data->rx_queues[queue_id];
	rxdp = &rxq->rx_ring[rxq->rx_tail];
	while ((desc < rxq->nb_rx_desc) &&
	       ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
		 AVF_RXD_QW1_STATUS_MASK) >> AVF_RXD_QW1_STATUS_SHIFT) &
	       (1 << AVF_RX_DESC_STATUS_DD_SHIFT)) {
		/* Check the DD bit of a rx descriptor of each 4 in a group,
		 * to avoid checking too frequently and downgrading performance
		 * too much.
		 */
		desc += AVF_RXQ_SCAN_INTERVAL;
		rxdp += AVF_RXQ_SCAN_INTERVAL;
		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
			rxdp = &(rxq->rx_ring[rxq->rx_tail +
					desc - rxq->nb_rx_desc]);
	}

	return desc;
}

int
avf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
{
	struct avf_rx_queue *rxq = rx_queue;
	volatile uint64_t *status;
	uint64_t mask;
	uint32_t desc;

	if (unlikely(offset >= rxq->nb_rx_desc))
		return -EINVAL;

	if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
		return RTE_ETH_RX_DESC_UNAVAIL;

	desc = rxq->rx_tail + offset;
	if (desc >= rxq->nb_rx_desc)
		desc -= rxq->nb_rx_desc;

	status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
	mask = rte_le_to_cpu_64((1ULL << AVF_RX_DESC_STATUS_DD_SHIFT)
		<< AVF_RXD_QW1_STATUS_SHIFT);
	if (*status & mask)
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
avf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
{
	struct avf_tx_queue *txq = tx_queue;
	volatile uint64_t *status;
	uint64_t mask, expect;
	uint32_t desc;

	if (unlikely(offset >= txq->nb_tx_desc))
		return -EINVAL;

	desc = txq->tx_tail + offset;
	/* go to next desc that has the RS bit */
	desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
		txq->rs_thresh;
	if (desc >= txq->nb_tx_desc) {
		desc -= txq->nb_tx_desc;
		if (desc >= txq->nb_tx_desc)
			desc -= txq->nb_tx_desc;
	}

	status = &txq->tx_ring[desc].cmd_type_offset_bsz;
	mask = rte_le_to_cpu_64(AVF_TXD_QW1_DTYPE_MASK);
	expect = rte_cpu_to_le_64(
		 AVF_TX_DESC_DTYPE_DESC_DONE << AVF_TXD_QW1_DTYPE_SHIFT);
	if ((*status & mask) == expect)
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}

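A subtlety in avf_dev_tx_desc_status() above: the hardware writes completion status back only to descriptors sent with the RS (Report Status) bit, which the Tx path sets once every rs_thresh descriptors, so the requested offset is first rounded up to the next RS boundary and wrapped into the ring. The standalone sketch below reproduces just that index arithmetic; the ring size, threshold, and sample values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Round a descriptor index up to the next RS-threshold boundary and
 * wrap it into the ring; index math only, no hardware access.
 */
static uint32_t
next_rs_boundary(uint32_t tx_tail, uint16_t offset, uint16_t rs_thresh,
		 uint16_t nb_tx_desc)
{
	uint32_t desc = tx_tail + offset;

	desc = ((desc + rs_thresh - 1) / rs_thresh) * rs_thresh;
	if (desc >= nb_tx_desc) {
		desc -= nb_tx_desc;
		if (desc >= nb_tx_desc)
			desc -= nb_tx_desc;
	}
	return desc;
}

int
main(void)
{
	/* Illustrative values: 512-entry ring, RS bit every 32 descs. */
	printf("%u\n", next_rs_boundary(100, 5, 32, 512));  /* prints 128 */
	printf("%u\n", next_rs_boundary(500, 40, 32, 512)); /* prints 32 */
	return 0;
}
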
drivers/net/avf/avf_rxtx.h

@@ -147,6 +147,13 @@ uint16_t avf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		       uint16_t nb_pkts);
void avf_set_rx_function(struct rte_eth_dev *dev);
void avf_set_tx_function(struct rte_eth_dev *dev);
void avf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			  struct rte_eth_rxq_info *qinfo);
void avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			  struct rte_eth_txq_info *qinfo);
uint32_t avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id);
int avf_dev_rx_desc_status(void *rx_queue, uint16_t offset);
int avf_dev_tx_desc_status(void *tx_queue, uint16_t offset);

static inline
void avf_dump_rx_descriptor(struct avf_rx_queue *rxq,