net/txgbe: add Rx and Tx descriptor status

Support checking the status of Rx and Tx descriptors.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
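
These two callbacks back the generic rte_eth_rx_descriptor_status() and
rte_eth_tx_descriptor_status() calls. A minimal usage sketch, assuming an
already-configured port (port_id, the queue ids and BURST are hypothetical,
not part of this commit):

    /* At least one packet ready at the Rx tail? */
    if (rte_eth_rx_descriptor_status(port_id, rx_queue_id, 0) ==
            RTE_ETH_RX_DESC_DONE)
        nb_rx = rte_eth_rx_burst(port_id, rx_queue_id, pkts, BURST);

    /* Descriptor BURST slots past the Tx tail already reclaimable,
     * i.e. room to enqueue roughly BURST more packets?
     */
    if (rte_eth_tx_descriptor_status(port_id, tx_queue_id, BURST) ==
            RTE_ETH_TX_DESC_DONE)
        nb_tx = rte_eth_tx_burst(port_id, tx_queue_id, pkts, BURST);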

@@ -36,6 +36,8 @@ Inner L3 checksum = P
Inner L4 checksum = P
Packet type parsing = Y
Timesync = Y
Rx descriptor status = Y
Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
Stats per queue = Y

@@ -480,6 +480,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &txgbe_eth_dev_ops;
	eth_dev->rx_queue_count = txgbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
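
Note these are fields on struct rte_eth_dev itself (alongside the burst
functions) rather than eth_dev_ops entries, because the ethdev layer calls
them through static inline wrappers on the fast path. Roughly, paraphrasing
the rte_ethdev.h wrapper of this era (not verbatim):

    static inline int
    rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
            uint16_t offset)
    {
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        void *rxq = dev->data->rx_queues[queue_id];

        /* reaches txgbe_dev_rx_descriptor_status() on a txgbe port */
        return (*dev->rx_descriptor_status)(rxq, offset);
    }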

@@ -219,6 +219,12 @@ int txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);

uint32_t txgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
		uint16_t rx_queue_id);

int txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);

int txgbe_dev_rx_init(struct rte_eth_dev *dev);
void txgbe_dev_tx_init(struct rte_eth_dev *dev);
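
The new prototypes take the queue pointer directly rather than a port id,
since the generic wrapper has already resolved the queue. They must match
the ethdev callback types, which in this era read roughly (paraphrased from
rte_ethdev_core.h):

    typedef int (*eth_rx_descriptor_status_t)(void *rxq, uint16_t offset);
    typedef int (*eth_tx_descriptor_status_t)(void *txq, uint16_t offset);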

@@ -2529,6 +2529,79 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
	return 0;
}

uint32_t
txgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#define TXGBE_RXQ_SCAN_INTERVAL 4
	volatile struct txgbe_rx_desc *rxdp;
	struct txgbe_rx_queue *rxq;
	uint32_t desc = 0;

	rxq = dev->data->rx_queues[rx_queue_id];
	rxdp = &rxq->rx_ring[rxq->rx_tail];

	while ((desc < rxq->nb_rx_desc) &&
		(rxdp->qw1.lo.status &
			rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))) {
		desc += TXGBE_RXQ_SCAN_INTERVAL;
		rxdp += TXGBE_RXQ_SCAN_INTERVAL;
		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
			rxdp = &(rxq->rx_ring[rxq->rx_tail +
				desc - rxq->nb_rx_desc]);
	}

	return desc;
}

int
txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct txgbe_rx_queue *rxq = rx_queue;
	volatile uint32_t *status;
	uint32_t nb_hold, desc;

	if (unlikely(offset >= rxq->nb_rx_desc))
		return -EINVAL;

	nb_hold = rxq->nb_rx_hold;
	if (offset >= rxq->nb_rx_desc - nb_hold)
		return RTE_ETH_RX_DESC_UNAVAIL;

	desc = rxq->rx_tail + offset;
	if (desc >= rxq->nb_rx_desc)
		desc -= rxq->nb_rx_desc;

	status = &rxq->rx_ring[desc].qw1.lo.status;
	if (*status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct txgbe_tx_queue *txq = tx_queue;
	volatile uint32_t *status;
	uint32_t desc;

	if (unlikely(offset >= txq->nb_tx_desc))
		return -EINVAL;

	desc = txq->tx_tail + offset;
	if (desc >= txq->nb_tx_desc) {
		desc -= txq->nb_tx_desc;
		if (desc >= txq->nb_tx_desc)
			desc -= txq->nb_tx_desc;
	}

	status = &txq->tx_ring[desc].dw3;
	if (*status & rte_cpu_to_le_32(TXGBE_TXD_DD))
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}

void __rte_cold
txgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
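
A few notes on the implementations above. txgbe_dev_rx_queue_count() scans
DD (descriptor done) bits forward from rx_tail in strides of 4, so its cost
grows with ring occupancy and it returns a multiple of the scan interval
rather than an exact count. txgbe_dev_rx_descriptor_status() reports the
last nb_rx_hold slots as RTE_ETH_RX_DESC_UNAVAIL: those descriptors have
been consumed but not yet rearmed with fresh buffers.
txgbe_dev_tx_descriptor_status() reads the DD flag from dw3 of the
descriptor and reports RTE_ETH_TX_DESC_FULL until hardware writes that flag
back. A sketch of the queue-count side of the API (ids and thresholds
hypothetical):

    /* Scale the Rx burst size with ring occupancy. */
    int used = rte_eth_rx_queue_count(port_id, rx_queue_id);
    if (used >= 0) {
        uint16_t burst = used > 64 ? 64 : 16;
        nb_rx = rte_eth_rx_burst(port_id, rx_queue_id, pkts, burst);
    }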