ethdev: remove legacy Rx descriptor done API

rte_eth_rx_descriptor_status() should be used as a replacement.
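
For applications that still call the removed function, the conversion is
mechanical. A minimal migration sketch (the helper name is illustrative,
not part of this commit):

    #include <rte_ethdev.h>

    /* Old (removed): rte_eth_rx_descriptor_done() returned 1 when the DD
     * bit of the descriptor 'offset' entries past the tail was set, 0
     * otherwise.
     * New: rte_eth_rx_descriptor_status() reports one of three states.
     */
    static inline int
    rx_descriptor_is_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
    {
    	int status = rte_eth_rx_descriptor_status(port_id, queue_id, offset);

    	/* Negative values are errors (-ENODEV, -ENOTSUP, ...). */
    	return status == RTE_ETH_RX_DESC_DONE;
    }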

Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
Author: Andrew Rybchenko, 2021-10-11 16:24:53 +03:00; committed by Ferruh Yigit
Parent: 1dc6665d36
Commit: 6c31a8c20a
30 changed files with 5 additions and 224 deletions

@@ -651,14 +651,10 @@ Rx descriptor status
 --------------------

 Supports check the status of a Rx descriptor. When ``rx_descriptor_status`` is
-used, status can be "Available", "Done" or "Unavailable". When
-``rx_descriptor_done`` is used, status can be "DD bit is set" or "DD bit is
-not set".
+used, status can be "Available", "Done" or "Unavailable".

 * **[implements] rte_eth_dev**: ``rx_descriptor_status``.
 * **[related] API**: ``rte_eth_rx_descriptor_status()``.
-* **[implements] rte_eth_dev**: ``rx_descriptor_done``.
-* **[related] API**: ``rte_eth_rx_descriptor_done()``.

 .. _nic_features_tx_descriptor_status:
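
The remaining tri-state API maps onto the three documented states as below;
a hedged sketch assuming a configured and started port, using the
RTE_ETH_RX_DESC_* constants from rte_ethdev.h:

    #include <rte_ethdev.h>

    /* Sketch: classify one Rx descriptor 'offset' entries past the SW tail. */
    static const char *
    rx_descriptor_state_name(uint16_t port_id, uint16_t queue_id,
    			 uint16_t offset)
    {
    	switch (rte_eth_rx_descriptor_status(port_id, queue_id, offset)) {
    	case RTE_ETH_RX_DESC_AVAIL:
    		return "Available";   /* owned by HW, no packet yet */
    	case RTE_ETH_RX_DESC_DONE:
    		return "Done";        /* packet ready for rx_burst */
    	case RTE_ETH_RX_DESC_UNAVAIL:
    		return "Unavailable"; /* outside the usable ring */
    	default:
    		return "error";       /* e.g. -ENODEV or -ENOTSUP */
    	}
    }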

@@ -117,11 +117,6 @@ Deprecation Notices
   the device packet overhead can be calculated as:
   ``(struct rte_eth_dev_info).max_rx_pktlen - (struct rte_eth_dev_info).max_mtu``

-* ethdev: ``rx_descriptor_done`` dev_ops and ``rte_eth_rx_descriptor_done``
-  will be removed in 21.11.
-  Existing ``rte_eth_rx_descriptor_status`` and ``rte_eth_tx_descriptor_status``
-  APIs can be used as replacement.
-
 * ethdev: Announce moving from dedicated modify function for each field,
   to using the general ``rte_flow_modify_field`` action.

@@ -223,6 +223,10 @@ Removed Items
   ``rte_eth_mirror_rule_reset`` along with the associated macros
   ``ETH_MIRROR_*`` are removed.

+* ethdev: Removed ``rte_eth_rx_descriptor_done`` API function and its
+  driver callback. It is replaced by the more complete function
+  ``rte_eth_rx_descriptor_status``.
+
 * i40e: Removed i40evf driver.
   iavf already became the default VF driver for i40e devices,
   so there is no need to maintain i40evf.

@@ -402,8 +402,6 @@ int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
 		uint16_t rx_queue_id);

-int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
-
 int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);
@@ -479,8 +477,6 @@ int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev,
 		uint16_t rx_queue_id);

-int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);
-
 int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);

@@ -247,7 +247,6 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)

 	eth_dev->dev_ops = &eth_em_ops;
 	eth_dev->rx_queue_count = eth_em_rx_queue_count;
-	eth_dev->rx_descriptor_done = eth_em_rx_descriptor_done;
 	eth_dev->rx_descriptor_status = eth_em_rx_descriptor_status;
 	eth_dev->tx_descriptor_status = eth_em_tx_descriptor_status;
 	eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;

@@ -1517,23 +1517,6 @@ eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	return desc;
 }

-int
-eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
-	volatile struct e1000_rx_desc *rxdp;
-	struct em_rx_queue *rxq = rx_queue;
-	uint32_t desc;
-
-	if (unlikely(offset >= rxq->nb_rx_desc))
-		return 0;
-	desc = rxq->rx_tail + offset;
-	if (desc >= rxq->nb_rx_desc)
-		desc -= rxq->nb_rx_desc;
-
-	rxdp = &rxq->rx_ring[desc];
-	return !!(rxdp->status & E1000_RXD_STAT_DD);
-}
-
 int
 eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {
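
The truncated hunk above opens the surviving status callback. A hypothetical
PMD-side sketch (illustrative types and bit names, not the literal e1000
code) of how a DD-bit NIC serves the richer API, which is why the boolean
callback became redundant:

    #include <errno.h>
    #include <stdint.h>

    #include <rte_branch_prediction.h>
    #include <rte_ethdev.h>

    /* Hypothetical queue layout, for illustration only. */
    struct example_rx_queue {
    	volatile uint8_t *status_ring;	/* per-descriptor status bytes */
    	uint16_t rx_tail;		/* next descriptor SW will look at */
    	uint16_t nb_rx_desc;		/* ring size */
    	uint16_t nb_rx_hold;		/* processed, not yet given to HW */
    };

    #define EXAMPLE_RXD_STAT_DD 0x01	/* hypothetical descriptor-done bit */

    int
    example_rx_descriptor_status(void *rx_queue, uint16_t offset)
    {
    	struct example_rx_queue *rxq = rx_queue;
    	uint32_t desc;

    	if (unlikely(offset >= rxq->nb_rx_desc))
    		return -EINVAL;

    	/* Entries already processed but still held back for refill. */
    	if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
    		return RTE_ETH_RX_DESC_UNAVAIL;

    	desc = rxq->rx_tail + offset;
    	if (desc >= rxq->nb_rx_desc)
    		desc -= rxq->nb_rx_desc;

    	if (rxq->status_ring[desc] & EXAMPLE_RXD_STAT_DD)
    		return RTE_ETH_RX_DESC_DONE;	/* old "done" answer */

    	return RTE_ETH_RX_DESC_AVAIL;		/* old "not done" answer */
    }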

@@ -726,7 +726,6 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)

 	eth_dev->dev_ops = &eth_igb_ops;
 	eth_dev->rx_queue_count = eth_igb_rx_queue_count;
-	eth_dev->rx_descriptor_done = eth_igb_rx_descriptor_done;
 	eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
 	eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
 	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
@@ -920,7 +919,6 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
 	PMD_INIT_FUNC_TRACE();

 	eth_dev->dev_ops = &igbvf_eth_dev_ops;
-	eth_dev->rx_descriptor_done = eth_igb_rx_descriptor_done;
 	eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
 	eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
 	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;

@@ -1798,23 +1798,6 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	return desc;
 }

-int
-eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
-	volatile union e1000_adv_rx_desc *rxdp;
-	struct igb_rx_queue *rxq = rx_queue;
-	uint32_t desc;
-
-	if (unlikely(offset >= rxq->nb_rx_desc))
-		return 0;
-	desc = rxq->rx_tail + offset;
-	if (desc >= rxq->nb_rx_desc)
-		desc -= rxq->nb_rx_desc;
-
-	rxdp = &rxq->rx_ring[desc];
-	return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
-}
-
 int
 eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {

@@ -326,9 +326,6 @@ uint16_t fm10k_recv_scattered_pkts(void *rx_queue,
 uint32_t
 fm10k_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);

-int
-fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
-
 int
 fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);

@@ -3062,7 +3062,6 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)

 	dev->dev_ops = &fm10k_eth_dev_ops;
 	dev->rx_queue_count = fm10k_dev_rx_queue_count;
-	dev->rx_descriptor_done = fm10k_dev_rx_descriptor_done;
 	dev->rx_descriptor_status = fm10k_dev_rx_descriptor_status;
 	dev->tx_descriptor_status = fm10k_dev_tx_descriptor_status;
 	dev->rx_pkt_burst = &fm10k_recv_pkts;

@@ -393,31 +393,6 @@ fm10k_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	return desc;
 }

-int
-fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
-	volatile union fm10k_rx_desc *rxdp;
-	struct fm10k_rx_queue *rxq = rx_queue;
-	uint16_t desc;
-	int ret;
-
-	if (unlikely(offset >= rxq->nb_desc)) {
-		PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
-		return 0;
-	}
-
-	desc = rxq->next_dd + offset;
-	if (desc >= rxq->nb_desc)
-		desc -= rxq->nb_desc;
-
-	rxdp = &rxq->hw_ring[desc];
-
-	ret = !!(rxdp->w.status &
-		rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));
-
-	return ret;
-}
-
 int
 fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {

@@ -1422,7 +1422,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)

 	dev->dev_ops = &i40e_eth_dev_ops;
 	dev->rx_queue_count = i40e_dev_rx_queue_count;
-	dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
 	dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
 	dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
 	dev->rx_pkt_burst = i40e_recv_pkts;

@@ -2149,32 +2149,6 @@ i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	return desc;
 }

-int
-i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
-	volatile union i40e_rx_desc *rxdp;
-	struct i40e_rx_queue *rxq = rx_queue;
-	uint16_t desc;
-	int ret;
-
-	if (unlikely(offset >= rxq->nb_rx_desc)) {
-		PMD_DRV_LOG(ERR, "Invalid RX descriptor id %u", offset);
-		return 0;
-	}
-
-	desc = rxq->rx_tail + offset;
-	if (desc >= rxq->nb_rx_desc)
-		desc -= rxq->nb_rx_desc;
-
-	rxdp = &(rxq->rx_ring[desc]);
-	ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
-		I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
-		(1 << I40E_RX_DESC_STATUS_DD_SHIFT));
-
-	return ret;
-}
-
 int
 i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {

@@ -231,7 +231,6 @@ void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);

 uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
 				 uint16_t rx_queue_id);
-int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
 int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);

@@ -1227,7 +1227,6 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();

 	dev->dev_ops = &eth_igc_ops;
-	dev->rx_descriptor_done = eth_igc_rx_descriptor_done;
 	dev->rx_queue_count = eth_igc_rx_queue_count;
 	dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
 	dev->tx_descriptor_status = eth_igc_tx_descriptor_status;

@@ -757,24 +757,6 @@ uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
 	return desc;
 }

-int eth_igc_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
-	volatile union igc_adv_rx_desc *rxdp;
-	struct igc_rx_queue *rxq = rx_queue;
-	uint32_t desc;
-
-	if (unlikely(!rxq || offset >= rxq->nb_rx_desc))
-		return 0;
-
-	desc = rxq->rx_tail + offset;
-	if (desc >= rxq->nb_rx_desc)
-		desc -= rxq->nb_rx_desc;
-
-	rxdp = &rxq->rx_ring[desc];
-	return !!(rxdp->wb.upper.status_error &
-			rte_cpu_to_le_32(IGC_RXD_STAT_DD));
-}
-
 int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {
 	struct igc_rx_queue *rxq = rx_queue;

@@ -25,8 +25,6 @@ int eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
 		uint16_t rx_queue_id);

-int eth_igc_rx_descriptor_done(void *rx_queue, uint16_t offset);
-
 int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int eth_igc_tx_descriptor_status(void *tx_queue, uint16_t offset);

@@ -1050,7 +1050,6 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)

 	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
 	eth_dev->rx_queue_count = ixgbe_dev_rx_queue_count;
-	eth_dev->rx_descriptor_done = ixgbe_dev_rx_descriptor_done;
 	eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
 	eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
 	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
@@ -1539,7 +1538,6 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 	PMD_INIT_FUNC_TRACE();

 	eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
-	eth_dev->rx_descriptor_done = ixgbe_dev_rx_descriptor_done;
 	eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
 	eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
 	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;

@@ -597,8 +597,6 @@ int ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
 		uint16_t rx_queue_id);

-int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
-
 int ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);

@@ -3285,24 +3285,6 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	return desc;
 }

-int
-ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
-	volatile union ixgbe_adv_rx_desc *rxdp;
-	struct ixgbe_rx_queue *rxq = rx_queue;
-	uint32_t desc;
-
-	if (unlikely(offset >= rxq->nb_rx_desc))
-		return 0;
-	desc = rxq->rx_tail + offset;
-	if (desc >= rxq->nb_rx_desc)
-		desc -= rxq->nb_rx_desc;
-
-	rxdp = &rxq->rx_ring[desc];
-	return !!(rxdp->wb.upper.status_error &
-			rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
-}
-
 int
 ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {

@@ -2440,7 +2440,6 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 	int rc, max_entries;

 	eth_dev->dev_ops = &otx2_eth_dev_ops;
-	eth_dev->rx_descriptor_done = otx2_nix_rx_descriptor_done;
 	eth_dev->rx_queue_count = otx2_nix_rx_queue_count;
 	eth_dev->rx_descriptor_status = otx2_nix_rx_descriptor_status;
 	eth_dev->tx_descriptor_status = otx2_nix_tx_descriptor_status;

@@ -433,7 +433,6 @@ int otx2_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
 			   struct rte_eth_burst_mode *mode);
 uint32_t otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t qidx);
 int otx2_nix_tx_done_cleanup(void *txq, uint32_t free_cnt);
-int otx2_nix_rx_descriptor_done(void *rxq, uint16_t offset);
 int otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int otx2_nix_tx_descriptor_status(void *tx_queue, uint16_t offset);

@@ -365,18 +365,6 @@ nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset)
 	return 0;
 }

-int
-otx2_nix_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
-	struct otx2_eth_rxq *rxq = rx_queue;
-	uint32_t head, tail;
-
-	nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
-			     &head, &tail, rxq->rq);
-
-	return nix_offset_has_packet(head, tail, offset);
-}
-
 int
 otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {

@@ -1297,21 +1297,6 @@ sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t ethdev_qid)
 	return sap->dp_rx->qdesc_npending(rxq_info->dp);
 }

-/*
- * The function is used by the secondary process as well. It must not
- * use any process-local pointers from the adapter data.
- */
-static int
-sfc_rx_descriptor_done(void *queue, uint16_t offset)
-{
-	struct sfc_dp_rxq *dp_rxq = queue;
-	const struct sfc_dp_rx *dp_rx;
-
-	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
-
-	return offset < dp_rx->qdesc_npending(dp_rxq);
-}
-
 /*
  * The function is used by the secondary process as well. It must not
  * use any process-local pointers from the adapter data.
@@ -2046,7 +2031,6 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
 	dev->tx_pkt_burst = dp_tx->pkt_burst;

 	dev->rx_queue_count = sfc_rx_queue_count;
-	dev->rx_descriptor_done = sfc_rx_descriptor_done;
 	dev->rx_descriptor_status = sfc_rx_descriptor_status;
 	dev->tx_descriptor_status = sfc_tx_descriptor_status;
 	dev->dev_ops = &sfc_eth_dev_ops;
@@ -2154,7 +2138,6 @@ sfc_eth_dev_secondary_init(struct rte_eth_dev *dev, uint32_t logtype_main)
 	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
 	dev->tx_pkt_burst = dp_tx->pkt_burst;
 	dev->rx_queue_count = sfc_rx_queue_count;
-	dev->rx_descriptor_done = sfc_rx_descriptor_done;
 	dev->rx_descriptor_status = sfc_rx_descriptor_status;
 	dev->tx_descriptor_status = sfc_tx_descriptor_status;
 	dev->dev_ops = &sfc_eth_dev_secondary_ops;

@@ -1899,7 +1899,6 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 	}

 	eth_dev->dev_ops = &virtio_eth_dev_ops;
-	eth_dev->rx_descriptor_done = virtio_dev_rx_queue_done;

 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
 		set_rxtx_funcs(eth_dev);

@@ -58,8 +58,6 @@ void virtio_dev_cq_start(struct rte_eth_dev *dev);
 /*
  * RX/TX function prototypes
  */
-int virtio_dev_rx_queue_done(void *rxq, uint16_t offset);
-
 int virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		uint16_t nb_rx_desc, unsigned int socket_id,
 		const struct rte_eth_rxconf *rx_conf,

@@ -39,15 +39,6 @@
 #define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
 #endif

-int
-virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
-{
-	struct virtnet_rx *rxvq = rxq;
-	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
-
-	return virtqueue_nused(vq) >= offset;
-}
-
 void
 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
 {

@@ -588,7 +588,6 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
 	eth_dev->tx_pkt_burst = NULL;
 	eth_dev->tx_pkt_prepare = NULL;
 	eth_dev->rx_queue_count = NULL;
-	eth_dev->rx_descriptor_done = NULL;
 	eth_dev->rx_descriptor_status = NULL;
 	eth_dev->tx_descriptor_status = NULL;
 	eth_dev->dev_ops = NULL;

@@ -4989,31 +4989,6 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
 	return (int)(*dev->rx_queue_count)(dev, queue_id);
 }

-/**
- * Check if the DD bit of the specific RX descriptor in the queue has been set
- *
- * @param port_id
- *  The port identifier of the Ethernet device.
- * @param queue_id
- *  The queue id on the specific port.
- * @param offset
- *  The offset of the descriptor ID from tail.
- * @return
- *  - (1) if the specific DD bit is set.
- *  - (0) if the specific DD bit is not set.
- *  - (-ENODEV) if *port_id* invalid.
- *  - (-ENOTSUP) if the device does not support this function
- */
-__rte_deprecated
-static inline int
-rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
-{
-	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_done, -ENOTSUP);
-	return (*dev->rx_descriptor_done)(dev->data->rx_queues[queue_id], offset);
-}
-
 /**@{@name Rx hardware descriptor states
  * @see rte_eth_rx_descriptor_status
  */
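
For comparison with the removed wrapper, the surviving dispatcher in the
same header goes through the rx_descriptor_status callback in the same way;
approximately (simplified sketch, debug-build queue checks omitted):

    /* Sketch of the remaining wrapper; see rte_ethdev.h for the
     * authoritative version.
     */
    static inline int
    rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
    			     uint16_t offset)
    {
    	struct rte_eth_dev *dev;

    	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
    	dev = &rte_eth_devices[port_id];
    	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_status, -ENOTSUP);
    	return (*dev->rx_descriptor_status)(dev->data->rx_queues[queue_id],
    					    offset);
    }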

@@ -45,9 +45,6 @@ typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
 					 uint16_t rx_queue_id);
 /**< @internal Get number of used descriptors on a receive queue. */

-typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
-/**< @internal Check DD bit of specific RX descriptor */
-
 typedef int (*eth_rx_descriptor_status_t)(void *rxq, uint16_t offset);
 /**< @internal Check the status of a Rx descriptor */
@@ -85,7 +82,6 @@ struct rte_eth_dev {
 	eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */
 	eth_rx_queue_count_t rx_queue_count; /**< Get the number of used RX descriptors. */
-	eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. */
 	eth_rx_descriptor_status_t rx_descriptor_status; /**< Check the status of a Rx descriptor. */
 	eth_tx_descriptor_status_t tx_descriptor_status; /**< Check the status of a Tx descriptor. */