ethdev: fix check of threshold for Tx freeing
The parameter tx_free_thresh is not consistent between the drivers: some use it as rte_eth_tx_burst() requires, while others release buffers when the number of free descriptors drops below this value. Let's use it as most fast-path code does, which is the latter, and update comments throughout the code to reflect that. Signed-off-by: Zoltan Kiss <zoltan.kiss@linaro.org> Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
This commit is contained in:
parent
b35d0d80f0
commit
72514b5d55
@ -182,7 +182,9 @@ struct em_tx_queue {
|
||||
volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
|
||||
uint16_t nb_tx_desc; /**< number of TX descriptors. */
|
||||
uint16_t tx_tail; /**< Current value of TDT register. */
|
||||
uint16_t tx_free_thresh;/**< minimum TX before freeing. */
|
||||
/**< Start freeing TX buffers if there are less free descriptors than
|
||||
this value. */
|
||||
uint16_t tx_free_thresh;
|
||||
/**< Number of TX descriptors to use before RS bit is set. */
|
||||
uint16_t tx_rs_thresh;
|
||||
/** Number of TX descriptors used since RS bit was set. */
|
||||
@ -418,9 +420,8 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
|
||||
txe = &sw_ring[tx_id];
|
||||
|
||||
/* Determine if the descriptor ring needs to be cleaned. */
|
||||
if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) {
|
||||
if (txq->nb_tx_free < txq->tx_free_thresh)
|
||||
em_xmit_cleanup(txq);
|
||||
}
|
||||
|
||||
/* TX loop */
|
||||
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
|
||||
|
@ -200,7 +200,6 @@ struct fm10k_tx_queue {
|
||||
uint16_t next_free;
|
||||
uint16_t nb_free;
|
||||
uint16_t nb_used;
|
||||
uint16_t free_trigger;
|
||||
uint16_t free_thresh;
|
||||
uint16_t rs_thresh;
|
||||
volatile uint32_t *tail_ptr;
|
||||
|
@ -203,7 +203,6 @@ tx_queue_reset(struct fm10k_tx_queue *q)
|
||||
q->next_free = 0;
|
||||
q->nb_used = 0;
|
||||
q->nb_free = q->nb_desc - 1;
|
||||
q->free_trigger = q->nb_free - q->free_thresh;
|
||||
fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
|
||||
FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
|
||||
}
|
||||
|
@ -450,7 +450,7 @@ fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
|
||||
mb = tx_pkts[count];
|
||||
|
||||
/* running low on descriptors? try to free some... */
|
||||
if (q->nb_free < q->free_trigger)
|
||||
if (q->nb_free < q->free_thresh)
|
||||
tx_free_descriptors(q);
|
||||
|
||||
/* make sure there are enough free descriptors to transmit the
|
||||
|
@ -1236,7 +1236,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
|
||||
txe = &sw_ring[tx_id];
|
||||
|
||||
/* Check if the descriptor ring needs to be cleaned. */
|
||||
if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh)
|
||||
if (txq->nb_tx_free < txq->tx_free_thresh)
|
||||
i40e_xmit_cleanup(txq);
|
||||
|
||||
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
|
||||
|
@ -136,8 +136,9 @@ struct i40e_tx_queue {
|
||||
uint16_t last_desc_cleaned;
|
||||
/**< Total number of TX descriptors ready to be allocated. */
|
||||
uint16_t nb_tx_free;
|
||||
/**< Number of TX descriptors to use before RS bit is set. */
|
||||
uint16_t tx_free_thresh; /**< minimum TX before freeing. */
|
||||
/**< Start freeing TX buffers if there are less free descriptors than
|
||||
this value. */
|
||||
uint16_t tx_free_thresh;
|
||||
/** Number of TX descriptors to use before RS bit is set. */
|
||||
uint16_t tx_rs_thresh;
|
||||
uint8_t pthresh; /**< Prefetch threshold register. */
|
||||
|
@ -593,9 +593,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
|
||||
txe = &sw_ring[tx_id];
|
||||
|
||||
/* Determine if the descriptor ring needs to be cleaned. */
|
||||
if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) {
|
||||
if (txq->nb_tx_free < txq->tx_free_thresh)
|
||||
ixgbe_xmit_cleanup(txq);
|
||||
}
|
||||
|
||||
rte_prefetch0(&txe->mbuf->pool);
|
||||
|
||||
|
@ -195,7 +195,9 @@ struct ixgbe_tx_queue {
|
||||
volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
|
||||
uint16_t nb_tx_desc; /**< number of TX descriptors. */
|
||||
uint16_t tx_tail; /**< current value of TDT reg. */
|
||||
uint16_t tx_free_thresh;/**< minimum TX before freeing. */
|
||||
/**< Start freeing TX buffers if there are less free descriptors than
|
||||
this value. */
|
||||
uint16_t tx_free_thresh;
|
||||
/** Number of TX descriptors to use before RS bit is set. */
|
||||
uint16_t tx_rs_thresh;
|
||||
/** Number of TX descriptors used since RS bit was set. */
|
||||
|
@ -744,7 +744,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
|
||||
nb_used = VIRTQUEUE_NUSED(txvq);
|
||||
|
||||
virtio_rmb();
|
||||
if (likely(nb_used > txvq->vq_free_thresh))
|
||||
if (likely(nb_used > txvq->vq_nentries - txvq->vq_free_thresh))
|
||||
virtio_xmit_cleanup(txvq, nb_used);
|
||||
|
||||
nb_tx = 0;
|
||||
|
@ -632,7 +632,9 @@ struct rte_eth_rxconf {
|
||||
struct rte_eth_txconf {
|
||||
struct rte_eth_thresh tx_thresh; /**< TX ring threshold registers. */
|
||||
uint16_t tx_rs_thresh; /**< Drives the setting of RS bit on TXDs. */
|
||||
uint16_t tx_free_thresh; /**< Drives the freeing of TX buffers. */
|
||||
uint16_t tx_free_thresh; /**< Start freeing TX buffers if there are
|
||||
less free descriptors than this value. */
|
||||
|
||||
uint32_t txq_flags; /**< Set flags for the Tx queue */
|
||||
uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
|
||||
};
|
||||
@ -2534,10 +2536,9 @@ rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
|
||||
* transparently free the memory buffers of packets previously sent.
|
||||
* This feature is driven by the *tx_free_thresh* value supplied to the
|
||||
* rte_eth_dev_configure() function at device configuration time.
|
||||
* When the number of previously sent packets reached the "minimum transmit
|
||||
* packets to free" threshold, the rte_eth_tx_burst() function must
|
||||
* [attempt to] free the *rte_mbuf* buffers of those packets whose
|
||||
* transmission was effectively completed.
|
||||
* When the number of free TX descriptors drops below this threshold, the
|
||||
* rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf* buffers
|
||||
* of those packets whose transmission was effectively completed.
|
||||
*
|
||||
* @param port_id
|
||||
* The port identifier of the Ethernet device.
|
||||
|
Loading…
Reference in New Issue
Block a user