net/virtio: remove simple Tx path
The simple Tx path does not comply with the Virtio specification. Now that the VIRTIO_F_IN_ORDER feature is supported by the Virtio PMD, use that optimized path instead. Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com> Reviewed-by: Tiwei Bie <tiwei.bie@intel.com>
This commit is contained in:
parent
1ff30d182c
commit
57f818963d
@ -1336,11 +1336,7 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
|
||||
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
|
||||
}
|
||||
|
||||
if (hw->use_simple_tx) {
|
||||
PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u",
|
||||
eth_dev->data->port_id);
|
||||
eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
|
||||
} else if (hw->use_inorder_tx) {
|
||||
if (hw->use_inorder_tx) {
|
||||
PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
|
||||
eth_dev->data->port_id);
|
||||
eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
|
||||
@ -1881,12 +1877,9 @@ virtio_dev_configure(struct rte_eth_dev *dev)
|
||||
rte_spinlock_init(&hw->state_lock);
|
||||
|
||||
hw->use_simple_rx = 1;
|
||||
hw->use_simple_tx = 1;
|
||||
|
||||
if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
|
||||
/* Simple Tx not compatible with in-order ring */
|
||||
hw->use_inorder_tx = 1;
|
||||
hw->use_simple_tx = 0;
|
||||
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
|
||||
hw->use_inorder_rx = 1;
|
||||
hw->use_simple_rx = 0;
|
||||
@ -1898,12 +1891,10 @@ virtio_dev_configure(struct rte_eth_dev *dev)
|
||||
#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
|
||||
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
|
||||
hw->use_simple_rx = 0;
|
||||
hw->use_simple_tx = 0;
|
||||
}
|
||||
#endif
|
||||
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
|
||||
hw->use_simple_rx = 0;
|
||||
hw->use_simple_tx = 0;
|
||||
}
|
||||
|
||||
if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
|
||||
|
@ -238,7 +238,6 @@ struct virtio_hw {
|
||||
uint8_t use_msix;
|
||||
uint8_t modern;
|
||||
uint8_t use_simple_rx;
|
||||
uint8_t use_simple_tx;
|
||||
uint8_t use_inorder_rx;
|
||||
uint8_t use_inorder_tx;
|
||||
uint16_t port_id;
|
||||
|
@ -708,10 +708,6 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
|
||||
|
||||
PMD_INIT_FUNC_TRACE();
|
||||
|
||||
/* cannot use simple rxtx funcs with multisegs or offloads */
|
||||
if (dev->data->dev_conf.txmode.offloads)
|
||||
hw->use_simple_tx = 0;
|
||||
|
||||
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
|
||||
nb_desc = vq->vq_nentries;
|
||||
vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
|
||||
@ -746,33 +742,11 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
|
||||
uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
|
||||
struct virtio_hw *hw = dev->data->dev_private;
|
||||
struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
|
||||
uint16_t mid_idx = vq->vq_nentries >> 1;
|
||||
struct virtnet_tx *txvq = &vq->txq;
|
||||
uint16_t desc_idx;
|
||||
|
||||
PMD_INIT_FUNC_TRACE();
|
||||
|
||||
if (hw->use_simple_tx) {
|
||||
for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
|
||||
vq->vq_ring.avail->ring[desc_idx] =
|
||||
desc_idx + mid_idx;
|
||||
vq->vq_ring.desc[desc_idx + mid_idx].next =
|
||||
desc_idx;
|
||||
vq->vq_ring.desc[desc_idx + mid_idx].addr =
|
||||
txvq->virtio_net_hdr_mem +
|
||||
offsetof(struct virtio_tx_region, tx_hdr);
|
||||
vq->vq_ring.desc[desc_idx + mid_idx].len =
|
||||
vq->hw->vtnet_hdr_size;
|
||||
vq->vq_ring.desc[desc_idx + mid_idx].flags =
|
||||
VRING_DESC_F_NEXT;
|
||||
vq->vq_ring.desc[desc_idx].flags = 0;
|
||||
}
|
||||
for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
|
||||
desc_idx++)
|
||||
vq->vq_ring.avail->ring[desc_idx] = desc_idx;
|
||||
} else if (hw->use_inorder_tx) {
|
||||
if (hw->use_inorder_tx)
|
||||
vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
|
||||
}
|
||||
|
||||
VIRTQUEUE_DUMP(vq);
|
||||
|
||||
|
@ -27,73 +27,6 @@
|
||||
#pragma GCC diagnostic ignored "-Wcast-qual"
|
||||
#endif
|
||||
|
||||
/*
 * Simple (vector-friendly) Tx burst: enqueue up to nb_pkts single-segment
 * mbufs onto the Tx virtqueue.
 *
 * Ring layout assumption: the descriptor ring is split in halves — only the
 * lower vq_nentries/2 slots carry packet data, each pre-chained to a fixed
 * virtio-net header descriptor in the upper half, so every packet consumes
 * TWO descriptors.  This is why the code uses ">> 1" / "<< 1" arithmetic
 * and masks indices with desc_idx_max = (vq_nentries >> 1) - 1.
 * NOTE(review): this fixed chaining is set up in the matching Tx queue
 * setup code; it does not follow in-order completion semantics — confirm
 * against the Virtio specification.
 *
 * Returns the number of packets actually placed on the avail ring (may be
 * less than nb_pkts when descriptors are scarce, or 0 if the port is not
 * started).
 */
uint16_t
virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t nb_used;
	uint16_t desc_idx;
	struct vring_desc *start_dp;
	uint16_t nb_tail, nb_commit;
	int i;
	/* Mask for indices within the data (lower) half of the ring. */
	uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;
	uint16_t nb_tx = 0;

	/*
	 * Refuse to transmit unless the device is started, except for
	 * driver-injected packets (hw->inject_pkts), which are allowed
	 * through even before start.
	 */
	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	nb_used = VIRTQUEUE_NUSED(vq);
	/* Order the read of the used index before using ring entries. */
	rte_compiler_barrier();

	/* Reclaim completed descriptors once enough have accumulated. */
	if (nb_used >= VIRTIO_TX_FREE_THRESH)
		virtio_xmit_cleanup_simple(vq);

	/* Each packet needs two descriptors, hence free_cnt >> 1. */
	nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
	desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
	start_dp = vq->vq_ring.desc;
	/* Number of slots left before the data half of the ring wraps. */
	nb_tail = (uint16_t) (desc_idx_max + 1 - desc_idx);

	/* First pass: fill descriptors up to the wrap point. */
	if (nb_commit >= nb_tail) {
		for (i = 0; i < nb_tail; i++)
			vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
		for (i = 0; i < nb_tail; i++) {
			start_dp[desc_idx].addr =
				VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
			start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
			tx_pkts++;
			desc_idx++;
		}
		nb_commit -= nb_tail;
		desc_idx = 0;
	}
	/* Second pass: remainder continues from the start of the ring. */
	for (i = 0; i < nb_commit; i++)
		vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
	for (i = 0; i < nb_commit; i++) {
		start_dp[desc_idx].addr =
			VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
		start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
		tx_pkts++;
		desc_idx++;
	}

	/* Descriptor writes must be visible before publishing avail->idx. */
	rte_compiler_barrier();

	/* Two descriptors consumed per packet. */
	vq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);
	vq->vq_avail_idx += nb_pkts;
	vq->vq_ring.avail->idx = vq->vq_avail_idx;
	txvq->stats.packets += nb_pkts;

	/* Kick the device only if it has not suppressed notifications. */
	if (likely(nb_pkts)) {
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}

	return nb_pkts;
}
|
||||
|
||||
int __attribute__((cold))
|
||||
virtio_rxq_vec_setup(struct virtnet_rx *rxq)
|
||||
{
|
||||
|
@ -55,53 +55,4 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
|
||||
vq_update_avail_idx(vq);
|
||||
}
|
||||
|
||||
#define VIRTIO_TX_FREE_THRESH 32	/* used entries needed before reclaim */
#define VIRTIO_TX_MAX_FREE_BUF_SZ 32	/* size of the local free[] batch */
#define VIRTIO_TX_FREE_NR 32		/* descriptors reclaimed per call */
/* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */

/*
 * Reclaim a fixed batch of VIRTIO_TX_FREE_NR completed Tx buffers from
 * the used ring and return their mbufs to their mempools.
 *
 * Consecutive mbufs belonging to the same mempool are returned with a
 * single rte_mempool_put_bulk(); a change of pool flushes the pending
 * batch and starts a new one.  The caller checks that at least
 * VIRTIO_TX_FREE_THRESH (== VIRTIO_TX_FREE_NR) entries are used before
 * calling, which is what makes the unconditional fixed-size advance at
 * the bottom valid.
 */
static inline void
virtio_xmit_cleanup_simple(struct virtqueue *vq)
{
	uint16_t i, desc_idx;
	uint32_t nb_free = 0;
	struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ];

	/*
	 * Start index within the data (lower) half of the ring.
	 * NOTE(review): desc_idx is incremented below without re-masking;
	 * presumably the half-ring size and FREE_NR invariants prevent an
	 * overrun of vq_descx — confirm against the queue setup code.
	 */
	desc_idx = (uint16_t)(vq->vq_used_cons_idx &
		((vq->vq_nentries >> 1) - 1));
	m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
	/* prefree_seg returns NULL while other references still exist. */
	m = rte_pktmbuf_prefree_seg(m);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		/* Batch same-pool mbufs so they can be bulk-freed. */
		for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
			m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
			m = rte_pktmbuf_prefree_seg(m);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool))
					free[nb_free++] = m;
				else {
					/* Pool changed: flush the batch,
					 * then start a new one. */
					rte_mempool_put_bulk(free[0]->pool,
						(void **)free,
						RTE_MIN(RTE_DIM(free),
						nb_free));
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		/* Flush whatever remains in the batch. */
		rte_mempool_put_bulk(free[0]->pool, (void **)free,
			RTE_MIN(RTE_DIM(free), nb_free));
	} else {
		/* No batch seed available: free the rest one at a time. */
		for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
			m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
			m = rte_pktmbuf_prefree_seg(m);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	/* Each packet used two descriptors, so free count grows by 2x. */
	vq->vq_used_cons_idx += VIRTIO_TX_FREE_NR;
	vq->vq_free_cnt += (VIRTIO_TX_FREE_NR << 1);
}
|
||||
|
||||
#endif /* _VIRTIO_RXTX_SIMPLE_H_ */
|
||||
|
@ -436,7 +436,6 @@ virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
|
||||
hw->use_msix = 1;
|
||||
hw->modern = 0;
|
||||
hw->use_simple_rx = 0;
|
||||
hw->use_simple_tx = 0;
|
||||
hw->use_inorder_rx = 0;
|
||||
hw->use_inorder_tx = 0;
|
||||
hw->virtio_user_dev = dev;
|
||||
|
Loading…
Reference in New Issue
Block a user