net/virtio: add packet injection method

This patch adds the dev_pause, dev_resume and inject_pkts APIs to allow
the driver to pause the worker threads and inject special packets into
a Tx queue. The next patch builds on top of this.

Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Author: Xiao Wang
Date: 2018-01-10 09:23:53 +08:00
Committer: Ferruh Yigit
Commit: 1978a9dc57 (parent: a2ffb87b1b)
5 changed files with 70 additions and 2 deletions
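
For a sense of how the three APIs fit together, here is a minimal usage
sketch. This is hypothetical caller code, not part of the patch: the
send_special_pkts() wrapper and its parameters are invented for
illustration; only virtio_dev_pause(), virtio_inject_pkts() and
virtio_dev_resume() come from this commit.

#include "virtio_ethdev.h"	/* virtio_dev_pause/resume, virtio_inject_pkts */

/*
 * Hypothetical helper (not part of this patch): pause the device, inject
 * a prepared burst into Tx queue 0, then resume. The Tx path accepts the
 * burst even though hw->started == 0 because virtio_inject_pkts()
 * publishes the array via hw->inject_pkts, which the xmit functions
 * compare against (see the tx_pkts != hw->inject_pkts checks below).
 */
static int
send_special_pkts(struct rte_eth_dev *dev, struct rte_mbuf **pkts, int n)
{
	int sent;

	if (virtio_dev_pause(dev) < 0)
		return -1;	/* device is already stopped */

	sent = virtio_inject_pkts(dev, pkts, n);

	virtio_dev_resume(dev);	/* sets hw->started = 1 and unlocks */
	return sent;
}

Note that virtio_dev_pause() returns with hw->state_lock held, so every
successful pause must be paired with virtio_dev_resume(), which restores
hw->started and releases the lock.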

drivers/net/virtio/virtio_ethdev.c

@@ -26,6 +26,7 @@
 #include <rte_memory.h>
 #include <rte_eal.h>
 #include <rte_dev.h>
+#include <rte_cycles.h>

 #include "virtio_ethdev.h"
 #include "virtio_pci.h"
@@ -1223,6 +1224,57 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
 	return 0;
 }

+int
+virtio_dev_pause(struct rte_eth_dev *dev)
+{
+	struct virtio_hw *hw = dev->data->dev_private;
+
+	rte_spinlock_lock(&hw->state_lock);
+	if (hw->started == 0) {
+		/* Device is just stopped. */
+		rte_spinlock_unlock(&hw->state_lock);
+		return -1;
+	}
+	hw->started = 0;
+	/*
+	 * Prevent the worker threads from touching queues to avoid contention;
+	 * 1 ms should be enough for the ongoing Tx function to finish.
+	 */
+	rte_delay_ms(1);
+	return 0;
+}
+
+/*
+ * Recover hw state to let the worker threads continue.
+ */
+void
+virtio_dev_resume(struct rte_eth_dev *dev)
+{
+	struct virtio_hw *hw = dev->data->dev_private;
+
+	hw->started = 1;
+	rte_spinlock_unlock(&hw->state_lock);
+}
+
+/*
+ * Should be called only after device is paused.
+ */
+int
+virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
+		int nb_pkts)
+{
+	struct virtio_hw *hw = dev->data->dev_private;
+	struct virtnet_tx *txvq = dev->data->tx_queues[0];
+	int ret;
+
+	hw->inject_pkts = tx_pkts;
+	ret = dev->tx_pkt_burst(txvq, tx_pkts, nb_pkts);
+	hw->inject_pkts = NULL;
+
+	return ret;
+}
+
 /*
  * Process Virtio Config changed interrupt and call the callback
  * if link state changed.
@@ -1762,6 +1814,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 		return -EBUSY;
 	}

+	rte_spinlock_init(&hw->state_lock);
+
 	hw->use_simple_rx = 1;
 	hw->use_simple_tx = 1;
@@ -1928,12 +1982,14 @@ virtio_dev_stop(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "stop");

+	rte_spinlock_lock(&hw->state_lock);
 	if (intr_conf->lsc || intr_conf->rxq)
 		virtio_intr_disable(dev);

 	hw->started = 0;
 	memset(&link, 0, sizeof(link));
 	virtio_dev_atomic_write_link_status(dev, &link);
+	rte_spinlock_unlock(&hw->state_lock);
 }

 static int

drivers/net/virtio/virtio_ethdev.h

@@ -87,4 +87,9 @@ int eth_virtio_dev_init(struct rte_eth_dev *eth_dev);

 void virtio_interrupt_handler(void *param);

+int virtio_dev_pause(struct rte_eth_dev *dev);
+void virtio_dev_resume(struct rte_eth_dev *dev);
+int virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
+		int nb_pkts);
+
 #endif /* _VIRTIO_ETHDEV_H_ */

drivers/net/virtio/virtio_pci.h

@@ -241,6 +241,13 @@ struct virtio_hw {
 	struct virtio_pci_common_cfg *common_cfg;
 	struct virtio_net_config *dev_cfg;
 	void *virtio_user_dev;
+	/*
+	 * The app management thread and the virtio interrupt handler
+	 * thread can both change device state; this lock is meant to
+	 * avoid such contention.
+	 */
+	rte_spinlock_t state_lock;
+	struct rte_mbuf **inject_pkts;

 	struct virtqueue **vqs;
 };

drivers/net/virtio/virtio_rxtx.c

@@ -988,7 +988,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	uint16_t nb_used, nb_tx = 0;
 	int error;

-	if (unlikely(hw->started == 0))
+	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
 		return nb_tx;

 	if (unlikely(nb_pkts < 1))

drivers/net/virtio/virtio_rxtx_simple.c

@@ -70,7 +70,7 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;
 	uint16_t nb_tx = 0;

-	if (unlikely(hw->started == 0))
+	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
 		return nb_tx;

 	nb_used = VIRTQUEUE_NUSED(vq);