net/virtio-user: fix packed ring server mode

This patch fixes the situation where the data path does not work
properly when vhost reconnects to virtio in server mode with a packed ring.

Currently, virtio and vhost share the vring memory. With a split ring,
vhost can read the state of the descriptors directly from the available
ring and the used ring during reconnection, so the data path can simply
continue.

But with a packed ring, vhost cannot recover the state of the
descriptors from the descriptor ring when it reconnects to virtio,
because descriptor ownership is encoded in per-descriptor flags whose
meaning depends on wrap counters that each side keeps privately. By
resetting the Tx and Rx queues, the data path can restart from the
beginning.
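
The difference is visible in the ring layouts themselves. The sketch
below is illustrative only, simplified from the virtio spec and DPDK's
virtio_ring.h; it is not part of the patch:

#include <stdint.h>

/*
 * Split ring: driver and device progress is recorded in index fields
 * that live inside the shared memory itself, so a reconnecting vhost
 * can simply re-read them.
 */
struct vring_avail {
	uint16_t flags;
	uint16_t idx;	/* driver's publish index, survives reconnect */
	uint16_t ring[];
};

struct vring_used_elem {
	uint32_t id;
	uint32_t len;
};

struct vring_used {
	uint16_t flags;
	uint16_t idx;	/* device's completion index, survives reconnect */
	struct vring_used_elem ring[];
};

/*
 * Packed ring: one descriptor array, no shared index fields. Ownership
 * is encoded in per-descriptor AVAIL/USED flag bits whose meaning flips
 * with wrap counters that each side keeps in private memory, which a
 * restarted vhost has lost.
 */
struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;	/* VRING_PACKED_DESC_F_AVAIL / _USED bits */
};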

Fixes: 4c3f5822eb21 ("net/virtio: add packed virtqueue defines")
Cc: stable@dpdk.org

Signed-off-by: Xuan Ding <xuan.ding@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c

@@ -1913,6 +1913,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 			goto err_vtpci_init;
 	}
 
+	rte_spinlock_init(&hw->state_lock);
+
 	/* reset device and negotiate default features */
 	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
 	if (ret < 0)
@@ -2155,8 +2157,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return -EBUSY;
 		}
 
-	rte_spinlock_init(&hw->state_lock);
-
 	hw->use_simple_rx = 1;
 
 	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {

--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c

@@ -13,6 +13,7 @@
 #include <rte_ethdev_vdev.h>
 #include <rte_bus_vdev.h>
 #include <rte_alarm.h>
+#include <rte_cycles.h>
 
 #include "virtio_ethdev.h"
 #include "virtio_logs.h"
@@ -25,12 +26,48 @@
 #define virtio_user_get_dev(hw) \
 	((struct virtio_user_dev *)(hw)->virtio_user_dev)
 
+static void
+virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
+{
+	struct virtio_hw *hw = dev->data->dev_private;
+	struct virtnet_rx *rxvq;
+	struct virtnet_tx *txvq;
+	uint16_t i;
+
+	/* Add lock to avoid queue contention. */
+	rte_spinlock_lock(&hw->state_lock);
+	hw->started = 0;
+
+	/*
+	 * Waiting for datapath to complete before resetting queues.
+	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
+	 */
+	rte_delay_ms(1);
+
+	/* Vring reset for each Tx queue and Rx queue. */
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxvq = dev->data->rx_queues[i];
+		virtqueue_rxvq_reset_packed(rxvq->vq);
+		virtio_dev_rx_queue_setup_finish(dev, i);
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txvq = dev->data->tx_queues[i];
+		virtqueue_txvq_reset_packed(txvq->vq);
+	}
+
+	hw->started = 1;
+	rte_spinlock_unlock(&hw->state_lock);
+}
+
 static int
 virtio_user_server_reconnect(struct virtio_user_dev *dev)
 {
 	int ret;
 	int connectfd;
 	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+	struct virtio_hw *hw = eth_dev->data->dev_private;
 
 	connectfd = accept(dev->listenfd, NULL, NULL);
 	if (connectfd < 0)
@@ -51,6 +88,12 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
 
 	dev->features &= dev->device_features;
 
+	/* For packed ring, resetting queues is required in reconnection. */
+	if (vtpci_packed_queue(hw)) {
+		PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
+				" when packed ring reconnecting.");
+		virtio_user_reset_queues_packed(eth_dev);
+	}
+
 	ret = virtio_user_start_device(dev);
 	if (ret < 0)
 		return -1;
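
The 1 ms quiesce window above works because every Rx/Tx burst function
tests hw->started on entry. A simplified sketch, modeled on
virtio_xmit_pkts() in virtio_rxtx.c (the real function is longer and
also lets PMD-injected control packets through; types come from the
surrounding driver headers):

uint16_t
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtio_hw *hw = txvq->vq->hw;
	uint16_t nb_tx = 0;

	/* Device quiesced: refuse new work, so an in-flight burst is
	 * the only thing the 1 ms delay has to wait out. */
	if (unlikely(hw->started == 0))
		return nb_tx;

	/* ... normal descriptor enqueue and notification path ... */
	return nb_tx;
}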

--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c

@@ -141,3 +141,74 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
 	else
 		virtqueue_rxvq_flush_split(vq);
 }
+
+int
+virtqueue_rxvq_reset_packed(struct virtqueue *vq)
+{
+	int size = vq->vq_nentries;
+	struct vq_desc_extra *dxp;
+	struct virtnet_rx *rxvq;
+	uint16_t desc_idx;
+
+	vq->vq_used_cons_idx = 0;
+	vq->vq_desc_head_idx = 0;
+	vq->vq_avail_idx = 0;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+
+	vq->vq_packed.used_wrap_counter = 1;
+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+	vq->vq_packed.event_flags_shadow = 0;
+	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
+
+	rxvq = &vq->rxq;
+	memset(rxvq->mz->addr, 0, rxvq->mz->len);
+
+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+		dxp = &vq->vq_descx[desc_idx];
+		if (dxp->cookie != NULL) {
+			rte_pktmbuf_free(dxp->cookie);
+			dxp->cookie = NULL;
+		}
+	}
+
+	vring_desc_init_packed(vq, size);
+
+	return 0;
+}
+
+int
+virtqueue_txvq_reset_packed(struct virtqueue *vq)
+{
+	int size = vq->vq_nentries;
+	struct vq_desc_extra *dxp;
+	struct virtnet_tx *txvq;
+	uint16_t desc_idx;
+
+	vq->vq_used_cons_idx = 0;
+	vq->vq_desc_head_idx = 0;
+	vq->vq_avail_idx = 0;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+
+	vq->vq_packed.used_wrap_counter = 1;
+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+	vq->vq_packed.event_flags_shadow = 0;
+
+	txvq = &vq->txq;
+	memset(txvq->mz->addr, 0, txvq->mz->len);
+	memset(txvq->virtio_net_hdr_mz->addr, 0,
+		txvq->virtio_net_hdr_mz->len);
+
+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+		dxp = &vq->vq_descx[desc_idx];
+		if (dxp->cookie != NULL) {
+			rte_pktmbuf_free(dxp->cookie);
+			dxp->cookie = NULL;
+		}
+	}
+
+	vring_desc_init_packed(vq, size);
+
+	return 0;
+}
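
Both reset paths finish with vring_desc_init_packed(). For reference, a
paraphrased sketch of that helper from virtqueue.h (check your DPDK
version for the exact code): it renumbers the descriptor ids and relinks
the free chain, while the descriptor flags were already zeroed by the
memset() of the ring memory above, so no slot is marked AVAIL or USED
until the ring is refilled:

static inline void
vring_desc_init_packed(struct virtqueue *vq, int n)
{
	int i;

	for (i = 0; i < n - 1; i++) {
		vq->vq_packed.ring.desc[i].id = i;
		vq->vq_descx[i].next = i + 1;
	}
	vq->vq_packed.ring.desc[i].id = i;
	vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
}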

--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h

@@ -443,6 +443,10 @@ struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
 /* Flush the elements in the used ring. */
 void virtqueue_rxvq_flush(struct virtqueue *vq);
 
+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
+
+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
+
 static inline int
 virtqueue_full(const struct virtqueue *vq)
 {