net/virtio: fix resuming port with Rx vector path

Since commit efc83a1e7fc3 ("net/virtio: fix queue setup consistency"),
when resuming a virtio port, the rx rings are refilled with new mbufs
until they are full (vq->vq_free_cnt == 0). This is done without
ensuring that the descriptor index remains a multiple of
RTE_VIRTIO_VPMD_RX_REARM_THRESH, which is a prerequisite when using the
vector mode. This can cause an out-of-bounds access in the rx ring.

This commit changes the vector refill method from
virtqueue_enqueue_recv_refill_simple() to virtio_rxq_rearm_vec(), which
properly refills the ring in batches of
RTE_VIRTIO_VPMD_RX_REARM_THRESH descriptors.

As virtqueue_enqueue_recv_refill_simple() is no longer used, this
patch also removes the function.

Fixes: efc83a1e7fc3 ("net/virtio: fix queue setup consistency")
Cc: stable@dpdk.org

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Jianfeng Tan <jianfeng.tan@intel.com>
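
For illustration only, a minimal standalone sketch (plain C, not DPDK code; REARM_THRESH, struct fake_vq and rearm_batch() are hypothetical stand-ins for RTE_VIRTIO_VPMD_RX_REARM_THRESH, the virtqueue and virtio_rxq_rearm_vec()) of the invariant the fix restores: the ring is only rearmed in whole batches, so the avail index stays a multiple of the threshold and the vector Rx path never runs past the ring. It mirrors the new hw->use_simple_rx branch in the diff below.

/*
 * Standalone illustration (not DPDK code): refill only in whole batches
 * of REARM_THRESH mbufs, as the fixed use_simple_rx branch does.
 */
#include <stdint.h>
#include <stdio.h>

#define REARM_THRESH 32 /* stand-in for RTE_VIRTIO_VPMD_RX_REARM_THRESH */

struct fake_vq {
	uint16_t vq_free_cnt;  /* free descriptors left in the ring */
	uint16_t vq_avail_idx; /* next avail index to publish */
};

/* stand-in for virtio_rxq_rearm_vec(): consumes exactly one batch */
static void rearm_batch(struct fake_vq *vq)
{
	vq->vq_free_cnt -= REARM_THRESH;
	vq->vq_avail_idx += REARM_THRESH;
}

int main(void)
{
	struct fake_vq vq = { .vq_free_cnt = 200, .vq_avail_idx = 0 };
	unsigned int nbufs = 0;

	/*
	 * Refill by whole batches only; a partial tail is left unfilled,
	 * so vq_avail_idx stays a multiple of REARM_THRESH.
	 */
	while (vq.vq_free_cnt >= REARM_THRESH) {
		rearm_batch(&vq);
		nbufs += REARM_THRESH;
	}

	printf("refilled %u mbufs, %u free slots left, avail_idx=%u\n",
	       nbufs, (unsigned)vq.vq_free_cnt, (unsigned)vq.vq_avail_idx);
	return 0;
}

With these example values (200 free slots, threshold 32), the sketch refills 192 mbufs and leaves 8 slots unfilled instead of pushing the avail index past a batch boundary.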

drivers/net/virtio/virtio_rxtx.c
@@ -30,6 +30,7 @@
 #include "virtio_pci.h"
 #include "virtqueue.h"
 #include "virtio_rxtx.h"
+#include "virtio_rxtx_simple.h"
 
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
@@ -437,6 +438,8 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 			vq->vq_ring.desc[desc_idx].flags =
 				VRING_DESC_F_WRITE;
 		}
+
+		virtio_rxq_vec_setup(rxvq);
 	}
 
 	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
@@ -446,30 +449,31 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 			&rxvq->fake_mbuf;
 	}
 
-	while (!virtqueue_full(vq)) {
-		m = rte_mbuf_raw_alloc(rxvq->mpool);
-		if (m == NULL)
-			break;
-
-		/* Enqueue allocated buffers */
-		if (hw->use_simple_rx)
-			error = virtqueue_enqueue_recv_refill_simple(vq, m);
-		else
-			error = virtqueue_enqueue_recv_refill(vq, m);
-
-		if (error) {
-			rte_pktmbuf_free(m);
-			break;
+	if (hw->use_simple_rx) {
+		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+			virtio_rxq_rearm_vec(rxvq);
+			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
 		}
-		nbufs++;
+	} else {
+		while (!virtqueue_full(vq)) {
+			m = rte_mbuf_raw_alloc(rxvq->mpool);
+			if (m == NULL)
+				break;
+
+			/* Enqueue allocated buffers */
+			error = virtqueue_enqueue_recv_refill(vq, m);
+			if (error) {
+				rte_pktmbuf_free(m);
+				break;
+			}
+			nbufs++;
+		}
+
+		vq_update_avail_idx(vq);
 	}
 
-	vq_update_avail_idx(vq);
-
 	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
 
-	virtio_rxq_vec_setup(rxvq);
-
 	VIRTQUEUE_DUMP(vq);
 
 	return 0;

drivers/net/virtio/virtio_rxtx.h
@@ -60,7 +60,4 @@ struct virtnet_ctl {
 
 int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);
 
-int virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
-	struct rte_mbuf *m);
-
 #endif /* _VIRTIO_RXTX_H_ */

drivers/net/virtio/virtio_rxtx_simple.c
@@ -27,35 +27,6 @@
 #pragma GCC diagnostic ignored "-Wcast-qual"
 #endif
 
-int __attribute__((cold))
-virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
-	struct rte_mbuf *cookie)
-{
-	struct vq_desc_extra *dxp;
-	struct vring_desc *start_dp;
-	uint16_t desc_idx;
-
-	cookie->port = vq->rxq.port_id;
-	cookie->data_off = RTE_PKTMBUF_HEADROOM;
-
-	desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
-	dxp = &vq->vq_descx[desc_idx];
-	dxp->cookie = (void *)cookie;
-	vq->sw_ring[desc_idx] = cookie;
-
-	start_dp = vq->vq_ring.desc;
-	start_dp[desc_idx].addr =
-		VIRTIO_MBUF_ADDR(cookie, vq) +
-		RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
-	start_dp[desc_idx].len = cookie->buf_len -
-		RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
-	vq->vq_free_cnt--;
-	vq->vq_avail_idx++;
-
-	return 0;
-}
-
 uint16_t
 virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t nb_pkts)
@@ -78,7 +49,7 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 	rte_compiler_barrier();
 
 	if (nb_used >= VIRTIO_TX_FREE_THRESH)
-		virtio_xmit_cleanup(vq);
+		virtio_xmit_cleanup_simple(vq);
 
 	nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
 	desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);

drivers/net/virtio/virtio_rxtx_simple.h
@@ -60,7 +60,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
 #define VIRTIO_TX_FREE_NR 32
 /* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
 static inline void
-virtio_xmit_cleanup(struct virtqueue *vq)
+virtio_xmit_cleanup_simple(struct virtqueue *vq)
 {
 	uint16_t i, desc_idx;
 	uint32_t nb_free = 0;