numam-dpdk/drivers/net/virtio/virtio_rxtx.h
Maxime Coquelin 4785747066 net/virtio: fix resuming port with Rx vector path
Since commit efc83a1e7f ("net/virtio: fix queue setup consistency"),
when resuming a virtio port, the Rx rings are refilled with new mbufs
until they are full (vq->vq_free_cnt == 0). This is done without
ensuring that the descriptor index remains a multiple of
RTE_VIRTIO_VPMD_RX_REARM_THRESH, which is a prerequisite when using
vector mode. This can cause an out-of-bounds access in the Rx ring.

This commit changes the vector refill method from
virtqueue_enqueue_recv_refill_simple() to virtio_rxq_rearm_vec(), which
properly ensures that the refill is done in batches of
RTE_VIRTIO_VPMD_RX_REARM_THRESH.

As virtqueue_enqueue_recv_refill_simple() is no longer used, this
patch also removes the function.
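
As a hedged sketch of that batching contract (the wrapper name
rx_refill_batched is hypothetical; only virtio_rxq_rearm_vec() and
vq_free_cnt come from the patch and the message above), a resume-time
refill loop would look like:

/*
 * Illustrative only, not the actual driver code: refill the Rx ring in
 * whole batches, so the descriptor index always remains a multiple of
 * RTE_VIRTIO_VPMD_RX_REARM_THRESH, as the vector Rx path requires.
 */
static void
rx_refill_batched(struct virtnet_rx *rxvq)
{
	struct virtqueue *vq = rxvq->vq;

	/* Stop while fewer than one full batch of descriptors is free,
	 * instead of refilling until vq_free_cnt == 0.
	 */
	while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH)
		virtio_rxq_rearm_vec(rxvq); /* rearms one full batch */
}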

Fixes: efc83a1e7f ("net/virtio: fix queue setup consistency")
Cc: stable@dpdk.org

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Jianfeng Tan <jianfeng.tan@intel.com>
2018-02-13 18:57:59 +01:00

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _VIRTIO_RXTX_H_
#define _VIRTIO_RXTX_H_

#define RTE_PMD_VIRTIO_RX_MAX_BURST 64

struct virtnet_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
	uint64_t multicast;
	uint64_t broadcast;
	/* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
	uint64_t size_bins[8];
};

struct virtnet_rx {
	struct virtqueue *vq;
	/* dummy mbuf, for wraparound when processing RX ring. */
	struct rte_mbuf fake_mbuf;
	uint64_t mbuf_initializer; /**< value to init mbufs. */
	struct rte_mempool *mpool; /**< mempool for mbuf allocation */

	uint16_t queue_id; /**< DPDK queue index. */
	uint16_t port_id;  /**< Device port identifier. */

	/* Statistics */
	struct virtnet_stats stats;

	const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
};

struct virtnet_tx {
	struct virtqueue *vq;
	/**< memzone to populate hdr. */
	const struct rte_memzone *virtio_net_hdr_mz;
	rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */

	uint16_t queue_id; /**< DPDK queue index. */
	uint16_t port_id;  /**< Device port identifier. */

	/* Statistics */
	struct virtnet_stats stats;

	const struct rte_memzone *mz; /**< mem zone to populate TX ring. */
};

struct virtnet_ctl {
	struct virtqueue *vq;
	/**< memzone to populate hdr. */
	const struct rte_memzone *virtio_net_hdr_mz;
	rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
	uint16_t port_id; /**< Device port identifier. */
	const struct rte_memzone *mz; /**< mem zone to populate CTL ring. */
	rte_spinlock_t lock; /**< spinlock for control queue. */
};

int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);

#endif /* _VIRTIO_RXTX_H_ */
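
For illustration, here is a hedged sketch (not part of this header; the
helper name count_size_bin is hypothetical) of how a driver could map a
packet length onto the RFC 2819 size_bins[] buckets declared in struct
virtnet_stats above. The exact cutoffs are an assumption based on that
struct comment:

/*
 * Hypothetical helper, not the driver's actual code: bucket a packet
 * length into size_bins[] per the struct comment: [0] undersized (<64),
 * [1] exactly 64, [2]..[5] one power-of-two range each from 65-127 up
 * to 512-1023, [6] 1024-1518, [7] 1519 and larger.
 */
static inline void
count_size_bin(struct virtnet_stats *stats, uint32_t pkt_len)
{
	if (pkt_len == 64) {
		stats->size_bins[1]++;
	} else if (pkt_len > 64 && pkt_len < 1024) {
		/* bins 2..5: index derived from the position of the MSB */
		uint32_t bin = (sizeof(pkt_len) * 8) -
			__builtin_clz(pkt_len) - 5;
		stats->size_bins[bin]++;
	} else if (pkt_len < 64) {
		stats->size_bins[0]++;
	} else if (pkt_len < 1519) {
		stats->size_bins[6]++;
	} else {
		stats->size_bins[7]++;
	}
}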