net/virtio: fix packet corruption
The virtio-user support changed the way the mbuf DMA address is
retrieved: a physical address is used in the virtio-pci case and a
virtual address in the virtio-user case.

This change introduced possible memory corruption in packets by
replacing:
  m->buf_physaddr + RTE_PKTMBUF_HEADROOM
with:
  m->buf_physaddr + m->data_off (through a macro)

This patch fixes the issue by restoring the original behavior.
It also reworks the macros, adding a "VIRTIO_" prefix and API
comments.
Fixes: f24f8f9fee ("net/virtio: allow virtual address to fill vring descriptors")
Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Acked-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
commit 25f80d1087
parent 164fd39678
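For context, the sketch below illustrates the address math described in the
commit message; it is not the driver code itself. Field names come from
DPDK's rte_mbuf API, and hdr_size stands in for the device's virtio-net
header length (hw->vtnet_hdr_size).

/*
 * Illustrative sketch only (not part of the patch): why computing the Rx
 * refill address from m->data_off instead of the fixed headroom can
 * corrupt packets.
 */
#include <stdint.h>
#include <rte_mbuf.h>

static uint64_t
rx_refill_addr_original(const struct rte_mbuf *m, uint32_t hdr_size)
{
	/* Original behavior: the virtio-net header is placed just before the
	 * fixed headroom boundary, so received data always starts at
	 * buf_physaddr + RTE_PKTMBUF_HEADROOM, consistent with the descriptor
	 * length computed from buf_len and RTE_PKTMBUF_HEADROOM. */
	return m->buf_physaddr + RTE_PKTMBUF_HEADROOM - hdr_size;
}

static uint64_t
rx_refill_addr_broken(const struct rte_mbuf *m, uint32_t hdr_size)
{
	/* Behavior introduced by the old macro: data_off of a recycled mbuf
	 * may differ from RTE_PKTMBUF_HEADROOM, so the address no longer
	 * agrees with the descriptor length and the device can write outside
	 * the intended region. */
	return m->buf_physaddr + m->data_off - hdr_size;
}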
@@ -454,7 +454,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
 
 	/* For virtio-user case (that is when dev->pci_dev is NULL), we use
 	 * virtual address. And we need properly set _offset_, please see
-	 * MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
+	 * VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
 	 */
 	if (dev->pci_dev)
 		vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
@@ -193,7 +193,8 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
 
 	start_dp = vq->vq_ring.desc;
 	start_dp[idx].addr =
-		MBUF_DATA_DMA_ADDR(cookie, vq->offset) - hw->vtnet_hdr_size;
+		VIRTIO_MBUF_ADDR(cookie, vq) +
+		RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
 	start_dp[idx].len =
 		cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
 	start_dp[idx].flags = VRING_DESC_F_WRITE;
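A worked example of the restored Rx refill math above, under assumed default
values (128-byte RTE_PKTMBUF_HEADROOM, buf_len of 2176 bytes from
RTE_MBUF_DEFAULT_BUF_SIZE, 12-byte mergeable-Rx virtio-net header):

addr = buf_physaddr + 128 - 12       /* header takes the last 12 bytes of the headroom */
len  = 2176 - 128 + 12 = 2060        /* 12-byte header + 2048-byte default data room   */

Received payload therefore starts at buf_physaddr + 128, i.e. at
data_off == RTE_PKTMBUF_HEADROOM, which is what the rest of the mbuf API
expects for a freshly received packet.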
@@ -265,7 +266,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	}
 
 	do {
-		start_dp[idx].addr = MBUF_DATA_DMA_ADDR(cookie, vq->offset);
+		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
 		start_dp[idx].len = cookie->data_len;
 		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
 		idx = start_dp[idx].next;
@@ -80,8 +80,9 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
 	vq->sw_ring[desc_idx] = cookie;
 
 	start_dp = vq->vq_ring.desc;
-	start_dp[desc_idx].addr = MBUF_DATA_DMA_ADDR(cookie, vq->offset) -
-		vq->hw->vtnet_hdr_size;
+	start_dp[desc_idx].addr =
+		VIRTIO_MBUF_ADDR(cookie, vq) +
+		RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
 	start_dp[desc_idx].len = cookie->buf_len -
 		RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
 
@@ -120,8 +121,8 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
 		*(uint64_t *)p = rxvq->mbuf_initializer;
 
 		start_dp[i].addr =
-			MBUF_DATA_DMA_ADDR(sw_ring[i], vq->offset) -
-			vq->hw->vtnet_hdr_size;
+			VIRTIO_MBUF_ADDR(sw_ring[i], vq) +
+			RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
 		start_dp[i].len = sw_ring[i]->buf_len -
 			RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
 	}
@@ -371,7 +372,7 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 			vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
 		for (i = 0; i < nb_tail; i++) {
 			start_dp[desc_idx].addr =
-				MBUF_DATA_DMA_ADDR(*tx_pkts, vq->offset);
+				VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
 			start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
 			tx_pkts++;
 			desc_idx++;
@@ -383,7 +384,7 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 			vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
 		for (i = 0; i < nb_commit; i++) {
 			start_dp[desc_idx].addr =
-				MBUF_DATA_DMA_ADDR(*tx_pkts, vq->offset);
+				VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
 			start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
 			tx_pkts++;
 			desc_idx++;
@@ -67,12 +67,21 @@ struct rte_mbuf;
 #define VIRTQUEUE_MAX_NAME_SZ 32
 
 #ifdef RTE_VIRTIO_USER
-#define MBUF_DATA_DMA_ADDR(mb, offset) \
-	((uint64_t)((uintptr_t)(*(void **)((uintptr_t)mb + offset)) \
-	+ (mb)->data_off))
-#else /* RTE_VIRTIO_USER */
-#define MBUF_DATA_DMA_ADDR(mb, offset) rte_mbuf_data_dma_addr(mb)
-#endif /* RTE_VIRTIO_USER */
+/**
+ * Return the physical address (or virtual address in case of
+ * virtio-user) of mbuf data buffer.
+ */
+#define VIRTIO_MBUF_ADDR(mb, vq) (*(uint64_t *)((uintptr_t)(mb) + (vq)->offset))
+#else
+#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_physaddr)
+#endif
+
+/**
+ * Return the physical address (or virtual address in case of
+ * virtio-user) of mbuf data buffer, taking care of mbuf data offset
+ */
+#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
+	(VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
 
 #define VTNET_SQ_RQ_QUEUE_IDX 0
 #define VTNET_SQ_TQ_QUEUE_IDX 1
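To make the vq->offset indirection behind VIRTIO_MBUF_ADDR concrete, here is
a minimal sketch. The buf_addr branch for virtio-user is an assumption (only
the pci branch appears in the virtio_dev_queue_setup hunk above), and
vq_stub is an illustrative stand-in, not the driver's struct virtqueue.

#include <stddef.h>
#include <stdint.h>
#include <rte_mbuf.h>

/* Illustrative stub holding only the field the macro uses. */
struct vq_stub {
	uint16_t offset; /* byte offset inside rte_mbuf of the address field */
};

static void
setup_offset(struct vq_stub *vq, int has_pci_dev)
{
	if (has_pci_dev)
		vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
	else	/* virtio-user: virtual address (assumed counterpart branch) */
		vq->offset = offsetof(struct rte_mbuf, buf_addr);
}

static uint64_t
mbuf_ring_addr(const struct rte_mbuf *mb, const struct vq_stub *vq)
{
	/* Mirrors VIRTIO_MBUF_ADDR: load the 64-bit value stored at the
	 * selected offset, i.e. buf_physaddr or buf_addr (assumes a 64-bit
	 * target, as the driver macro also reads a uint64_t). */
	return *(const uint64_t *)((uintptr_t)mb + vq->offset);
}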
@@ -182,8 +191,8 @@ struct virtqueue {
 	void *vq_ring_virt_mem;  /**< linear address of vring*/
 	unsigned int vq_ring_size;
 
-	phys_addr_t vq_ring_mem; /**< physical address of vring */
-	/**< use virtual address for virtio-user. */
+	phys_addr_t vq_ring_mem; /**< physical address of vring,
+	                          * or virtual address for virtio-user. */
 
 	/**
 	 * Head of the free chain in the descriptor table. If