vhost: simplify descriptor buffer prefetching

Now that we have a single function to map the descriptor
buffers, let's prefetch them there, since it is the earliest
place we can do it.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Tiwei Bie <tiwei.bie@intel.com>
Author: Maxime Coquelin
Date: 2019-05-29 15:04:19 +02:00
Committer: Ferruh Yigit
parent 084fac96ca
commit d1134c09e3


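For context, a minimal sketch of the pattern this commit applies (not the
actual DPDK code): issue the prefetch once, at descriptor-mapping time,
rather than at every later consumer. The struct buf_vector here is a
simplified stand-in for the one in vhost.h, and map_guest_buf() is a
hypothetical analogue of map_one_desc(); only rte_prefetch0() is the real
DPDK API.

#include <stdint.h>
#include <rte_prefetch.h>

/* Simplified stand-in for the vhost buf_vector element. */
struct buf_vector {
	uint64_t buf_iova;
	uint64_t buf_addr;
	uint32_t buf_len;
};

/* Hypothetical analogue of map_one_desc(): prefetch the buffer at the
 * earliest point its host virtual address is known, so the first cache
 * line is likely warm by the time the copy routines touch it. */
static int
map_guest_buf(struct buf_vector *buf_vec, uint16_t vec_id,
		uint64_t desc_iova, uint64_t desc_addr, uint32_t desc_len)
{
	if (desc_addr == 0)
		return -1;

	rte_prefetch0((void *)(uintptr_t)desc_addr);

	buf_vec[vec_id].buf_iova = desc_iova;
	buf_vec[vec_id].buf_addr = desc_addr;
	buf_vec[vec_id].buf_len  = desc_len;

	return 0;
}
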
@@ -286,6 +286,8 @@ map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		if (unlikely(!desc_addr))
 			return -1;
 
+		rte_prefetch0((void *)(uintptr_t)desc_addr);
+
 		buf_vec[vec_id].buf_iova = desc_iova;
 		buf_vec[vec_id].buf_addr = desc_addr;
 		buf_vec[vec_id].buf_len  = desc_chunck_len;
@@ -666,9 +668,6 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	buf_iova = buf_vec[vec_idx].buf_iova;
 	buf_len = buf_vec[vec_idx].buf_len;
 
-	if (nr_vec > 1)
-		rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);
-
 	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
 		error = -1;
 		goto out;
@@ -711,10 +710,6 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			buf_iova = buf_vec[vec_idx].buf_iova;
 			buf_len = buf_vec[vec_idx].buf_len;
 
-			/* Prefetch next buffer address. */
-			if (vec_idx + 1 < nr_vec)
-				rte_prefetch0((void *)(uintptr_t)
-						buf_vec[vec_idx + 1].buf_addr);
 			buf_offset = 0;
 			buf_avail  = buf_len;
 		}
@@ -812,8 +807,6 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			break;
 		}
 
-		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
-
 		VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
 			dev->vid, vq->last_avail_idx,
 			vq->last_avail_idx + num_buffers);
@@ -861,8 +854,6 @@ virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			break;
 		}
 
-		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
-
 		VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
 			dev->vid, vq->last_avail_idx,
 			vq->last_avail_idx + num_buffers);
@@ -1119,9 +1110,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		goto out;
 	}
 
-	if (likely(nr_vec > 1))
-		rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);
-
 	if (virtio_net_with_host_offload(dev)) {
 		if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
 			/*
@@ -1132,7 +1120,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			hdr = &tmp_hdr;
 		} else {
 			hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
-			rte_prefetch0(hdr);
 		}
 	}
 
@@ -1162,9 +1149,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
 	}
 
-	rte_prefetch0((void *)(uintptr_t)
-			(buf_addr + buf_offset));
-
 	PRINT_PACKET(dev,
 			(uintptr_t)(buf_addr + buf_offset),
 			(uint32_t)buf_avail, 0);
@@ -1230,14 +1214,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			buf_iova = buf_vec[vec_idx].buf_iova;
 			buf_len = buf_vec[vec_idx].buf_len;
 
-			/*
-			 * Prefecth desc n + 1 buffer while
-			 * desc n buffer is processed.
-			 */
-			if (vec_idx + 1 < nr_vec)
-				rte_prefetch0((void *)(uintptr_t)
-					buf_vec[vec_idx + 1].buf_addr);
-
 			buf_offset = 0;
 			buf_avail  = buf_len;
 
@@ -1381,8 +1357,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		if (likely(dev->dequeue_zero_copy == 0))
 			update_shadow_used_ring_split(vq, head_idx, 0);
 
-		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
-
 		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
 		if (unlikely(pkts[i] == NULL)) {
 			RTE_LOG(ERR, VHOST_DATA,
@@ -1492,8 +1466,6 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			update_shadow_used_ring_packed(vq, buf_id, 0,
 					desc_count);
 
-		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
-
 		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
 		if (unlikely(pkts[i] == NULL)) {
 			RTE_LOG(ERR, VHOST_DATA,
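
For reference, rte_prefetch0() is only a cache hint: it cannot fault and
gives no correctness guarantee, which is why the duplicated call sites
removed above were safe to drop. A rough sketch of its semantics using the
GCC/Clang builtin (the real implementation is per-architecture, e.g.
prefetcht0 on x86; prefetch_l1() is a hypothetical name):

#include <stdint.h>

/* Approximates rte_prefetch0(): ask that the line at p be pulled into
 * all cache levels, for read access, with high temporal locality. */
static inline void
prefetch_l1(const volatile void *p)
{
	__builtin_prefetch((const void *)(uintptr_t)p, 0, 3);
}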