vhost: add shadow used ring support for packed rings
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Acked-by: Tiwei Bie <tiwei.bie@intel.com>
commit 37f5e79a27
parent 3e2f9700bb
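
A shadow used ring batches descriptor completions: instead of publishing each completed descriptor to the guest-visible used ring one at a time, the backend stages entries in a host-private array and writes them back in one burst, paying the ordering barrier and dirty-page logging cost once per burst instead of once per element. The toy model below only illustrates the idea; it is not part of this patch, and names such as vq_model and shadow_flush are invented.

    #include <stdint.h>

    #define RING_SIZE 256

    struct used_elem { uint16_t id; uint32_t len; };

    struct vq_model {
    	struct used_elem used[RING_SIZE];   /* guest-visible used ring */
    	struct used_elem shadow[RING_SIZE]; /* host-private staging area */
    	uint16_t shadow_idx;                /* entries staged so far */
    	uint16_t used_idx;                  /* next free slot in used[] */
    };

    /* stage one completion; nothing is visible to the guest yet */
    static void shadow_update(struct vq_model *vq, uint16_t id, uint32_t len)
    {
    	vq->shadow[vq->shadow_idx].id = id;
    	vq->shadow[vq->shadow_idx].len = len;
    	vq->shadow_idx++;
    }

    /* publish the whole burst at once */
    static void shadow_flush(struct vq_model *vq)
    {
    	uint16_t i;

    	for (i = 0; i < vq->shadow_idx; i++)
    		vq->used[(vq->used_idx + i) % RING_SIZE] = vq->shadow[i];
    	/* a real ring issues a write barrier here, then bumps the index
    	 * the guest polls; one barrier covers the whole burst */
    	vq->used_idx = (vq->used_idx + vq->shadow_idx) % RING_SIZE;
    	vq->shadow_idx = 0;
    }

    int main(void)
    {
    	struct vq_model vq = {0};

    	shadow_update(&vq, 7, 128);
    	shadow_update(&vq, 9, 256);
    	shadow_flush(&vq);	/* both completions become visible here */
    	return 0;
    }
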
lib/librte_vhost/vhost.c
@@ -93,9 +93,12 @@ cleanup_device(struct virtio_net *dev, int destroy)
 }
 
 void
-free_vq(struct vhost_virtqueue *vq)
+free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
-	rte_free(vq->shadow_used_ring);
+	if (vq_is_packed(dev))
+		rte_free(vq->shadow_used_packed);
+	else
+		rte_free(vq->shadow_used_split);
 	rte_free(vq->batch_copy_elems);
 	rte_mempool_free(vq->iotlb_pool);
 	rte_free(vq);
@@ -110,7 +113,7 @@ free_device(struct virtio_net *dev)
 	uint32_t i;
 
 	for (i = 0; i < dev->nr_vring; i++)
-		free_vq(dev->virtqueue[i]);
+		free_vq(dev, dev->virtqueue[i]);
 
 	rte_free(dev);
 }
lib/librte_vhost/vhost.h
@@ -80,6 +80,12 @@ struct log_cache_entry {
 	unsigned long val;
 };
 
+struct vring_used_elem_packed {
+	uint16_t id;
+	uint32_t len;
+	uint32_t count;
+};
+
 /**
  * Structure contains variables relevant to RX/TX virtqueues.
  */
@@ -119,7 +125,10 @@ struct vhost_virtqueue {
 	struct zcopy_mbuf	*zmbufs;
 	struct zcopy_mbuf_list	zmbuf_list;
 
-	struct vring_used_elem	*shadow_used_ring;
+	union {
+		struct vring_used_elem	*shadow_used_split;
+		struct vring_used_elem_packed *shadow_used_packed;
+	};
 	uint16_t		shadow_used_idx;
 	struct vhost_vring_addr	ring_addrs;
 
@@ -587,7 +596,7 @@ void vhost_destroy_device(int);
void vhost_destroy_device_notify(struct virtio_net *dev);
 
 void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
-void free_vq(struct vhost_virtqueue *vq);
+void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);
 
 int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
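
The anonymous union above is safe because a virtqueue's ring format is fixed when features are negotiated and never changes afterwards, so shadow_used_split and shadow_used_packed are never live at the same time; vq_is_packed(dev) selects the active member everywhere. A self-contained sketch of the same pattern (illustrative names only; C11 anonymous union, as in the patch):

    #include <stdint.h>
    #include <stdlib.h>

    struct used_split  { uint16_t id; uint32_t len; };
    struct used_packed { uint16_t id; uint32_t len; uint32_t count; };

    struct toy_vq {
    	int is_packed;	/* fixed at setup, like the negotiated ring format */
    	union {		/* one pointer slot, two possible element types */
    		struct used_split  *shadow_split;
    		struct used_packed *shadow_packed;
    	};
    };

    /* mirrors the free_vq() change: free via the member that matches
     * the ring format, never the other one */
    static void toy_free_vq(struct toy_vq *vq)
    {
    	if (vq->is_packed)
    		free(vq->shadow_packed);
    	else
    		free(vq->shadow_split);
    }

    int main(void)
    {
    	struct toy_vq vq = { .is_packed = 1 };

    	vq.shadow_packed = calloc(256, sizeof(*vq.shadow_packed));
    	toy_free_vq(&vq);
    	return 0;
    }
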
lib/librte_vhost/vhost_user.c
@@ -233,7 +233,7 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
 
 		dev->virtqueue[dev->nr_vring] = NULL;
 		cleanup_vq(vq, 1);
-		free_vq(vq);
+		free_vq(dev, vq);
 	}
 }
 
@@ -282,13 +282,26 @@ vhost_user_set_vring_num(struct virtio_net *dev,
 		TAILQ_INIT(&vq->zmbuf_list);
 	}
 
-	vq->shadow_used_ring = rte_malloc(NULL,
-				vq->size * sizeof(struct vring_used_elem),
-				RTE_CACHE_LINE_SIZE);
-	if (!vq->shadow_used_ring) {
-		RTE_LOG(ERR, VHOST_CONFIG,
-			"failed to allocate memory for shadow used ring.\n");
-		return -1;
+	if (vq_is_packed(dev)) {
+		vq->shadow_used_packed = rte_malloc(NULL,
+				vq->size *
+				sizeof(struct vring_used_elem_packed),
+				RTE_CACHE_LINE_SIZE);
+		if (!vq->shadow_used_packed) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+				"failed to allocate memory for shadow used ring.\n");
+			return -1;
+		}
+	} else {
+		vq->shadow_used_split = rte_malloc(NULL,
+				vq->size * sizeof(struct vring_used_elem),
+				RTE_CACHE_LINE_SIZE);
+		if (!vq->shadow_used_split) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+				"failed to allocate memory for shadow used ring.\n");
+			return -1;
+		}
 	}
 
 	vq->batch_copy_elems = rte_malloc(NULL,
@@ -315,7 +328,8 @@ numa_realloc(struct virtio_net *dev, int index)
 	struct virtio_net *old_dev;
 	struct vhost_virtqueue *old_vq, *vq;
 	struct zcopy_mbuf *new_zmbuf;
-	struct vring_used_elem *new_shadow_used_ring;
+	struct vring_used_elem *new_shadow_used_split;
+	struct vring_used_elem_packed *new_shadow_used_packed;
 	struct batch_copy_elem *new_batch_copy_elems;
 	int ret;
 
@@ -350,13 +364,26 @@ numa_realloc(struct virtio_net *dev, int index)
 			vq->zmbufs = new_zmbuf;
 		}
 
-		new_shadow_used_ring = rte_malloc_socket(NULL,
-			vq->size * sizeof(struct vring_used_elem),
-			RTE_CACHE_LINE_SIZE,
-			newnode);
-		if (new_shadow_used_ring) {
-			rte_free(vq->shadow_used_ring);
-			vq->shadow_used_ring = new_shadow_used_ring;
+		if (vq_is_packed(dev)) {
+			new_shadow_used_packed = rte_malloc_socket(NULL,
+					vq->size *
+					sizeof(struct vring_used_elem_packed),
+					RTE_CACHE_LINE_SIZE,
+					newnode);
+			if (new_shadow_used_packed) {
+				rte_free(vq->shadow_used_packed);
+				vq->shadow_used_packed = new_shadow_used_packed;
+			}
+		} else {
+			new_shadow_used_split = rte_malloc_socket(NULL,
+					vq->size *
+					sizeof(struct vring_used_elem),
+					RTE_CACHE_LINE_SIZE,
+					newnode);
+			if (new_shadow_used_split) {
+				rte_free(vq->shadow_used_split);
+				vq->shadow_used_split = new_shadow_used_split;
+			}
 		}
 
 		new_batch_copy_elems = rte_malloc_socket(NULL,
@@ -1047,8 +1074,13 @@ vhost_user_get_vring_base(struct virtio_net *dev,
 
 	if (dev->dequeue_zero_copy)
 		free_zmbufs(vq);
-	rte_free(vq->shadow_used_ring);
-	vq->shadow_used_ring = NULL;
+	if (vq_is_packed(dev)) {
+		rte_free(vq->shadow_used_packed);
+		vq->shadow_used_packed = NULL;
+	} else {
+		rte_free(vq->shadow_used_split);
+		vq->shadow_used_split = NULL;
+	}
 
 	rte_free(vq->batch_copy_elems);
 	vq->batch_copy_elems = NULL;
lib/librte_vhost/virtio_net.c
@@ -82,7 +82,7 @@ do_flush_shadow_used_ring_split(struct virtio_net *dev,
 		uint16_t to, uint16_t from, uint16_t size)
 {
 	rte_memcpy(&vq->used->ring[to],
-			&vq->shadow_used_ring[from],
+			&vq->shadow_used_split[from],
 			size * sizeof(struct vring_used_elem));
 	vhost_log_cache_used_vring(dev, vq,
 			offsetof(struct vring_used, ring[to]),
@@ -126,8 +126,73 @@ update_shadow_used_ring_split(struct vhost_virtqueue *vq,
 {
 	uint16_t i = vq->shadow_used_idx++;
 
-	vq->shadow_used_ring[i].id = desc_idx;
-	vq->shadow_used_ring[i].len = len;
+	vq->shadow_used_split[i].id = desc_idx;
+	vq->shadow_used_split[i].len = len;
+}
+
+static __rte_unused __rte_always_inline void
+flush_shadow_used_ring_packed(struct virtio_net *dev,
+			struct vhost_virtqueue *vq)
+{
+	int i;
+	uint16_t used_idx = vq->last_used_idx;
+
+	/* Split loop in two to save memory barriers */
+	for (i = 0; i < vq->shadow_used_idx; i++) {
+		vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
+		vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
+
+		used_idx += vq->shadow_used_packed[i].count;
+		if (used_idx >= vq->size)
+			used_idx -= vq->size;
+	}
+
+	rte_smp_wmb();
+
+	for (i = 0; i < vq->shadow_used_idx; i++) {
+		uint16_t flags;
+
+		if (vq->shadow_used_packed[i].len)
+			flags = VRING_DESC_F_WRITE;
+		else
+			flags = 0;
+
+		if (vq->used_wrap_counter) {
+			flags |= VRING_DESC_F_USED;
+			flags |= VRING_DESC_F_AVAIL;
+		} else {
+			flags &= ~VRING_DESC_F_USED;
+			flags &= ~VRING_DESC_F_AVAIL;
+		}
+
+		vq->desc_packed[vq->last_used_idx].flags = flags;
+
+		vhost_log_cache_used_vring(dev, vq,
+					vq->last_used_idx *
+					sizeof(struct vring_packed_desc),
+					sizeof(struct vring_packed_desc));
+
+		vq->last_used_idx += vq->shadow_used_packed[i].count;
+		if (vq->last_used_idx >= vq->size) {
+			vq->used_wrap_counter ^= 1;
+			vq->last_used_idx -= vq->size;
+		}
+	}
+
+	rte_smp_wmb();
+	vq->shadow_used_idx = 0;
+	vhost_log_cache_sync(dev, vq);
+}
+
+static __rte_unused __rte_always_inline void
+update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
+			uint16_t desc_idx, uint16_t len, uint16_t count)
+{
+	uint16_t i = vq->shadow_used_idx++;
+
+	vq->shadow_used_packed[i].id = desc_idx;
+	vq->shadow_used_packed[i].len = len;
+	vq->shadow_used_packed[i].count = count;
 }
 
 static inline void
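
Both packed-ring helpers above carry __rte_unused because this patch only introduces them; the datapath that calls them comes in a follow-up change. A hypothetical caller could look like the sketch below (not part of this series; virtio_dev_rx_packed_sketch, buf_id, and desc_count are invented placeholders): stage one shadow entry per completed descriptor chain, then publish the whole burst with a single flush.

    static __rte_unused __rte_always_inline uint32_t
    virtio_dev_rx_packed_sketch(struct virtio_net *dev,
    		struct vhost_virtqueue *vq,
    		struct rte_mbuf **pkts, uint32_t count)
    {
    	uint32_t pkt_idx;

    	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
    		/* placeholders: a real caller learns these while
    		 * walking and copying the descriptor chain */
    		uint16_t buf_id = 0;
    		uint16_t desc_count = 1;

    		update_shadow_used_ring_packed(vq, buf_id,
    				pkts[pkt_idx]->pkt_len, desc_count);
    	}

    	/* one flush publishes every staged entry: ids and lens first,
    	 * barrier, then flags, exactly as implemented above */
    	if (vq->shadow_used_idx)
    		flush_shadow_used_ring_packed(dev, vq);

    	return pkt_idx;
    }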