vhost: move dirty logging cache out of virtqueue

This patch moves the per-virtqueue dirty logging cache
out of the virtqueue struct, allocating it dynamically
and only when live migration is enabled.

This saves 8 cachelines in the vhost_virtqueue struct.
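
For reference, the arithmetic behind the 8-cacheline figure. The entry
layout and VHOST_LOG_CACHE_NR value below are assumptions taken from the
upstream vhost.h of that era (32 entries of 16 bytes each on a 64-bit
ABI); this is a standalone sketch, not code from this patch:

    /*
     * Back-of-the-envelope check, assuming VHOST_LOG_CACHE_NR == 32 and
     * a 16-byte log_cache_entry (uint32_t + padding + unsigned long).
     */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VHOST_LOG_CACHE_NR 32

    struct log_cache_entry {
        uint32_t offset;    /* word offset into the dirty log bitmap */
        unsigned long val;  /* cached bitmap word, flushed at sync time */
    };

    int main(void)
    {
        size_t bytes = sizeof(struct log_cache_entry) * VHOST_LOG_CACHE_NR;

        /* 32 entries * 16 bytes = 512 bytes = 8 cachelines of 64 bytes */
        printf("%zu bytes = %zu cachelines\n", bytes, bytes / 64);
        return 0;
    }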

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
Reviewed-by: David Marchand <david.marchand@redhat.com>
Tested-by: Balazs Nemeth <bnemeth@redhat.com>
Author: Maxime Coquelin, 2021-03-23 10:02:18 +01:00
Committed-by: Chenbo Xia
Commit: 1818a63147 (parent: 2453bbf7e1)

3 changed files with 35 additions and 1 deletion

lib/librte_vhost/vhost.c

@@ -145,6 +145,10 @@ __vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	if (unlikely(!dev->log_base))
 		return;
 
+	/* No cache, nothing to sync */
+	if (unlikely(!vq->log_cache))
+		return;
+
 	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 
 	log_base = (unsigned long *)(uintptr_t)dev->log_base;
@@ -177,6 +181,14 @@ __vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint32_t offset = page / (sizeof(unsigned long) << 3);
 	int i;
 
+	if (unlikely(!vq->log_cache)) {
+		/* No logging cache allocated, write dirty log map directly */
+		rte_atomic_thread_fence(__ATOMIC_RELEASE);
+		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
+
+		return;
+	}
+
 	for (i = 0; i < vq->log_cache_nb_elem; i++) {
 		struct log_cache_entry *elem = vq->log_cache + i;
 
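
For context on the new fallback: when no cache was allocated, each dirty
page is logged straight into the shared bitmap. A minimal sketch of that
direct write, modeled on the vhost_log_page()/vhost_set_bit() helpers in
vhost.c (the pre-GCC-7 __sync path is omitted, so take the exact form as
an assumption):

    #include <stdint.h>

    /* Atomically set bit (page % 8) of byte (page / 8) in the dirty log. */
    static inline void
    log_page_sketch(uint8_t *log_base, uint64_t page)
    {
        __atomic_fetch_or(&log_base[page / 8], 1U << (page % 8),
                __ATOMIC_RELAXED);
    }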
@@ -354,6 +366,7 @@ free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	}
 	rte_free(vq->batch_copy_elems);
 	rte_mempool_free(vq->iotlb_pool);
+	rte_free(vq->log_cache);
 	rte_free(vq);
 }
 

lib/librte_vhost/vhost.h

@@ -183,7 +183,7 @@ struct vhost_virtqueue {
 	bool	used_wrap_counter;
 	bool	avail_wrap_counter;
 
-	struct log_cache_entry log_cache[VHOST_LOG_CACHE_NR];
+	struct log_cache_entry *log_cache;
 	uint16_t log_cache_nb_elem;
 
 	rte_rwlock_t	iotlb_lock;

lib/librte_vhost/vhost_user.c

@@ -2022,6 +2022,9 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
 	rte_free(vq->batch_copy_elems);
 	vq->batch_copy_elems = NULL;
 
+	rte_free(vq->log_cache);
+	vq->log_cache = NULL;
+
 	msg->size = sizeof(msg->payload.state);
 	msg->fd_num = 0;
 
@@ -2121,6 +2124,7 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	int fd = msg->fds[0];
 	uint64_t size, off;
 	void *addr;
+	uint32_t i;
 
 	if (validate_msg_fds(msg, 1) != 0)
 		return RTE_VHOST_MSG_RESULT_ERR;
@@ -2174,6 +2178,23 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	dev->log_base = dev->log_addr + off;
 	dev->log_size = size;
 
+	for (i = 0; i < dev->nr_vring; i++) {
+		struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+		rte_free(vq->log_cache);
+		vq->log_cache = NULL;
+		vq->log_cache_nb_elem = 0;
+		vq->log_cache = rte_zmalloc("vq log cache",
+				sizeof(struct log_cache_entry) * VHOST_LOG_CACHE_NR,
+				0);
+		/*
+		 * If log cache alloc fail, don't fail migration, but no
+		 * caching will be done, which will impact performance
+		 */
+		if (!vq->log_cache)
+			VHOST_LOG_CONFIG(ERR, "Failed to allocate VQ logging cache\n");
+	}
+
 	/*
 	 * The spec is not clear about it (yet), but QEMU doesn't expect
 	 * any payload in the reply.
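
For completeness, the flush side that makes this allocation worthwhile:
entries accumulated by __vhost_log_cache_page() are merged into the shared
log with one atomic OR per cached word around used-ring updates. A minimal
sketch modeled on __vhost_log_cache_sync() in vhost.c, with the memory
fences and pre-GCC-7 __sync fallback simplified away:

    /* Merge each cached bitmap word into the shared dirty log, then reset. */
    static inline void
    log_cache_sync_sketch(struct virtio_net *dev, struct vhost_virtqueue *vq)
    {
        unsigned long *log_base = (unsigned long *)(uintptr_t)dev->log_base;
        int i;

        for (i = 0; i < vq->log_cache_nb_elem; i++) {
            struct log_cache_entry *elem = vq->log_cache + i;

            __atomic_fetch_or(log_base + elem->offset, elem->val,
                    __ATOMIC_RELAXED);
        }

        vq->log_cache_nb_elem = 0;
    }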