net/virtio-user: support memory hotplug
When memory is hot-added or hot-removed, the virtio-user driver has to notify the vhost-user backend by sending a VHOST_USER_SET_MEM_TABLE request with the new memory map as payload.

This patch implements and registers a mem_event callback that pauses the datapath and updates the memory regions to vhost on a hot-add or hot-remove event. This memory region update only has to be done when the device is already started, so a new status flag is added to the device to keep track of its state. As the device can now be managed by different threads, a mutex is introduced to protect against concurrent device configuration.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Commit 12ecb2f63b (parent d5022533c2)
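As background for the mechanism described in the commit message, here is a minimal sketch (not part of the patch) of the EAL memory-event callback pattern the patch hooks into: register a named callback with rte_mem_event_callback_register(), and on every hot-add/hot-remove event take the device lock, skip the update while the device is not started, then pause the datapath, push the new memory map, and resume. The struct my_dev and the pause_datapath()/update_mem_table()/resume_datapath() helpers are hypothetical placeholders standing in for the driver's enable_qp() handling and the VHOST_USER_SET_MEM_TABLE request.

#include <stdbool.h>
#include <pthread.h>
#include <rte_memory.h> /* rte_mem_event_callback_register/unregister() */

/* Hypothetical device state, mirroring the mutex and started flag this
 * patch adds to struct virtio_user_dev. */
struct my_dev {
	pthread_mutex_t mutex;
	bool started;
};

/* Hypothetical stand-ins for enable_qp() and the SET_MEM_TABLE request. */
static void pause_datapath(struct my_dev *dev)   { (void)dev; /* disable queues */ }
static void update_mem_table(struct my_dev *dev) { (void)dev; /* send new memory map */ }
static void resume_datapath(struct my_dev *dev)  { (void)dev; /* re-enable queues */ }

/* Called by the EAL on every memory hot-add or hot-remove event. */
static void
my_mem_event_cb(enum rte_mem_event type, const void *addr, size_t len, void *arg)
{
	struct my_dev *dev = arg;

	(void)type; /* RTE_MEM_EVENT_ALLOC or RTE_MEM_EVENT_FREE */
	(void)addr;
	(void)len;

	pthread_mutex_lock(&dev->mutex);
	if (dev->started) {
		pause_datapath(dev);   /* Step 1: quiesce the queues */
		update_mem_table(dev); /* Step 2: send the updated memory map */
		resume_datapath(dev);  /* Step 3: restart the queues */
	}
	pthread_mutex_unlock(&dev->mutex);
}

static int
my_dev_init(struct my_dev *dev)
{
	pthread_mutex_init(&dev->mutex, NULL);
	dev->started = false;

	/* The same (name, arg) pair is later passed to
	 * rte_mem_event_callback_unregister() on teardown. */
	if (rte_mem_event_callback_register("my_mem_event_clb",
					    my_mem_event_cb, dev))
		return -1;

	return 0;
}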
drivers/net/virtio/virtio_user/virtio_user_dev.c:

@@ -17,6 +17,8 @@
 #include "virtio_user_dev.h"
 #include "../virtio_ethdev.h"
 
+#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"
+
 static int
 virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
 {
@@ -110,8 +112,10 @@ virtio_user_start_device(struct virtio_user_dev *dev)
 	uint64_t features;
 	int ret;
 
+	pthread_mutex_lock(&dev->mutex);
+
 	if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
-		return -1;
+		goto error;
 
 	/* Do not check return as already done in init, or reset in stop */
 	dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL);
@@ -146,8 +150,12 @@ virtio_user_start_device(struct virtio_user_dev *dev)
 	 */
 	dev->ops->enable_qp(dev, 0, 1);
 
+	dev->started = true;
+	pthread_mutex_unlock(&dev->mutex);
+
 	return 0;
 error:
+	pthread_mutex_unlock(&dev->mutex);
 	/* TODO: free resource here or caller to check */
 	return -1;
 }
@@ -156,13 +164,17 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
 {
 	uint32_t i;
 
+	pthread_mutex_lock(&dev->mutex);
 	for (i = 0; i < dev->max_queue_pairs; ++i)
 		dev->ops->enable_qp(dev, i, 0);
 
 	if (dev->ops->send_request(dev, VHOST_USER_RESET_OWNER, NULL) < 0) {
 		PMD_DRV_LOG(INFO, "Failed to reset the device\n");
+		pthread_mutex_unlock(&dev->mutex);
 		return -1;
 	}
+	dev->started = false;
+	pthread_mutex_unlock(&dev->mutex);
 
 	return 0;
 }
@@ -263,6 +275,35 @@ virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
 	return 0;
 }
 
+static void
+virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
+			 const void *addr __rte_unused,
+			 size_t len __rte_unused,
+			 void *arg)
+{
+	struct virtio_user_dev *dev = arg;
+	uint16_t i;
+
+	pthread_mutex_lock(&dev->mutex);
+
+	if (dev->started == false)
+		goto exit;
+
+	/* Step 1: pause the active queues */
+	for (i = 0; i < dev->queue_pairs; i++)
+		dev->ops->enable_qp(dev, i, 0);
+
+	/* Step 2: update memory regions */
+	dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
+
+	/* Step 3: resume the active queues */
+	for (i = 0; i < dev->queue_pairs; i++)
+		dev->ops->enable_qp(dev, i, 1);
+
+exit:
+	pthread_mutex_unlock(&dev->mutex);
+}
+
 static int
 virtio_user_dev_setup(struct virtio_user_dev *dev)
 {
@@ -336,7 +377,9 @@ int
 virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 		     int cq, int queue_size, const char *mac, char **ifname)
 {
+	pthread_mutex_init(&dev->mutex, NULL);
 	snprintf(dev->path, PATH_MAX, "%s", path);
+	dev->started = 0;
 	dev->max_queue_pairs = queues;
 	dev->queue_pairs = 1; /* mq disabled by default */
 	dev->queue_size = queue_size;
@@ -400,6 +443,12 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 
 	dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES;
 
+	if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
+				virtio_user_mem_event_cb, dev)) {
+		PMD_INIT_LOG(ERR, "Failed to register mem event callback\n");
+		return -1;
+	}
+
 	return 0;
 }
 
@@ -410,6 +459,8 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
 
 	virtio_user_stop_device(dev);
 
+	rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);
+
 	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
 		close(dev->callfds[i]);
 		close(dev->kickfds[i]);

drivers/net/virtio/virtio_user/virtio_user_dev.h:

@@ -39,6 +39,8 @@ struct virtio_user_dev {
 	char		path[PATH_MAX];
 	struct vring	vrings[VIRTIO_MAX_VIRTQUEUES];
 	struct virtio_user_backend_ops *ops;
+	pthread_mutex_t	mutex;
+	bool		started;
 };
 
 int is_vhost_user_by_type(const char *path);