net/virtio: move vhost-user specifics to its backend

This patch moves all the Vhost-user backend-specific logic,
such as the Vhost FD, the listen FD and interrupt handling,
into the Vhost-user backend implementation.

To achieve this, new ops are introduced to update the link
state, disconnect and reconnect the server-mode connection,
and fetch the link-state interrupt FD.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
Maxime Coquelin, 2021-01-26 11:16:35 +01:00 (committed by Ferruh Yigit)
parent 748e5ea58a
commit 949735312f
7 changed files with 350 additions and 233 deletions
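
For orientation, a minimal sketch (not the upstream code) of the dispatch
pattern this change introduces: the generic virtio-user layer no longer
touches the Vhost-user FDs directly, it calls backend ops, and the backend
keeps its FDs behind dev->backend_data. The struct and op names follow the
diff below; the helper function is illustrative only.

struct virtio_user_dev;

/* Backend ops extended by this patch (subset shown). */
struct virtio_user_backend_ops {
	int (*update_link_state)(struct virtio_user_dev *dev);
	int (*server_disconnect)(struct virtio_user_dev *dev);
	int (*server_reconnect)(struct virtio_user_dev *dev);
	int (*get_intr_fd)(struct virtio_user_dev *dev);
};

struct virtio_user_dev {
	struct virtio_user_backend_ops *ops;
	void *backend_data; /* backend-private state, e.g. vhostfd/listenfd */
};

/* Generic layer: pick the link-state interrupt FD without knowing
 * whether the backend is Vhost-user, Vhost-kernel or Vhost-vDPA.
 * Backends without such an FD simply return -1.
 */
static int
example_fill_intr_fd(struct virtio_user_dev *dev) /* illustrative */
{
	return dev->ops->get_intr_fd(dev);
}

As the diffs below show, backends with no link-state interrupt
(Vhost-kernel, Vhost-vDPA) implement get_intr_fd() by returning -1.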

drivers/net/virtio/virtio_user/vhost.h

@ -82,6 +82,10 @@ struct virtio_user_backend_ops {
int (*enable_qp)(struct virtio_user_dev *dev, uint16_t pair_idx, int enable);
int (*dma_map)(struct virtio_user_dev *dev, void *addr, uint64_t iova, size_t len);
int (*dma_unmap)(struct virtio_user_dev *dev, void *addr, uint64_t iova, size_t len);
int (*update_link_state)(struct virtio_user_dev *dev);
int (*server_disconnect)(struct virtio_user_dev *dev);
int (*server_reconnect)(struct virtio_user_dev *dev);
int (*get_intr_fd)(struct virtio_user_dev *dev);
};
extern struct virtio_user_backend_ops virtio_ops_user;

drivers/net/virtio/virtio_user/vhost_kernel.c

@ -459,6 +459,20 @@ vhost_kernel_get_backend_features(uint64_t *features)
return 0;
}
static int
vhost_kernel_update_link_state(struct virtio_user_dev *dev __rte_unused)
{
/* Nothing to update (Maybe get TAP interface link state?) */
return 0;
}
static int
vhost_kernel_get_intr_fd(struct virtio_user_dev *dev __rte_unused)
{
/* No link state interrupt with Vhost-kernel */
return -1;
}
struct virtio_user_backend_ops virtio_ops_kernel = {
.setup = vhost_kernel_setup,
.destroy = vhost_kernel_destroy,
@ -475,5 +489,7 @@ struct virtio_user_backend_ops virtio_ops_kernel = {
.set_vring_addr = vhost_kernel_set_vring_addr,
.get_status = vhost_kernel_get_status,
.set_status = vhost_kernel_set_status,
.enable_qp = vhost_kernel_enable_queue_pair
.enable_qp = vhost_kernel_enable_queue_pair,
.update_link_state = vhost_kernel_update_link_state,
.get_intr_fd = vhost_kernel_get_intr_fd,
};

drivers/net/virtio/virtio_user/vhost_user.c

@ -11,6 +11,7 @@
#include <string.h>
#include <errno.h>
#include <rte_alarm.h>
#include <rte_string_fns.h>
#include <rte_fbarray.h>
@ -18,6 +19,8 @@
#include "virtio_user_dev.h"
struct vhost_user_data {
int vhostfd;
int listenfd;
uint64_t protocol_features;
};
@ -182,13 +185,14 @@ vhost_user_read(int fd, struct vhost_user_msg *msg)
static int
vhost_user_check_reply_ack(struct virtio_user_dev *dev, struct vhost_user_msg *msg)
{
struct vhost_user_data *data = dev->backend_data;
enum vhost_user_request req = msg->request;
int ret;
if (!(msg->flags & VHOST_USER_NEED_REPLY_MASK))
return 0;
ret = vhost_user_read(dev->vhostfd, msg);
ret = vhost_user_read(data->vhostfd, msg);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to read reply-ack");
return -1;
@ -216,12 +220,13 @@ static int
vhost_user_set_owner(struct virtio_user_dev *dev)
{
int ret;
struct vhost_user_data *data = dev->backend_data;
struct vhost_user_msg msg = {
.request = VHOST_USER_SET_OWNER,
.flags = VHOST_USER_VERSION,
};
ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
ret = vhost_user_write(data->vhostfd, &msg, NULL, 0);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to set owner");
return -1;
@ -234,16 +239,17 @@ static int
vhost_user_get_protocol_features(struct virtio_user_dev *dev, uint64_t *features)
{
int ret;
struct vhost_user_data *data = dev->backend_data;
struct vhost_user_msg msg = {
.request = VHOST_USER_GET_PROTOCOL_FEATURES,
.flags = VHOST_USER_VERSION,
};
ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
ret = vhost_user_write(data->vhostfd, &msg, NULL, 0);
if (ret < 0)
goto err;
ret = vhost_user_read(dev->vhostfd, &msg);
ret = vhost_user_read(data->vhostfd, &msg);
if (ret < 0)
goto err;
@ -270,6 +276,7 @@ static int
vhost_user_set_protocol_features(struct virtio_user_dev *dev, uint64_t features)
{
int ret;
struct vhost_user_data *data = dev->backend_data;
struct vhost_user_msg msg = {
.request = VHOST_USER_SET_PROTOCOL_FEATURES,
.flags = VHOST_USER_VERSION,
@ -277,7 +284,7 @@ vhost_user_set_protocol_features(struct virtio_user_dev *dev, uint64_t features)
.payload.u64 = features,
};
ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
ret = vhost_user_write(data->vhostfd, &msg, NULL, 0);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to set protocol features");
return -1;
@ -296,11 +303,11 @@ vhost_user_get_features(struct virtio_user_dev *dev, uint64_t *features)
.flags = VHOST_USER_VERSION,
};
ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
ret = vhost_user_write(data->vhostfd, &msg, NULL, 0);
if (ret < 0)
goto err;
ret = vhost_user_read(dev->vhostfd, &msg);
ret = vhost_user_read(data->vhostfd, &msg);
if (ret < 0)
goto err;
@ -344,6 +351,7 @@ static int
vhost_user_set_features(struct virtio_user_dev *dev, uint64_t features)
{
int ret;
struct vhost_user_data *data = dev->backend_data;
struct vhost_user_msg msg = {
.request = VHOST_USER_SET_FEATURES,
.flags = VHOST_USER_VERSION,
@ -353,7 +361,7 @@ vhost_user_set_features(struct virtio_user_dev *dev, uint64_t features)
msg.payload.u64 |= dev->device_features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
ret = vhost_user_write(data->vhostfd, &msg, NULL, 0);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to set features");
return -1;
@ -477,7 +485,7 @@ vhost_user_set_memory_table(struct virtio_user_dev *dev)
msg.size += sizeof(msg.payload.memory.padding);
msg.size += fd_num * sizeof(struct vhost_memory_region);
ret = vhost_user_write(dev->vhostfd, &msg, fds, fd_num);
ret = vhost_user_write(data->vhostfd, &msg, fds, fd_num);
if (ret < 0)
goto err;
@ -492,6 +500,7 @@ vhost_user_set_vring(struct virtio_user_dev *dev, enum vhost_user_request req,
struct vhost_vring_state *state)
{
int ret;
struct vhost_user_data *data = dev->backend_data;
struct vhost_user_msg msg = {
.request = req,
.flags = VHOST_USER_VERSION,
@ -499,7 +508,7 @@ vhost_user_set_vring(struct virtio_user_dev *dev, enum vhost_user_request req,
.payload.state = *state,
};
ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
ret = vhost_user_write(data->vhostfd, &msg, NULL, 0);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to set vring state (request %d)", req);
return -1;
@ -531,6 +540,7 @@ vhost_user_get_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state
{
int ret;
struct vhost_user_msg msg;
struct vhost_user_data *data = dev->backend_data;
unsigned int index = state->index;
ret = vhost_user_set_vring(dev, VHOST_USER_GET_VRING_BASE, state);
@ -539,7 +549,7 @@ vhost_user_get_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state
goto err;
}
ret = vhost_user_read(dev->vhostfd, &msg);
ret = vhost_user_read(data->vhostfd, &msg);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to read reply");
goto err;
@ -575,6 +585,7 @@ vhost_user_set_vring_file(struct virtio_user_dev *dev, enum vhost_user_request r
int ret;
int fd = file->fd;
int num_fd = 0;
struct vhost_user_data *data = dev->backend_data;
struct vhost_user_msg msg = {
.request = req,
.flags = VHOST_USER_VERSION,
@ -587,7 +598,7 @@ vhost_user_set_vring_file(struct virtio_user_dev *dev, enum vhost_user_request r
else
msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
ret = vhost_user_write(dev->vhostfd, &msg, &fd, num_fd);
ret = vhost_user_write(data->vhostfd, &msg, &fd, num_fd);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to set vring file (request %d)", req);
return -1;
@ -613,6 +624,7 @@ static int
vhost_user_set_vring_addr(struct virtio_user_dev *dev, struct vhost_vring_addr *addr)
{
int ret;
struct vhost_user_data *data = dev->backend_data;
struct vhost_user_msg msg = {
.request = VHOST_USER_SET_VRING_ADDR,
.flags = VHOST_USER_VERSION,
@ -620,7 +632,7 @@ vhost_user_set_vring_addr(struct virtio_user_dev *dev, struct vhost_vring_addr *
.payload.addr = *addr,
};
ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
ret = vhost_user_write(data->vhostfd, &msg, NULL, 0);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to send vring addresses");
return -1;
@ -653,13 +665,13 @@ vhost_user_get_status(struct virtio_user_dev *dev, uint8_t *status)
if (!(data->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS)))
return -ENOTSUP;
ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
ret = vhost_user_write(data->vhostfd, &msg, NULL, 0);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to send request");
goto err;
}
ret = vhost_user_read(dev->vhostfd, &msg);
ret = vhost_user_read(data->vhostfd, &msg);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to recv request");
goto err;
@ -712,7 +724,7 @@ vhost_user_set_status(struct virtio_user_dev *dev, uint8_t status)
if (data->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK))
msg.flags |= VHOST_USER_NEED_REPLY_MASK;
ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
ret = vhost_user_write(data->vhostfd, &msg, NULL, 0);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to send get status request");
return -1;
@ -723,11 +735,12 @@ vhost_user_set_status(struct virtio_user_dev *dev, uint8_t status)
#define MAX_VIRTIO_USER_BACKLOG 1
static int
virtio_user_start_server(struct virtio_user_dev *dev, struct sockaddr_un *un)
vhost_user_start_server(struct virtio_user_dev *dev, struct sockaddr_un *un)
{
int ret;
int flag;
int fd = dev->listenfd;
struct vhost_user_data *data = dev->backend_data;
int fd = data->listenfd;
ret = bind(fd, (struct sockaddr *)un, sizeof(*un));
if (ret < 0) {
@ -740,8 +753,8 @@ virtio_user_start_server(struct virtio_user_dev *dev, struct sockaddr_un *un)
return -1;
PMD_DRV_LOG(NOTICE, "(%s) waiting for client connection...", dev->path);
dev->vhostfd = accept(fd, NULL, NULL);
if (dev->vhostfd < 0) {
data->vhostfd = accept(fd, NULL, NULL);
if (data->vhostfd < 0) {
PMD_DRV_LOG(ERR, "Failed to accept initial client connection (%s)",
strerror(errno));
return -1;
@ -756,6 +769,37 @@ virtio_user_start_server(struct virtio_user_dev *dev, struct sockaddr_un *un)
return 0;
}
static int
vhost_user_server_disconnect(struct virtio_user_dev *dev)
{
struct vhost_user_data *data = dev->backend_data;
if (data->vhostfd < 0) {
PMD_DRV_LOG(ERR, "(%s) Expected valid Vhost FD", dev->path);
return -1;
}
close(data->vhostfd);
data->vhostfd = -1;
return 0;
}
static int
vhost_user_server_reconnect(struct virtio_user_dev *dev)
{
struct vhost_user_data *data = dev->backend_data;
int fd;
fd = accept(data->listenfd, NULL, NULL);
if (fd < 0)
return -1;
data->vhostfd = fd;
return 0;
}
/**
* Set up environment to talk with a vhost user backend.
*
@ -781,6 +825,8 @@ vhost_user_setup(struct virtio_user_dev *dev)
dev->backend_data = data;
data->vhostfd = -1;
fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (fd < 0) {
PMD_DRV_LOG(ERR, "socket() error, %s", strerror(errno));
@ -796,8 +842,8 @@ vhost_user_setup(struct virtio_user_dev *dev)
strlcpy(un.sun_path, dev->path, sizeof(un.sun_path));
if (dev->is_server) {
dev->listenfd = fd;
if (virtio_user_start_server(dev, &un) < 0) {
data->listenfd = fd;
if (vhost_user_start_server(dev, &un) < 0) {
PMD_DRV_LOG(ERR, "virtio-user startup fails in server mode");
goto err_socket;
}
@ -806,7 +852,7 @@ vhost_user_setup(struct virtio_user_dev *dev)
PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno));
goto err_socket;
}
dev->vhostfd = fd;
data->vhostfd = fd;
}
return 0;
@ -823,11 +869,24 @@ vhost_user_setup(struct virtio_user_dev *dev)
static int
vhost_user_destroy(struct virtio_user_dev *dev)
{
if (dev->backend_data) {
free(dev->backend_data);
dev->backend_data = NULL;
struct vhost_user_data *data = dev->backend_data;
if (!data)
return 0;
if (data->vhostfd >= 0) {
close(data->vhostfd);
data->vhostfd = -1;
}
if (data->listenfd >= 0) {
close(data->listenfd);
data->listenfd = -1;
}
free(data);
dev->backend_data = NULL;
return 0;
}
@ -836,8 +895,12 @@ vhost_user_enable_queue_pair(struct virtio_user_dev *dev,
uint16_t pair_idx,
int enable)
{
struct vhost_user_data *data = dev->backend_data;
int i;
if (data->vhostfd < 0)
return 0;
if (dev->qp_enabled[pair_idx] == enable)
return 0;
@ -863,6 +926,61 @@ vhost_user_get_backend_features(uint64_t *features)
return 0;
}
static int
vhost_user_update_link_state(struct virtio_user_dev *dev)
{
struct vhost_user_data *data = dev->backend_data;
char buf[128];
if (data->vhostfd >= 0) {
int r;
int flags;
flags = fcntl(data->vhostfd, F_GETFL);
if (fcntl(data->vhostfd, F_SETFL, flags | O_NONBLOCK) == -1) {
PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag");
return -1;
}
r = recv(data->vhostfd, buf, 128, MSG_PEEK);
if (r == 0 || (r < 0 && errno != EAGAIN)) {
dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
PMD_DRV_LOG(ERR, "virtio-user port %u is down", dev->port_id);
/* This function could be called in the process
* of interrupt handling, callback cannot be
* unregistered here, set an alarm to do it.
*/
rte_eal_alarm_set(1, virtio_user_dev_delayed_handler, (void *)dev);
} else {
dev->net_status |= VIRTIO_NET_S_LINK_UP;
}
if (fcntl(data->vhostfd, F_SETFL,
flags & ~O_NONBLOCK) == -1) {
PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
return -1;
}
} else if (dev->is_server) {
dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
if (virtio_user_dev_server_reconnect(dev) >= 0)
dev->net_status |= VIRTIO_NET_S_LINK_UP;
}
return 0;
}
static int
vhost_user_get_intr_fd(struct virtio_user_dev *dev)
{
struct vhost_user_data *data = dev->backend_data;
if (dev->is_server && data->vhostfd == -1)
return data->listenfd;
return data->vhostfd;
}
struct virtio_user_backend_ops virtio_ops_user = {
.setup = vhost_user_setup,
.destroy = vhost_user_destroy,
@ -879,5 +997,9 @@ struct virtio_user_backend_ops virtio_ops_user = {
.set_vring_addr = vhost_user_set_vring_addr,
.get_status = vhost_user_get_status,
.set_status = vhost_user_set_status,
.enable_qp = vhost_user_enable_queue_pair
.enable_qp = vhost_user_enable_queue_pair,
.update_link_state = vhost_user_update_link_state,
.server_disconnect = vhost_user_server_disconnect,
.server_reconnect = vhost_user_server_reconnect,
.get_intr_fd = vhost_user_get_intr_fd,
};
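
The link-state check in vhost_user_update_link_state() above relies on
peeking at the socket in non-blocking mode: recv(..., MSG_PEEK) returns 0
once the peer has closed the connection, and fails with EAGAIN while the
connection is alive but idle. A minimal standalone sketch of that pattern
(illustrative only, not upstream code):

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <sys/socket.h>

static bool
example_peer_connected(int sockfd) /* illustrative helper */
{
	char buf[128];
	int flags = fcntl(sockfd, F_GETFL);
	ssize_t r;

	if (flags < 0 || fcntl(sockfd, F_SETFL, flags | O_NONBLOCK) < 0)
		return false;

	r = recv(sockfd, buf, sizeof(buf), MSG_PEEK);

	/* Restore the original file status flags before returning. */
	fcntl(sockfd, F_SETFL, flags);

	if (r == 0)
		return false; /* orderly shutdown by the peer: link is down */
	if (r < 0 && errno != EAGAIN && errno != EWOULDBLOCK)
		return false; /* unexpected socket error: treat as link down */
	return true; /* still connected, with or without pending data */
}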

drivers/net/virtio/virtio_user/vhost_vdpa.c

@ -471,6 +471,20 @@ vhost_vdpa_get_backend_features(uint64_t *features)
return 0;
}
static int
vhost_vdpa_update_link_state(struct virtio_user_dev *dev __rte_unused)
{
/* Nothing to update (for now?) */
return 0;
}
static int
vhost_vdpa_get_intr_fd(struct virtio_user_dev *dev __rte_unused)
{
/* No link state interrupt with Vhost-vDPA */
return -1;
}
struct virtio_user_backend_ops virtio_ops_vdpa = {
.setup = vhost_vdpa_setup,
.destroy = vhost_vdpa_destroy,
@ -490,4 +504,6 @@ struct virtio_user_backend_ops virtio_ops_vdpa = {
.enable_qp = vhost_vdpa_enable_queue_pair,
.dma_map = vhost_vdpa_dma_map_batch,
.dma_unmap = vhost_vdpa_dma_unmap_batch,
.update_link_state = vhost_vdpa_update_link_state,
.get_intr_fd = vhost_vdpa_get_intr_fd,
};

drivers/net/virtio/virtio_user/virtio_user_dev.c

@ -343,11 +343,7 @@ virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
/* For virtio vdev, no need to read counter for clean */
eth_dev->intr_handle->efd_counter_size = 0;
eth_dev->intr_handle->fd = -1;
if (dev->vhostfd >= 0)
eth_dev->intr_handle->fd = dev->vhostfd;
else if (dev->is_server)
eth_dev->intr_handle->fd = dev->listenfd;
eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);
return 0;
}
@ -404,7 +400,6 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
{
uint32_t q;
dev->vhostfd = -1;
dev->vhostfds = NULL;
dev->tapfds = NULL;
@ -598,15 +593,6 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
close(dev->callfds[i]);
close(dev->kickfds[i]);
}
if (dev->vhostfd >= 0)
close(dev->vhostfd);
if (dev->is_server && dev->listenfd >= 0) {
close(dev->listenfd);
dev->listenfd = -1;
}
if (dev->vhostfds) {
for (i = 0; i < dev->max_queue_pairs; ++i) {
close(dev->vhostfds[i]);
@ -637,15 +623,11 @@ virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
return -1;
}
/* Server mode can't enable queue pairs if vhostfd is invalid,
* always return 0 in this case.
*/
if (!dev->is_server || dev->vhostfd >= 0) {
for (i = 0; i < q_pairs; ++i)
ret |= dev->ops->enable_qp(dev, i, 1);
for (i = q_pairs; i < dev->max_queue_pairs; ++i)
ret |= dev->ops->enable_qp(dev, i, 0);
}
for (i = 0; i < q_pairs; ++i)
ret |= dev->ops->enable_qp(dev, i, 1);
for (i = q_pairs; i < dev->max_queue_pairs; ++i)
ret |= dev->ops->enable_qp(dev, i, 0);
dev->queue_pairs = q_pairs;
return ret;
@ -860,3 +842,154 @@ virtio_user_dev_update_status(struct virtio_user_dev *dev)
pthread_mutex_unlock(&dev->mutex);
return ret;
}
int
virtio_user_dev_update_link_state(struct virtio_user_dev *dev)
{
if (dev->ops->update_link_state)
return dev->ops->update_link_state(dev);
return 0;
}
static void
virtio_user_dev_reset_queues_packed(struct rte_eth_dev *eth_dev)
{
struct virtio_user_dev *dev = eth_dev->data->dev_private;
struct virtio_hw *hw = &dev->hw;
struct virtnet_rx *rxvq;
struct virtnet_tx *txvq;
uint16_t i;
/* Add lock to avoid queue contention. */
rte_spinlock_lock(&hw->state_lock);
hw->started = 0;
/*
* Waiting for datapath to complete before resetting queues.
* 1 ms should be enough for the ongoing Tx/Rx function to finish.
*/
rte_delay_ms(1);
/* Vring reset for each Tx queue and Rx queue. */
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rxvq = eth_dev->data->rx_queues[i];
virtqueue_rxvq_reset_packed(rxvq->vq);
virtio_dev_rx_queue_setup_finish(eth_dev, i);
}
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
txvq = eth_dev->data->tx_queues[i];
virtqueue_txvq_reset_packed(txvq->vq);
}
hw->started = 1;
rte_spinlock_unlock(&hw->state_lock);
}
void
virtio_user_dev_delayed_handler(void *param)
{
struct virtio_user_dev *dev = param;
struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
if (rte_intr_disable(eth_dev->intr_handle) < 0) {
PMD_DRV_LOG(ERR, "interrupt disable failed");
return;
}
rte_intr_callback_unregister(eth_dev->intr_handle,
virtio_interrupt_handler, eth_dev);
if (dev->is_server) {
if (dev->ops->server_disconnect)
dev->ops->server_disconnect(dev);
eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);
rte_intr_callback_register(eth_dev->intr_handle,
virtio_interrupt_handler, eth_dev);
if (rte_intr_enable(eth_dev->intr_handle) < 0) {
PMD_DRV_LOG(ERR, "interrupt enable failed");
return;
}
}
}
int
virtio_user_dev_server_reconnect(struct virtio_user_dev *dev)
{
int ret, old_status;
struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
struct virtio_hw *hw = &dev->hw;
if (!dev->ops->server_reconnect) {
PMD_DRV_LOG(ERR, "(%s) Missing server reconnect callback", dev->path);
return -1;
}
if (dev->ops->server_reconnect(dev)) {
PMD_DRV_LOG(ERR, "(%s) Reconnect callback call failed", dev->path);
return -1;
}
old_status = dev->status;
virtio_reset(hw);
virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
if (dev->ops->get_features(dev, &dev->device_features) < 0) {
PMD_INIT_LOG(ERR, "get_features failed: %s",
strerror(errno));
return -1;
}
dev->device_features |= dev->frontend_features;
/* unmask vhost-user unsupported features */
dev->device_features &= ~(dev->unsupported_features);
dev->features &= dev->device_features;
/* For packed ring, resetting queues is required in reconnection. */
if (virtio_with_packed_queue(hw) &&
(old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
" when packed ring reconnecting.");
virtio_user_dev_reset_queues_packed(eth_dev);
}
virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
/* Start the device */
virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
if (!dev->started)
return -1;
if (dev->queue_pairs > 1) {
ret = virtio_user_handle_mq(dev, dev->queue_pairs);
if (ret != 0) {
PMD_INIT_LOG(ERR, "Fails to enable multi-queue pairs!");
return -1;
}
}
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
if (rte_intr_disable(eth_dev->intr_handle) < 0) {
PMD_DRV_LOG(ERR, "interrupt disable failed");
return -1;
}
rte_intr_callback_unregister(eth_dev->intr_handle,
virtio_interrupt_handler,
eth_dev);
eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);
rte_intr_callback_register(eth_dev->intr_handle,
virtio_interrupt_handler, eth_dev);
if (rte_intr_enable(eth_dev->intr_handle) < 0) {
PMD_DRV_LOG(ERR, "interrupt enable failed");
return -1;
}
}
PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
return 0;
}
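
As the comment in vhost_user_update_link_state() notes, the link-down path
can run from the interrupt callback itself, where the callback cannot be
unregistered; virtio_user_dev_delayed_handler() is therefore scheduled
through an EAL alarm and performs the unregister/re-register later, outside
interrupt context. A minimal sketch of that deferral pattern (illustrative
only; the example_* names are hypothetical):

#include <rte_alarm.h>
#include <rte_interrupts.h>

static void example_intr_cb(void *cb_arg); /* hypothetical callback */

/* Runs later in the alarm thread, where unregistering is allowed. */
static void
example_deferred_work(void *cb_arg) /* hypothetical */
{
	struct rte_intr_handle *intr_handle = cb_arg;

	rte_intr_callback_unregister(intr_handle, example_intr_cb, cb_arg);
}

/* Interrupt callback: it cannot unregister itself, so defer via an alarm. */
static void
example_intr_cb(void *cb_arg)
{
	/* Schedule the cleanup to run ~1 us from now in the alarm thread. */
	rte_eal_alarm_set(1, example_deferred_work, cb_arg);
}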

drivers/net/virtio/virtio_user/virtio_user_dev.h

@ -27,11 +27,11 @@ struct virtio_user_queue {
struct virtio_user_dev {
struct virtio_hw hw;
enum virtio_user_backend_type backend_type;
/* for vhost_user backend */
int vhostfd;
int listenfd; /* listening fd */
bool is_server; /* server or client mode */
/* for vhost_vdpa backend */
int vhostfd;
/* for vhost_kernel backend */
char *ifname;
int *vhostfds;
@ -85,5 +85,8 @@ void virtio_user_handle_cq_packed(struct virtio_user_dev *dev,
uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs);
int virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status);
int virtio_user_dev_update_status(struct virtio_user_dev *dev);
int virtio_user_dev_update_link_state(struct virtio_user_dev *dev);
void virtio_user_dev_delayed_handler(void *param);
int virtio_user_dev_server_reconnect(struct virtio_user_dev *dev);
extern const char * const virtio_user_backend_strings[];
#endif

drivers/net/virtio/virtio_user_ethdev.c

@ -28,146 +28,6 @@
#define virtio_user_get_dev(hwp) container_of(hwp, struct virtio_user_dev, hw)
static void
virtio_user_reset_queues_packed(struct rte_eth_dev *eth_dev)
{
struct virtio_user_dev *dev = eth_dev->data->dev_private;
struct virtio_hw *hw = &dev->hw;
struct virtnet_rx *rxvq;
struct virtnet_tx *txvq;
uint16_t i;
/* Add lock to avoid queue contention. */
rte_spinlock_lock(&hw->state_lock);
hw->started = 0;
/*
* Waitting for datapath to complete before resetting queues.
* 1 ms should be enough for the ongoing Tx/Rx function to finish.
*/
rte_delay_ms(1);
/* Vring reset for each Tx queue and Rx queue. */
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rxvq = eth_dev->data->rx_queues[i];
virtqueue_rxvq_reset_packed(rxvq->vq);
virtio_dev_rx_queue_setup_finish(eth_dev, i);
}
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
txvq = eth_dev->data->tx_queues[i];
virtqueue_txvq_reset_packed(txvq->vq);
}
hw->started = 1;
rte_spinlock_unlock(&hw->state_lock);
}
static int
virtio_user_server_reconnect(struct virtio_user_dev *dev)
{
int ret, connectfd, old_status;
struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
struct virtio_hw *hw = &dev->hw;
connectfd = accept(dev->listenfd, NULL, NULL);
if (connectfd < 0)
return -1;
dev->vhostfd = connectfd;
old_status = dev->status;
virtio_reset(hw);
virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
if (dev->ops->get_features(dev, &dev->device_features) < 0) {
PMD_INIT_LOG(ERR, "get_features failed: %s",
strerror(errno));
return -1;
}
dev->device_features |= dev->frontend_features;
/* umask vhost-user unsupported features */
dev->device_features &= ~(dev->unsupported_features);
dev->features &= dev->device_features;
/* For packed ring, resetting queues is required in reconnection. */
if (virtio_with_packed_queue(hw) &&
(old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
" when packed ring reconnecting.");
virtio_user_reset_queues_packed(eth_dev);
}
virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
/* Start the device */
virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
if (!dev->started)
return -1;
if (dev->queue_pairs > 1) {
ret = virtio_user_handle_mq(dev, dev->queue_pairs);
if (ret != 0) {
PMD_INIT_LOG(ERR, "Fails to enable multi-queue pairs!");
return -1;
}
}
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
if (rte_intr_disable(eth_dev->intr_handle) < 0) {
PMD_DRV_LOG(ERR, "interrupt disable failed");
return -1;
}
rte_intr_callback_unregister(eth_dev->intr_handle,
virtio_interrupt_handler,
eth_dev);
eth_dev->intr_handle->fd = connectfd;
rte_intr_callback_register(eth_dev->intr_handle,
virtio_interrupt_handler, eth_dev);
if (rte_intr_enable(eth_dev->intr_handle) < 0) {
PMD_DRV_LOG(ERR, "interrupt enable failed");
return -1;
}
}
PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeds!");
return 0;
}
static void
virtio_user_delayed_handler(void *param)
{
struct virtio_hw *hw = (struct virtio_hw *)param;
struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id];
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
if (rte_intr_disable(eth_dev->intr_handle) < 0) {
PMD_DRV_LOG(ERR, "interrupt disable failed");
return;
}
rte_intr_callback_unregister(eth_dev->intr_handle,
virtio_interrupt_handler, eth_dev);
if (dev->is_server) {
if (dev->vhostfd >= 0) {
close(dev->vhostfd);
dev->vhostfd = -1;
}
eth_dev->intr_handle->fd = dev->listenfd;
rte_intr_callback_register(eth_dev->intr_handle,
virtio_interrupt_handler, eth_dev);
if (rte_intr_enable(eth_dev->intr_handle) < 0) {
PMD_DRV_LOG(ERR, "interrupt enable failed");
return;
}
}
}
static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
void *dst, int length)
@ -183,44 +43,7 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
}
if (offset == offsetof(struct virtio_net_config, status)) {
char buf[128];
if (dev->vhostfd >= 0) {
int r;
int flags;
flags = fcntl(dev->vhostfd, F_GETFL);
if (fcntl(dev->vhostfd, F_SETFL,
flags | O_NONBLOCK) == -1) {
PMD_DRV_LOG(ERR, "error setting O_NONBLOCK flag");
return;
}
r = recv(dev->vhostfd, buf, 128, MSG_PEEK);
if (r == 0 || (r < 0 && errno != EAGAIN)) {
dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
PMD_DRV_LOG(ERR, "virtio-user port %u is down",
hw->port_id);
/* This function could be called in the process
* of interrupt handling, callback cannot be
* unregistered here, set an alarm to do it.
*/
rte_eal_alarm_set(1,
virtio_user_delayed_handler,
(void *)hw);
} else {
dev->net_status |= VIRTIO_NET_S_LINK_UP;
}
if (fcntl(dev->vhostfd, F_SETFL,
flags & ~O_NONBLOCK) == -1) {
PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
return;
}
} else if (dev->is_server) {
dev->net_status &= (~VIRTIO_NET_S_LINK_UP);
if (virtio_user_server_reconnect(dev) >= 0)
dev->net_status |= VIRTIO_NET_S_LINK_UP;
}
virtio_user_dev_update_link_state(dev);
*(uint16_t *)dst = dev->net_status;
}