net/virtio-user: do not close tap when disabling queue pairs

Do not close the tap fds when disabling queue pairs; instead,
just unbind the backend. Otherwise, the tap port can be
destroyed unexpectedly.

Fixes: e3b434818bbb ("net/virtio-user: support kernel vhost")
Cc: stable@dpdk.org

Reported-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Author:    Tiwei Bie <tiwei.bie@intel.com>
Date:      2019-11-25 16:14:40 +08:00
Committer: Ferruh Yigit
Commit:    47ac9661b6 (parent: fd4e6f2326)

6 changed files with 67 additions and 23 deletions
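Background for the fix: the kernel vhost backend opens the tap device with one queue fd per queue pair (multi-queue when several pairs are used). A non-persistent tap interface is removed by the kernel as soon as its last queue fd is closed, so closing tap fds on the disable path could tear the whole port down. The patch therefore keeps the fds open and only detaches the queue and unbinds the vhost backend. Below is a minimal sketch of the TUN calls involved, outside of DPDK; the helper names are illustrative, not from the patch.

/* Sketch only, not DPDK code: one queue fd of a multi-queue tap.
 * Closing the last attached fd of a non-persistent tap destroys the
 * interface; detaching the queue keeps both the fd and the device alive. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int tap_open_queue(const char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* keep this fd open for the lifetime of the device */
}

static int tap_queue_detach(int fd)	/* what the patch does instead of close() */
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_DETACH_QUEUE;
	return ioctl(fd, TUNSETQUEUE, &ifr);
}

Re-attaching is the mirror operation with IFF_ATTACH_QUEUE, which is what the new vhost_kernel_tap_set_queue() helper added below wraps.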

drivers/net/virtio/virtio_user/vhost_kernel.c

@@ -330,14 +330,32 @@ vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
 
 	vhostfd = dev->vhostfds[pair_idx];
 
-	if (!enable) {
-		if (dev->tapfds[pair_idx] >= 0) {
-			close(dev->tapfds[pair_idx]);
-			dev->tapfds[pair_idx] = -1;
-		}
-		return vhost_kernel_set_backend(vhostfd, -1);
-	} else if (dev->tapfds[pair_idx] >= 0) {
+	if (dev->qp_enabled[pair_idx] == enable)
 		return 0;
+
+	if (!enable) {
+		tapfd = dev->tapfds[pair_idx];
+		if (vhost_kernel_set_backend(vhostfd, -1) < 0) {
+			PMD_DRV_LOG(ERR, "fail to set backend for vhost kernel");
+			return -1;
+		}
+		if (req_mq && vhost_kernel_tap_set_queue(tapfd, false) < 0) {
+			PMD_DRV_LOG(ERR, "fail to disable tap for vhost kernel");
+			return -1;
+		}
+		dev->qp_enabled[pair_idx] = false;
+		return 0;
+	}
+
+	if (dev->tapfds[pair_idx] >= 0) {
+		tapfd = dev->tapfds[pair_idx];
+		if (vhost_kernel_tap_set_offload(tapfd, dev->features) == -1)
+			return -1;
+		if (req_mq && vhost_kernel_tap_set_queue(tapfd, true) < 0) {
+			PMD_DRV_LOG(ERR, "fail to enable tap for vhost kernel");
+			return -1;
+		}
+		goto set_backend;
 	}
 
 	if ((dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ||
@@ -353,13 +371,15 @@ vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
 		return -1;
 	}
 
+	dev->tapfds[pair_idx] = tapfd;
+
+set_backend:
 	if (vhost_kernel_set_backend(vhostfd, tapfd) < 0) {
 		PMD_DRV_LOG(ERR, "fail to set backend for vhost kernel");
 		close(tapfd);
 		return -1;
 	}
 
-	dev->tapfds[pair_idx] = tapfd;
-
+	dev->qp_enabled[pair_idx] = true;
 	return 0;
 }
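The disable branch above now calls vhost_kernel_set_backend(vhostfd, -1) instead of closing the tap fd. That helper's body is not part of this diff; for reference, unbinding a vhost-net backend uses the standard VHOST_NET_SET_BACKEND ioctl from <linux/vhost.h> with fd set to -1 for each of the pair's two rings. A minimal sketch under that assumption (the function name is illustrative):

/* Sketch, assuming the standard vhost-net ioctl interface; the exact
 * internals of DPDK's vhost_kernel_set_backend() are not shown in this diff. */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int vhost_net_unbind(int vhostfd)
{
	struct vhost_vring_file file;
	unsigned int i;

	for (i = 0; i < 2; i++) {	/* rx ring and tx ring of the pair */
		file.index = i;
		file.fd = -1;		/* -1 detaches the current backend */
		if (ioctl(vhostfd, VHOST_NET_SET_BACKEND, &file) < 0)
			return -1;
	}
	return 0;
}

The tap fd itself stays open, so the interface survives until virtio_user_dev_uninit() closes it (see the virtio_user_dev.c hunk below).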

drivers/net/virtio/virtio_user/vhost_kernel_tap.c

@@ -18,7 +18,7 @@
 #include "../virtio_logs.h"
 #include "../virtio_pci.h"
 
-static int
+int
 vhost_kernel_tap_set_offload(int fd, uint64_t features)
 {
 	unsigned int offload = 0;
@@ -37,26 +37,34 @@ vhost_kernel_tap_set_offload(int fd, uint64_t features)
 			offload |= TUN_F_UFO;
 	}
 
-	if (offload != 0) {
-		/* Check if our kernel supports TUNSETOFFLOAD */
-		if (ioctl(fd, TUNSETOFFLOAD, 0) != 0 && errno == EINVAL) {
-			PMD_DRV_LOG(ERR, "Kernel does't support TUNSETOFFLOAD\n");
-			return -ENOTSUP;
-		}
+	/* Check if our kernel supports TUNSETOFFLOAD */
+	if (ioctl(fd, TUNSETOFFLOAD, 0) != 0 && errno == EINVAL) {
+		PMD_DRV_LOG(ERR, "Kernel does't support TUNSETOFFLOAD\n");
+		return -ENOTSUP;
+	}
 
+	if (ioctl(fd, TUNSETOFFLOAD, offload) != 0) {
+		offload &= ~TUN_F_UFO;
 		if (ioctl(fd, TUNSETOFFLOAD, offload) != 0) {
-			offload &= ~TUN_F_UFO;
-			if (ioctl(fd, TUNSETOFFLOAD, offload) != 0) {
-				PMD_DRV_LOG(ERR, "TUNSETOFFLOAD ioctl() failed: %s\n",
-					strerror(errno));
-				return -1;
-			}
+			PMD_DRV_LOG(ERR, "TUNSETOFFLOAD ioctl() failed: %s\n",
+				strerror(errno));
+			return -1;
 		}
 	}
 
 	return 0;
 }
 
+int
+vhost_kernel_tap_set_queue(int fd, bool attach)
+{
+	struct ifreq ifr = {
+		.ifr_flags = attach ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE,
+	};
+
+	return ioctl(fd, TUNSETQUEUE, &ifr);
+}
+
 int
 vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
 		      const char *mac, uint64_t features)
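Two changes in this file: vhost_kernel_tap_set_offload() loses its static qualifier so vhost_kernel_enable_queue_pair() can call it on an already-open tap, and the old "if (offload != 0)" guard is dropped, so TUNSETOFFLOAD is now issued even when no guest offload feature is negotiated. Under the TUN API a zero mask simply disables all offloads, as in this illustrative one-liner:

/* Illustration only: TUNSETOFFLOAD takes a plain flag mask; passing 0
 * turns every offload off, a non-zero mask enables exactly those bits. */
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int tap_clear_offloads(int tapfd)
{
	return ioctl(tapfd, TUNSETOFFLOAD, 0);
}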

drivers/net/virtio/virtio_user/vhost_kernel_tap.h

@@ -2,6 +2,10 @@
  * Copyright(c) 2016 Intel Corporation
  */
 
+#ifndef _VHOST_KERNEL_TAP_H
+#define _VHOST_KERNEL_TAP_H
+
+#include <stdbool.h>
 #include <sys/ioctl.h>
 
 /* TUN ioctls */
@@ -37,3 +41,7 @@
 
 int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
 			  const char *mac, uint64_t features);
+int vhost_kernel_tap_set_offload(int fd, uint64_t features);
+int vhost_kernel_tap_set_queue(int fd, bool attach);
+
+#endif

drivers/net/virtio/virtio_user/vhost_user.c

@@ -456,6 +456,9 @@ vhost_user_enable_queue_pair(struct virtio_user_dev *dev,
 {
 	int i;
 
+	if (dev->qp_enabled[pair_idx] == enable)
+		return 0;
+
 	for (i = 0; i < 2; ++i) {
 		struct vhost_vring_state state = {
 			.index = pair_idx * 2 + i,
@@ -466,6 +469,7 @@ vhost_user_enable_queue_pair(struct virtio_user_dev *dev,
 			return -1;
 	}
 
+	dev->qp_enabled[pair_idx] = enable;
 	return 0;
 }

drivers/net/virtio/virtio_user/virtio_user_dev.c

@@ -546,8 +546,11 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
 	}
 
 	if (dev->vhostfds) {
-		for (i = 0; i < dev->max_queue_pairs; ++i)
+		for (i = 0; i < dev->max_queue_pairs; ++i) {
 			close(dev->vhostfds[i]);
+			if (dev->tapfds[i] >= 0)
+				close(dev->tapfds[i]);
+		}
 		free(dev->vhostfds);
 		free(dev->tapfds);

drivers/net/virtio/virtio_user/virtio_user_dev.h

@@ -49,6 +49,7 @@ struct virtio_user_dev {
 		struct vring_packed packed_vrings[VIRTIO_MAX_VIRTQUEUES];
 	};
 	struct virtio_user_queue packed_queues[VIRTIO_MAX_VIRTQUEUES];
+	bool qp_enabled[VIRTIO_MAX_VIRTQUEUE_PAIRS];
 
 	struct virtio_user_backend_ops *ops;
 	pthread_mutex_t mutex;