2014-02-10 13:57:48 +00:00
|
|
|
/*-
|
|
|
|
* BSD LICENSE
|
2014-06-04 00:42:50 +01:00
|
|
|
*
|
2014-02-10 13:57:48 +00:00
|
|
|
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
|
|
|
|
* All rights reserved.
|
2014-06-04 00:42:50 +01:00
|
|
|
*
|
2014-02-10 13:57:48 +00:00
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
2014-06-04 00:42:50 +01:00
|
|
|
*
|
2014-02-10 13:57:48 +00:00
|
|
|
* * Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* * Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in
|
|
|
|
* the documentation and/or other materials provided with the
|
|
|
|
* distribution.
|
|
|
|
* * Neither the name of Intel Corporation nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
2014-06-04 00:42:50 +01:00
|
|
|
*
|
2014-02-10 13:57:48 +00:00
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/vhost.h>
|
|
|
|
#include <linux/virtio_net.h>
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <stdlib.h>
|
2015-09-18 16:01:10 +08:00
|
|
|
#include <assert.h>
|
2014-02-10 13:57:48 +00:00
|
|
|
#include <sys/mman.h>
|
|
|
|
#include <unistd.h>
|
2015-06-25 13:47:37 +08:00
|
|
|
#ifdef RTE_LIBRTE_VHOST_NUMA
|
|
|
|
#include <numaif.h>
|
|
|
|
#endif
|
2014-02-10 13:57:48 +00:00
|
|
|
|
2014-12-18 18:07:07 +00:00
|
|
|
#include <sys/socket.h>
|
|
|
|
|
2014-02-10 13:57:48 +00:00
|
|
|
#include <rte_ethdev.h>
|
|
|
|
#include <rte_log.h>
|
|
|
|
#include <rte_string_fns.h>
|
2014-05-28 16:06:38 +08:00
|
|
|
#include <rte_memory.h>
|
2015-06-25 13:47:36 +08:00
|
|
|
#include <rte_malloc.h>
|
2014-10-09 02:54:54 +08:00
|
|
|
#include <rte_virtio_net.h>
|
2014-02-10 13:57:48 +00:00
|
|
|
|
2015-02-23 17:36:25 +00:00
|
|
|
#include "vhost-net.h"
|
2014-02-10 13:57:48 +00:00
|
|
|
|
2016-03-10 12:19:59 +08:00
|
|
|
#define MAX_VHOST_DEVICE 1024
|
|
|
|
static struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
|
2014-10-09 02:54:46 +08:00
|
|
|
|
2014-11-06 07:31:41 +08:00
|
|
|
/* device ops to add/remove device to/from data core. */
|
2015-02-23 17:36:29 +00:00
|
|
|
struct virtio_net_device_ops const *notify_ops;
|
2014-02-10 13:57:48 +00:00
|
|
|
|
2015-10-22 20:35:49 +08:00
|
|
|
#define VHOST_USER_F_PROTOCOL_FEATURES 30
|
|
|
|
|
2014-11-06 07:31:41 +08:00
|
|
|
/* Features supported by this lib. */
|
2014-11-08 12:26:16 +08:00
|
|
|
#define VHOST_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
|
2015-02-23 17:36:23 +00:00
|
|
|
(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
|
2015-05-27 11:01:31 +08:00
|
|
|
(1ULL << VIRTIO_NET_F_CTRL_RX) | \
|
2016-01-29 12:58:00 +08:00
|
|
|
(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
|
2015-10-29 11:37:45 +08:00
|
|
|
(VHOST_SUPPORTS_MQ) | \
|
2015-10-15 14:08:39 +03:00
|
|
|
(1ULL << VIRTIO_F_VERSION_1) | \
|
2015-10-22 20:35:49 +08:00
|
|
|
(1ULL << VHOST_F_LOG_ALL) | \
|
2016-02-05 15:31:38 +08:00
|
|
|
(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
|
|
|
|
(1ULL << VIRTIO_NET_F_HOST_TSO4) | \
|
|
|
|
(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
|
vhost: add guest offload setting
Add guest offload setting in vhost lib.
Virtio 1.0 spec (5.1.6.4 Processing of Incoming Packets) says:
1. If the VIRTIO_NET_F_GUEST_CSUM feature was negotiated, the
VIRTIO_NET_HDR_F_NEEDS_CSUM bit in flags can be set: if so,
the packet checksum at offset csum_offset from csum_start
and any preceding checksums have been validated. The checksum
on the packet is incomplete and csum_start and csum_offset
indicate how to calculate it (see Packet Transmission point 1).
2. If the VIRTIO_NET_F_GUEST_TSO4, TSO6 or UFO options were
negotiated, then gso_type MAY be something other than
VIRTIO_NET_HDR_GSO_NONE, and gso_size field indicates the
desired MSS (see Packet Transmission point 2).
In order to support these features, the following changes are added,
1. Extend 'VHOST_SUPPORTED_FEATURES' macro to add the offload features negotiation.
2. Enqueue these offloads: convert some fields in mbuf to the fields in virtio_net_hdr.
There are more explanations for the implementation.
For VM2VM case, there is no need to do checksum, for we think the
data should be reliable enough, and setting VIRTIO_NET_HDR_F_NEEDS_CSUM
at RX side will let the TCP layer to bypass the checksum validation,
so that the RX side could receive the packet in the end.
In terms of us-vhost, at vhost RX side, the offload information is
inherited from mbuf, which is in turn inherited from TX side. If we
can still get those info at RX side, it means the packet is from
another VM at same host. So, it's safe to set the
VIRTIO_NET_HDR_F_NEEDS_CSUM, to skip checksum validation.
Signed-off-by: Jijiang Liu <jijiang.liu@intel.com>
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-02-05 15:31:39 +08:00
|
|
|
(1ULL << VIRTIO_NET_F_CSUM) | \
|
|
|
|
(1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
|
|
|
|
(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
|
|
|
|
(1ULL << VIRTIO_NET_F_GUEST_TSO6))
|
2016-02-05 15:31:38 +08:00
|
|
|
|
2014-10-09 02:54:53 +08:00
|
|
|
static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
|
2014-02-10 13:57:48 +00:00
|
|
|
|
|
|
|
|
|
|
|
/*
|
2014-11-06 07:31:41 +08:00
|
|
|
* Converts QEMU virtual address to Vhost virtual address. This function is
|
|
|
|
* used to convert the ring addresses to our address space.
|
2014-02-10 13:57:48 +00:00
|
|
|
*/
|
|
|
|
static uint64_t
|
|
|
|
qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
|
|
|
|
{
|
|
|
|
struct virtio_memory_regions *region;
|
|
|
|
uint64_t vhost_va = 0;
|
|
|
|
uint32_t regionidx = 0;
|
|
|
|
|
|
|
|
/* Find the region where the address lives. */
|
|
|
|
for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
|
|
|
|
region = &dev->mem->regions[regionidx];
|
|
|
|
if ((qemu_va >= region->userspace_address) &&
|
2014-11-06 07:31:41 +08:00
|
|
|
(qemu_va <= region->userspace_address +
|
|
|
|
region->memory_size)) {
|
2015-02-23 17:36:31 +00:00
|
|
|
vhost_va = qemu_va + region->guest_phys_address +
|
|
|
|
region->address_offset -
|
|
|
|
region->userspace_address;
|
2014-02-10 13:57:48 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return vhost_va;
|
|
|
|
}
|
|
|
|
|
2015-02-23 17:36:29 +00:00
|
|
|
struct virtio_net *
|
2016-04-30 07:24:27 +08:00
|
|
|
get_device(int vid)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
2016-04-30 07:24:27 +08:00
|
|
|
struct virtio_net *dev = vhost_devices[vid];
|
2014-02-10 13:57:48 +00:00
|
|
|
|
2016-03-10 12:19:59 +08:00
|
|
|
if (unlikely(!dev)) {
|
|
|
|
RTE_LOG(ERR, VHOST_CONFIG,
|
2016-04-30 07:24:27 +08:00
|
|
|
"(%d) device not found.\n", vid);
|
2014-02-10 13:57:48 +00:00
|
|
|
}
|
|
|
|
|
2016-03-10 12:19:59 +08:00
|
|
|
return dev;
|
2014-02-10 13:57:48 +00:00
|
|
|
}
|
|
|
|
|
2015-09-18 16:01:10 +08:00
|
|
|
static void
|
2015-11-24 15:45:35 +09:00
|
|
|
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
|
2015-09-18 16:01:10 +08:00
|
|
|
{
|
2015-11-24 15:45:35 +09:00
|
|
|
if ((vq->callfd >= 0) && (destroy != 0))
|
2015-09-18 16:01:10 +08:00
|
|
|
close(vq->callfd);
|
|
|
|
if (vq->kickfd >= 0)
|
|
|
|
close(vq->kickfd);
|
|
|
|
}
|
|
|
|
|
2014-02-10 13:57:48 +00:00
|
|
|
/*
|
2014-11-06 07:31:41 +08:00
|
|
|
* Unmap any memory, close any file descriptors and
|
|
|
|
* free any memory owned by a device.
|
2014-02-10 13:57:48 +00:00
|
|
|
*/
|
|
|
|
static void
|
2015-11-24 15:45:35 +09:00
|
|
|
cleanup_device(struct virtio_net *dev, int destroy)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
2015-09-18 16:01:10 +08:00
|
|
|
uint32_t i;
|
|
|
|
|
2016-02-10 10:40:55 -08:00
|
|
|
vhost_backend_cleanup(dev);
|
2014-02-10 13:57:48 +00:00
|
|
|
|
2015-09-18 16:01:10 +08:00
|
|
|
for (i = 0; i < dev->virt_qp_nb; i++) {
|
2015-11-24 15:45:35 +09:00
|
|
|
cleanup_vq(dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ], destroy);
|
|
|
|
cleanup_vq(dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ], destroy);
|
2015-09-18 16:01:10 +08:00
|
|
|
}
|
2014-02-10 13:57:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Release virtqueues and device memory.
|
|
|
|
*/
|
|
|
|
static void
|
2016-03-10 12:19:59 +08:00
|
|
|
free_device(struct virtio_net *dev)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
2015-09-18 16:01:10 +08:00
|
|
|
uint32_t i;
|
|
|
|
|
2016-03-10 12:19:59 +08:00
|
|
|
for (i = 0; i < dev->virt_qp_nb; i++)
|
|
|
|
rte_free(dev->virtqueue[i * VIRTIO_QNUM]);
|
2014-11-06 07:31:41 +08:00
|
|
|
|
2016-03-10 12:19:59 +08:00
|
|
|
rte_free(dev);
|
2014-02-10 13:57:48 +00:00
|
|
|
}
|
|
|
|
|
2015-09-18 16:01:10 +08:00
|
|
|
static void
|
2015-10-22 20:35:54 +08:00
|
|
|
init_vring_queue(struct vhost_virtqueue *vq, int qp_idx)
|
2015-09-18 16:01:10 +08:00
|
|
|
{
|
|
|
|
memset(vq, 0, sizeof(struct vhost_virtqueue));
|
|
|
|
|
2016-03-14 17:53:32 +09:00
|
|
|
vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
|
|
|
|
vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
|
2015-09-18 16:01:10 +08:00
|
|
|
|
|
|
|
/* Backends are set to -1 indicating an inactive device. */
|
|
|
|
vq->backend = -1;
|
2015-10-22 20:35:54 +08:00
|
|
|
|
|
|
|
/* always set the default vq pair to enabled */
|
|
|
|
if (qp_idx == 0)
|
|
|
|
vq->enabled = 1;
|
2015-09-18 16:01:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
init_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
|
|
|
|
{
|
2015-10-22 20:35:54 +08:00
|
|
|
uint32_t base_idx = qp_idx * VIRTIO_QNUM;
|
|
|
|
|
|
|
|
init_vring_queue(dev->virtqueue[base_idx + VIRTIO_RXQ], qp_idx);
|
|
|
|
init_vring_queue(dev->virtqueue[base_idx + VIRTIO_TXQ], qp_idx);
|
2015-09-18 16:01:10 +08:00
|
|
|
}
|
|
|
|
|
2015-11-24 15:45:35 +09:00
|
|
|
static void
|
|
|
|
reset_vring_queue(struct vhost_virtqueue *vq, int qp_idx)
|
|
|
|
{
|
|
|
|
int callfd;
|
|
|
|
|
|
|
|
callfd = vq->callfd;
|
|
|
|
init_vring_queue(vq, qp_idx);
|
|
|
|
vq->callfd = callfd;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
reset_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
|
|
|
|
{
|
|
|
|
uint32_t base_idx = qp_idx * VIRTIO_QNUM;
|
|
|
|
|
|
|
|
reset_vring_queue(dev->virtqueue[base_idx + VIRTIO_RXQ], qp_idx);
|
|
|
|
reset_vring_queue(dev->virtqueue[base_idx + VIRTIO_TXQ], qp_idx);
|
|
|
|
}
|
|
|
|
|
2015-09-18 16:01:10 +08:00
|
|
|
static int
|
|
|
|
alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
|
|
|
|
{
|
|
|
|
struct vhost_virtqueue *virtqueue = NULL;
|
|
|
|
uint32_t virt_rx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_RXQ;
|
|
|
|
uint32_t virt_tx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_TXQ;
|
|
|
|
|
|
|
|
virtqueue = rte_malloc(NULL,
|
|
|
|
sizeof(struct vhost_virtqueue) * VIRTIO_QNUM, 0);
|
|
|
|
if (virtqueue == NULL) {
|
|
|
|
RTE_LOG(ERR, VHOST_CONFIG,
|
|
|
|
"Failed to allocate memory for virt qp:%d.\n", qp_idx);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
dev->virtqueue[virt_rx_q_idx] = virtqueue;
|
|
|
|
dev->virtqueue[virt_tx_q_idx] = virtqueue + VIRTIO_TXQ;
|
|
|
|
|
|
|
|
init_vring_queue_pair(dev, qp_idx);
|
|
|
|
|
|
|
|
dev->virt_qp_nb += 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-02-10 13:57:48 +00:00
|
|
|
/*
|
vhost: reset device properly
Currently, we reset all fields of a device to zero when reset
happens, which is wrong, since for some fields like device_fh,
ifname, and virt_qp_nb, they should be same and be kept after
reset until the device is removed. And this is what's the new
helper function reset_device() for.
And use rte_zmalloc() instead of rte_malloc, so that we could
avoid init_device(), which basically dose zero reset only so far.
Hence, init_device() is dropped in this patch.
This patch also removes a hack of using the offset a specific
field (which is virtqueue now) inside of `virtio_net' structure
to do reset, which could be broken easily if someone changed the
field order without caution.
Cc: Tetsuya Mukawa <mukawa@igel.co.jp>
Cc: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Acked-by: Rich Lane <rich.lane@bigswitch.com>
2015-11-12 12:10:41 +08:00
|
|
|
* Reset some variables in device structure, while keeping few
|
2016-05-23 16:36:33 +08:00
|
|
|
* others untouched, such as vid, ifname, virt_qp_nb: they
|
vhost: reset device properly
Currently, we reset all fields of a device to zero when reset
happens, which is wrong, since for some fields like device_fh,
ifname, and virt_qp_nb, they should be same and be kept after
reset until the device is removed. And this is what's the new
helper function reset_device() for.
And use rte_zmalloc() instead of rte_malloc, so that we could
avoid init_device(), which basically dose zero reset only so far.
Hence, init_device() is dropped in this patch.
This patch also removes a hack of using the offset a specific
field (which is virtqueue now) inside of `virtio_net' structure
to do reset, which could be broken easily if someone changed the
field order without caution.
Cc: Tetsuya Mukawa <mukawa@igel.co.jp>
Cc: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Acked-by: Rich Lane <rich.lane@bigswitch.com>
2015-11-12 12:10:41 +08:00
|
|
|
* should be same unless the device is removed.
|
2014-02-10 13:57:48 +00:00
|
|
|
*/
|
|
|
|
static void
|
vhost: reset device properly
Currently, we reset all fields of a device to zero when reset
happens, which is wrong, since for some fields like device_fh,
ifname, and virt_qp_nb, they should be same and be kept after
reset until the device is removed. And this is what's the new
helper function reset_device() for.
And use rte_zmalloc() instead of rte_malloc, so that we could
avoid init_device(), which basically dose zero reset only so far.
Hence, init_device() is dropped in this patch.
This patch also removes a hack of using the offset a specific
field (which is virtqueue now) inside of `virtio_net' structure
to do reset, which could be broken easily if someone changed the
field order without caution.
Cc: Tetsuya Mukawa <mukawa@igel.co.jp>
Cc: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Acked-by: Rich Lane <rich.lane@bigswitch.com>
2015-11-12 12:10:41 +08:00
|
|
|
reset_device(struct virtio_net *dev)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
2015-09-18 16:01:10 +08:00
|
|
|
uint32_t i;
|
2014-02-10 13:57:48 +00:00
|
|
|
|
vhost: reset device properly
Currently, we reset all fields of a device to zero when reset
happens, which is wrong, since for some fields like device_fh,
ifname, and virt_qp_nb, they should be same and be kept after
reset until the device is removed. And this is what's the new
helper function reset_device() for.
And use rte_zmalloc() instead of rte_malloc, so that we could
avoid init_device(), which basically dose zero reset only so far.
Hence, init_device() is dropped in this patch.
This patch also removes a hack of using the offset a specific
field (which is virtqueue now) inside of `virtio_net' structure
to do reset, which could be broken easily if someone changed the
field order without caution.
Cc: Tetsuya Mukawa <mukawa@igel.co.jp>
Cc: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Acked-by: Rich Lane <rich.lane@bigswitch.com>
2015-11-12 12:10:41 +08:00
|
|
|
dev->features = 0;
|
|
|
|
dev->protocol_features = 0;
|
|
|
|
dev->flags = 0;
|
2015-02-23 17:36:31 +00:00
|
|
|
|
2015-09-18 16:01:10 +08:00
|
|
|
for (i = 0; i < dev->virt_qp_nb; i++)
|
2015-11-24 15:45:35 +09:00
|
|
|
reset_vring_queue_pair(dev, i);
|
2014-02-10 13:57:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Function is called from the CUSE open function. The device structure is
|
|
|
|
* initialised and a new entry is added to the device configuration linked
|
|
|
|
* list.
|
|
|
|
*/
|
2016-02-19 10:10:16 -08:00
|
|
|
int
|
2016-04-30 07:24:27 +08:00
|
|
|
vhost_new_device(void)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
2016-03-10 12:19:59 +08:00
|
|
|
struct virtio_net *dev;
|
|
|
|
int i;
|
2014-02-10 13:57:48 +00:00
|
|
|
|
2016-03-10 12:19:59 +08:00
|
|
|
dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
|
|
|
|
if (dev == NULL) {
|
2014-10-09 02:54:57 +08:00
|
|
|
RTE_LOG(ERR, VHOST_CONFIG,
|
2016-04-30 07:24:27 +08:00
|
|
|
"Failed to allocate memory for new dev.\n");
|
2014-02-10 13:57:48 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2016-03-10 12:19:59 +08:00
|
|
|
for (i = 0; i < MAX_VHOST_DEVICE; i++) {
|
|
|
|
if (vhost_devices[i] == NULL)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (i == MAX_VHOST_DEVICE) {
|
|
|
|
RTE_LOG(ERR, VHOST_CONFIG,
|
|
|
|
"Failed to find a free slot for new device.\n");
|
|
|
|
return -1;
|
|
|
|
}
|
2014-02-10 13:57:48 +00:00
|
|
|
|
2016-03-10 12:19:59 +08:00
|
|
|
vhost_devices[i] = dev;
|
2016-05-23 16:36:33 +08:00
|
|
|
dev->vid = i;
|
2014-02-10 13:57:48 +00:00
|
|
|
|
2016-03-10 12:19:59 +08:00
|
|
|
return i;
|
2014-02-10 13:57:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2014-11-06 07:31:41 +08:00
|
|
|
* Function is called from the CUSE release function. This function will
|
|
|
|
* cleanup the device and remove it from device configuration linked list.
|
2014-02-10 13:57:48 +00:00
|
|
|
*/
|
2016-02-19 10:10:16 -08:00
|
|
|
void
|
2016-04-30 07:24:27 +08:00
|
|
|
vhost_destroy_device(int vid)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
2016-04-30 07:24:27 +08:00
|
|
|
struct virtio_net *dev = get_device(vid);
|
2016-03-10 12:19:59 +08:00
|
|
|
|
2016-04-05 14:00:43 +08:00
|
|
|
if (dev == NULL)
|
|
|
|
return;
|
|
|
|
|
2016-04-29 11:59:47 +08:00
|
|
|
if (dev->flags & VIRTIO_DEV_RUNNING) {
|
|
|
|
dev->flags &= ~VIRTIO_DEV_RUNNING;
|
2016-06-13 17:55:49 +08:00
|
|
|
notify_ops->destroy_device(vid);
|
2016-04-29 11:59:47 +08:00
|
|
|
}
|
2016-03-10 12:19:59 +08:00
|
|
|
|
|
|
|
cleanup_device(dev, 1);
|
|
|
|
free_device(dev);
|
|
|
|
|
2016-04-30 07:24:27 +08:00
|
|
|
vhost_devices[vid] = NULL;
|
2014-02-10 13:57:48 +00:00
|
|
|
}
|
|
|
|
|
2016-02-19 10:10:16 -08:00
|
|
|
void
|
2016-04-30 07:24:27 +08:00
|
|
|
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
|
2015-02-23 17:36:32 +00:00
|
|
|
{
|
|
|
|
struct virtio_net *dev;
|
|
|
|
unsigned int len;
|
|
|
|
|
2016-04-30 07:24:27 +08:00
|
|
|
dev = get_device(vid);
|
2015-02-23 17:36:32 +00:00
|
|
|
if (dev == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
len = if_len > sizeof(dev->ifname) ?
|
|
|
|
sizeof(dev->ifname) : if_len;
|
|
|
|
|
|
|
|
strncpy(dev->ifname, if_name, len);
|
2016-05-10 18:11:18 +02:00
|
|
|
dev->ifname[sizeof(dev->ifname) - 1] = '\0';
|
2015-02-23 17:36:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-02-10 13:57:48 +00:00
|
|
|
/*
|
|
|
|
* Called from CUSE IOCTL: VHOST_SET_OWNER
|
2014-11-06 07:31:41 +08:00
|
|
|
* This function just returns success at the moment unless
|
|
|
|
* the device hasn't been initialised.
|
2014-02-10 13:57:48 +00:00
|
|
|
*/
|
2016-02-19 10:10:16 -08:00
|
|
|
int
|
2016-04-30 07:24:27 +08:00
|
|
|
vhost_set_owner(int vid)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
|
|
|
struct virtio_net *dev;
|
|
|
|
|
2016-04-30 07:24:27 +08:00
|
|
|
dev = get_device(vid);
|
2014-02-10 13:57:48 +00:00
|
|
|
if (dev == NULL)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called from CUSE IOCTL: VHOST_RESET_OWNER
|
|
|
|
*/
|
2016-02-19 10:10:16 -08:00
|
|
|
int
|
2016-04-30 07:24:27 +08:00
|
|
|
vhost_reset_owner(int vid)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
2015-10-19 11:44:27 +02:00
|
|
|
struct virtio_net *dev;
|
2014-02-10 13:57:48 +00:00
|
|
|
|
2016-04-30 07:24:27 +08:00
|
|
|
dev = get_device(vid);
|
2015-10-19 11:44:27 +02:00
|
|
|
if (dev == NULL)
|
|
|
|
return -1;
|
2014-02-10 13:57:48 +00:00
|
|
|
|
2016-04-29 11:59:47 +08:00
|
|
|
if (dev->flags & VIRTIO_DEV_RUNNING) {
|
|
|
|
dev->flags &= ~VIRTIO_DEV_RUNNING;
|
2016-06-13 17:55:49 +08:00
|
|
|
notify_ops->destroy_device(vid);
|
2016-04-29 11:59:47 +08:00
|
|
|
}
|
2015-11-09 18:15:13 -08:00
|
|
|
|
2015-11-24 15:45:35 +09:00
|
|
|
cleanup_device(dev, 0);
|
vhost: reset device properly
Currently, we reset all fields of a device to zero when reset
happens, which is wrong, since for some fields like device_fh,
ifname, and virt_qp_nb, they should be same and be kept after
reset until the device is removed. And this is what's the new
helper function reset_device() for.
And use rte_zmalloc() instead of rte_malloc, so that we could
avoid init_device(), which basically dose zero reset only so far.
Hence, init_device() is dropped in this patch.
This patch also removes a hack of using the offset a specific
field (which is virtqueue now) inside of `virtio_net' structure
to do reset, which could be broken easily if someone changed the
field order without caution.
Cc: Tetsuya Mukawa <mukawa@igel.co.jp>
Cc: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Acked-by: Rich Lane <rich.lane@bigswitch.com>
2015-11-12 12:10:41 +08:00
|
|
|
reset_device(dev);
|
2014-02-10 13:57:48 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called from CUSE IOCTL: VHOST_GET_FEATURES
|
|
|
|
* The features that we support are requested.
|
|
|
|
*/
|
2016-02-19 10:10:16 -08:00
|
|
|
int
|
2016-04-30 07:24:27 +08:00
|
|
|
vhost_get_features(int vid, uint64_t *pu)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
|
|
|
struct virtio_net *dev;
|
|
|
|
|
2016-04-30 07:24:27 +08:00
|
|
|
dev = get_device(vid);
|
2014-02-10 13:57:48 +00:00
|
|
|
if (dev == NULL)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
/* Send our supported features. */
|
|
|
|
*pu = VHOST_FEATURES;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called from CUSE IOCTL: VHOST_SET_FEATURES
|
2014-11-06 07:31:41 +08:00
|
|
|
* We receive the negotiated features supported by us and the virtio device.
|
2014-02-10 13:57:48 +00:00
|
|
|
*/
|
2016-02-19 10:10:16 -08:00
|
|
|
int
|
2016-04-30 07:24:27 +08:00
|
|
|
vhost_set_features(int vid, uint64_t *pu)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
|
|
|
struct virtio_net *dev;
|
|
|
|
|
2016-04-30 07:24:27 +08:00
|
|
|
dev = get_device(vid);
|
2014-02-10 13:57:48 +00:00
|
|
|
if (dev == NULL)
|
|
|
|
return -1;
|
|
|
|
if (*pu & ~VHOST_FEATURES)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
dev->features = *pu;
|
2015-10-15 14:08:39 +03:00
|
|
|
if (dev->features &
|
|
|
|
((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
|
2016-05-02 07:58:52 +08:00
|
|
|
dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
|
2014-02-10 13:57:48 +00:00
|
|
|
} else {
|
2016-05-02 07:58:52 +08:00
|
|
|
dev->vhost_hlen = sizeof(struct virtio_net_hdr);
|
2015-09-18 16:01:10 +08:00
|
|
|
}
|
2015-10-15 14:08:39 +03:00
|
|
|
LOG_DEBUG(VHOST_CONFIG,
|
2016-04-30 04:45:51 +08:00
|
|
|
"(%d) mergeable RX buffers %s, virtio 1 %s\n",
|
2016-05-23 16:36:33 +08:00
|
|
|
dev->vid,
|
2015-10-15 14:08:39 +03:00
|
|
|
(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
|
|
|
|
(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");
|
2015-09-18 16:01:10 +08:00
|
|
|
|
2014-02-10 13:57:48 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called from CUSE IOCTL: VHOST_SET_VRING_NUM
|
|
|
|
* The virtio device sends us the size of the descriptor ring.
|
|
|
|
*/
|
2016-02-19 10:10:16 -08:00
|
|
|
int
|
2016-04-30 07:24:27 +08:00
|
|
|
vhost_set_vring_num(int vid, struct vhost_vring_state *state)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
|
|
|
struct virtio_net *dev;
|
|
|
|
|
2016-04-30 07:24:27 +08:00
|
|
|
dev = get_device(vid);
|
2014-02-10 13:57:48 +00:00
|
|
|
if (dev == NULL)
|
|
|
|
return -1;
|
|
|
|
|
2014-11-06 07:31:41 +08:00
|
|
|
/* State->index refers to the queue index. The txq is 1, rxq is 0. */
|
2014-02-10 13:57:48 +00:00
|
|
|
dev->virtqueue[state->index]->size = state->num;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-06-25 13:47:37 +08:00
|
|
|
/*
|
2015-09-09 13:34:34 +08:00
|
|
|
* Reallocate virtio_dev and vhost_virtqueue data structure to make them on the
|
2015-06-25 13:47:37 +08:00
|
|
|
* same numa node as the memory of vring descriptor.
|
|
|
|
*/
|
|
|
|
#ifdef RTE_LIBRTE_VHOST_NUMA
|
|
|
|
static struct virtio_net*
|
|
|
|
numa_realloc(struct virtio_net *dev, int index)
|
|
|
|
{
|
|
|
|
int oldnode, newnode;
|
2016-03-10 12:20:00 +08:00
|
|
|
struct virtio_net *old_dev;
|
|
|
|
struct vhost_virtqueue *old_vq, *vq;
|
2015-06-25 13:47:37 +08:00
|
|
|
int ret;
|
|
|
|
|
2016-03-10 12:20:01 +08:00
|
|
|
/*
|
|
|
|
* vq is allocated on pairs, we should try to do realloc
|
|
|
|
* on first queue of one queue pair only.
|
|
|
|
*/
|
|
|
|
if (index % VIRTIO_QNUM != 0)
|
|
|
|
return dev;
|
|
|
|
|
2016-03-10 12:19:59 +08:00
|
|
|
old_dev = dev;
|
2016-03-10 12:20:00 +08:00
|
|
|
vq = old_vq = dev->virtqueue[index];
|
2015-06-25 13:47:37 +08:00
|
|
|
|
2016-03-10 12:20:00 +08:00
|
|
|
ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
|
|
|
|
MPOL_F_NODE | MPOL_F_ADDR);
|
|
|
|
|
|
|
|
/* check if we need to reallocate vq */
|
|
|
|
ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
|
|
|
|
MPOL_F_NODE | MPOL_F_ADDR);
|
2015-06-25 13:47:37 +08:00
|
|
|
if (ret) {
|
|
|
|
RTE_LOG(ERR, VHOST_CONFIG,
|
2016-03-10 12:20:00 +08:00
|
|
|
"Unable to get vq numa information.\n");
|
2015-06-25 13:47:37 +08:00
|
|
|
return dev;
|
|
|
|
}
|
2016-03-10 12:20:00 +08:00
|
|
|
if (oldnode != newnode) {
|
|
|
|
RTE_LOG(INFO, VHOST_CONFIG,
|
|
|
|
"reallocate vq from %d to %d node\n", oldnode, newnode);
|
2016-03-10 12:20:01 +08:00
|
|
|
vq = rte_malloc_socket(NULL, sizeof(*vq) * VIRTIO_QNUM, 0,
|
|
|
|
newnode);
|
2016-03-10 12:20:00 +08:00
|
|
|
if (!vq)
|
|
|
|
return dev;
|
|
|
|
|
2016-03-10 12:20:01 +08:00
|
|
|
memcpy(vq, old_vq, sizeof(*vq) * VIRTIO_QNUM);
|
2016-03-10 12:20:00 +08:00
|
|
|
rte_free(old_vq);
|
|
|
|
}
|
2015-06-25 13:47:37 +08:00
|
|
|
|
2016-03-10 12:20:00 +08:00
|
|
|
/* check if we need to reallocate dev */
|
|
|
|
ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
|
|
|
|
MPOL_F_NODE | MPOL_F_ADDR);
|
2015-06-25 13:47:37 +08:00
|
|
|
if (ret) {
|
|
|
|
RTE_LOG(ERR, VHOST_CONFIG,
|
2016-03-10 12:20:00 +08:00
|
|
|
"Unable to get dev numa information.\n");
|
|
|
|
goto out;
|
2015-06-25 13:47:37 +08:00
|
|
|
}
|
2016-03-10 12:20:00 +08:00
|
|
|
if (oldnode != newnode) {
|
|
|
|
RTE_LOG(INFO, VHOST_CONFIG,
|
|
|
|
"reallocate dev from %d to %d node\n",
|
|
|
|
oldnode, newnode);
|
|
|
|
dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
|
|
|
|
if (!dev) {
|
|
|
|
dev = old_dev;
|
|
|
|
goto out;
|
|
|
|
}
|
2016-03-10 12:19:59 +08:00
|
|
|
|
2016-03-10 12:20:00 +08:00
|
|
|
memcpy(dev, old_dev, sizeof(*dev));
|
2016-03-10 12:19:59 +08:00
|
|
|
rte_free(old_dev);
|
2015-06-25 13:47:37 +08:00
|
|
|
}
|
|
|
|
|
2016-03-10 12:20:00 +08:00
|
|
|
out:
|
|
|
|
dev->virtqueue[index] = vq;
|
2016-03-10 12:20:01 +08:00
|
|
|
dev->virtqueue[index + 1] = vq + 1;
|
2016-05-23 16:36:33 +08:00
|
|
|
vhost_devices[dev->vid] = dev;
|
2016-03-10 12:20:00 +08:00
|
|
|
|
|
|
|
return dev;
|
2015-06-25 13:47:37 +08:00
|
|
|
}
|
|
|
|
#else
|
|
|
|
static struct virtio_net*
|
|
|
|
numa_realloc(struct virtio_net *dev, int index __rte_unused)
|
|
|
|
{
|
|
|
|
return dev;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2014-02-10 13:57:48 +00:00
|
|
|
/*
|
|
|
|
* Called from CUSE IOCTL: VHOST_SET_VRING_ADDR
|
2014-11-06 07:31:41 +08:00
|
|
|
* The virtio device sends us the desc, used and avail ring addresses.
|
|
|
|
* This function then converts these to our address space.
|
2014-02-10 13:57:48 +00:00
|
|
|
*/
|
2016-02-19 10:10:16 -08:00
|
|
|
int
|
2016-04-30 07:24:27 +08:00
|
|
|
vhost_set_vring_addr(int vid, struct vhost_vring_addr *addr)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
|
|
|
struct virtio_net *dev;
|
|
|
|
struct vhost_virtqueue *vq;
|
|
|
|
|
2016-04-30 07:24:27 +08:00
|
|
|
dev = get_device(vid);
|
2016-01-13 10:32:57 +03:00
|
|
|
if ((dev == NULL) || (dev->mem == NULL))
|
2014-02-10 13:57:48 +00:00
|
|
|
return -1;
|
|
|
|
|
2014-11-06 07:31:41 +08:00
|
|
|
/* addr->index refers to the queue index. The txq 1, rxq is 0. */
|
2014-02-10 13:57:48 +00:00
|
|
|
vq = dev->virtqueue[addr->index];
|
|
|
|
|
|
|
|
/* The addresses are converted from QEMU virtual to Vhost virtual. */
|
2014-11-06 07:31:41 +08:00
|
|
|
vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
|
|
|
|
addr->desc_user_addr);
|
2014-02-10 13:57:48 +00:00
|
|
|
if (vq->desc == 0) {
|
2014-11-06 07:31:41 +08:00
|
|
|
RTE_LOG(ERR, VHOST_CONFIG,
|
2016-04-30 04:45:51 +08:00
|
|
|
"(%d) failed to find desc ring address.\n",
|
2016-05-23 16:36:33 +08:00
|
|
|
dev->vid);
|
2014-02-10 13:57:48 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2015-06-25 13:47:37 +08:00
|
|
|
dev = numa_realloc(dev, addr->index);
|
|
|
|
vq = dev->virtqueue[addr->index];
|
|
|
|
|
2014-11-06 07:31:41 +08:00
|
|
|
vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
|
|
|
|
addr->avail_user_addr);
|
2014-02-10 13:57:48 +00:00
|
|
|
if (vq->avail == 0) {
|
2014-11-06 07:31:41 +08:00
|
|
|
RTE_LOG(ERR, VHOST_CONFIG,
|
2016-04-30 04:45:51 +08:00
|
|
|
"(%d) failed to find avail ring address.\n",
|
2016-05-23 16:36:33 +08:00
|
|
|
dev->vid);
|
2014-02-10 13:57:48 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2014-11-06 07:31:41 +08:00
|
|
|
vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
|
|
|
|
addr->used_user_addr);
|
2014-02-10 13:57:48 +00:00
|
|
|
if (vq->used == 0) {
|
2014-11-06 07:31:41 +08:00
|
|
|
RTE_LOG(ERR, VHOST_CONFIG,
|
2016-04-30 04:45:51 +08:00
|
|
|
"(%d) failed to find used ring address.\n",
|
2016-05-23 16:36:33 +08:00
|
|
|
dev->vid);
|
2014-02-10 13:57:48 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
vhost: workaround stale vring base
When DPDK app crashes (or quits, or gets killed), a restart of DPDK
app would get stale vring base from QEMU. That would break the kernel
virtio net completely, making it non-work any more, unless a driver
reset is done.
So, instead of getting the stale vring base from QEMU, Huawei suggested
we could get a much saner (and may not the most accurate) vring base
from used->idx. That would work because:
- there is a memory barrier between updating used ring entries and
used->idx. So, even though we crashed at updating the used ring
entries, it will not cause any issue, as the guest driver will not
process those stale used entries, for used-idx is not updated yet.
- DPDK process vring in order, that means a crash may just lead some
packet retransmission for Tx and drop for Rx.
Suggested-by: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-05-07 06:04:05 +08:00
|
|
|
if (vq->last_used_idx != vq->used->idx) {
|
|
|
|
RTE_LOG(WARNING, VHOST_CONFIG,
|
|
|
|
"last_used_idx (%u) and vq->used->idx (%u) mismatches; "
|
|
|
|
"some packets maybe resent for Tx and dropped for Rx\n",
|
|
|
|
vq->last_used_idx, vq->used->idx);
|
|
|
|
vq->last_used_idx = vq->used->idx;
|
|
|
|
}
|
|
|
|
|
2016-01-29 12:57:57 +08:00
|
|
|
vq->log_guest_addr = addr->log_guest_addr;
|
|
|
|
|
2016-04-30 04:45:51 +08:00
|
|
|
LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
|
2016-05-23 16:36:33 +08:00
|
|
|
dev->vid, vq->desc);
|
2016-04-30 04:45:51 +08:00
|
|
|
LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
|
2016-05-23 16:36:33 +08:00
|
|
|
dev->vid, vq->avail);
|
2016-04-30 04:45:51 +08:00
|
|
|
LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
|
2016-05-23 16:36:33 +08:00
|
|
|
dev->vid, vq->used);
|
2016-04-30 04:45:51 +08:00
|
|
|
LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
|
2016-05-23 16:36:33 +08:00
|
|
|
dev->vid, vq->log_guest_addr);
|
2014-02-10 13:57:48 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called from CUSE IOCTL: VHOST_SET_VRING_BASE
|
|
|
|
* The virtio device sends us the available ring last used index.
|
|
|
|
*/
|
2016-02-19 10:10:16 -08:00
|
|
|
int
|
2016-04-30 07:24:27 +08:00
|
|
|
vhost_set_vring_base(int vid, struct vhost_vring_state *state)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
|
|
|
struct virtio_net *dev;
|
|
|
|
|
2016-04-30 07:24:27 +08:00
|
|
|
dev = get_device(vid);
|
2014-02-10 13:57:48 +00:00
|
|
|
if (dev == NULL)
|
|
|
|
return -1;
|
|
|
|
|
2014-11-06 07:31:41 +08:00
|
|
|
/* State->index refers to the queue index. The txq is 1, rxq is 0. */
|
2014-02-10 13:57:48 +00:00
|
|
|
dev->virtqueue[state->index]->last_used_idx = state->num;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called from CUSE IOCTL: VHOST_GET_VRING_BASE
|
|
|
|
* We send the virtio device our available ring last used index.
|
|
|
|
*/
|
2016-02-19 10:10:16 -08:00
|
|
|
int
|
2016-04-30 07:24:27 +08:00
|
|
|
vhost_get_vring_base(int vid, uint32_t index,
|
2014-10-09 02:54:57 +08:00
|
|
|
struct vhost_vring_state *state)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
|
|
|
struct virtio_net *dev;
|
|
|
|
|
2016-04-30 07:24:27 +08:00
|
|
|
dev = get_device(vid);
|
2014-02-10 13:57:48 +00:00
|
|
|
if (dev == NULL)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
state->index = index;
|
2014-11-06 07:31:41 +08:00
|
|
|
/* State->index refers to the queue index. The txq is 1, rxq is 0. */
|
2014-02-10 13:57:48 +00:00
|
|
|
state->num = dev->virtqueue[state->index]->last_used_idx;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called from CUSE IOCTL: VHOST_SET_VRING_CALL
|
2014-11-06 07:31:41 +08:00
|
|
|
* The virtio device sends an eventfd to interrupt the guest. This fd gets
|
|
|
|
* copied into our process space.
|
2014-02-10 13:57:48 +00:00
|
|
|
*/
|
2016-02-19 10:10:16 -08:00
|
|
|
int
|
2016-04-30 07:24:27 +08:00
|
|
|
vhost_set_vring_call(int vid, struct vhost_vring_file *file)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
|
|
|
struct virtio_net *dev;
|
|
|
|
struct vhost_virtqueue *vq;
|
2015-09-18 16:01:10 +08:00
|
|
|
uint32_t cur_qp_idx = file->index / VIRTIO_QNUM;
|
2014-02-10 13:57:48 +00:00
|
|
|
|
2016-04-30 07:24:27 +08:00
|
|
|
dev = get_device(vid);
|
2014-02-10 13:57:48 +00:00
|
|
|
if (dev == NULL)
|
|
|
|
return -1;
|
|
|
|
|
2015-09-18 16:01:10 +08:00
|
|
|
/*
|
|
|
|
* FIXME: VHOST_SET_VRING_CALL is the first per-vring message
|
|
|
|
* we get, so we do vring queue pair allocation here.
|
|
|
|
*/
|
|
|
|
if (cur_qp_idx + 1 > dev->virt_qp_nb) {
|
|
|
|
if (alloc_vring_queue_pair(dev, cur_qp_idx) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2014-11-06 07:31:41 +08:00
|
|
|
/* file->index refers to the queue index. The txq is 1, rxq is 0. */
|
2014-02-10 13:57:48 +00:00
|
|
|
vq = dev->virtqueue[file->index];
|
2015-09-18 16:01:10 +08:00
|
|
|
assert(vq != NULL);
|
2014-02-10 13:57:48 +00:00
|
|
|
|
2015-09-09 13:34:36 +08:00
|
|
|
if (vq->callfd >= 0)
|
|
|
|
close(vq->callfd);
|
2014-02-10 13:57:48 +00:00
|
|
|
|
2015-03-06 18:39:18 +08:00
|
|
|
vq->callfd = file->fd;
|
2014-02-10 13:57:48 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called from CUSE IOCTL: VHOST_SET_VRING_KICK
|
2014-11-06 07:31:41 +08:00
|
|
|
* The virtio device sends an eventfd that it can use to notify us.
|
|
|
|
* This fd gets copied into our process space.
|
2014-02-10 13:57:48 +00:00
|
|
|
*/
|
2016-02-19 10:10:16 -08:00
|
|
|
int
|
2016-04-30 07:24:27 +08:00
|
|
|
vhost_set_vring_kick(int vid, struct vhost_vring_file *file)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
|
|
|
struct virtio_net *dev;
|
|
|
|
struct vhost_virtqueue *vq;
|
|
|
|
|
2016-04-30 07:24:27 +08:00
|
|
|
dev = get_device(vid);
|
2014-02-10 13:57:48 +00:00
|
|
|
if (dev == NULL)
|
|
|
|
return -1;
|
|
|
|
|
2014-11-06 07:31:41 +08:00
|
|
|
/* file->index refers to the queue index. The txq is 1, rxq is 0. */
|
2014-02-10 13:57:48 +00:00
|
|
|
vq = dev->virtqueue[file->index];
|
|
|
|
|
2015-09-09 13:34:36 +08:00
|
|
|
if (vq->kickfd >= 0)
|
|
|
|
close(vq->kickfd);
|
2015-02-23 17:36:31 +00:00
|
|
|
|
2015-03-06 18:39:18 +08:00
|
|
|
vq->kickfd = file->fd;
|
2014-02-10 13:57:48 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called from CUSE IOCTL: VHOST_NET_SET_BACKEND
|
2014-11-06 07:31:41 +08:00
|
|
|
* To complete device initialisation when the virtio driver is loaded,
|
|
|
|
* we are provided with a valid fd for a tap device (not used by us).
|
|
|
|
* If this happens then we can add the device to a data core.
|
|
|
|
* When the virtio driver is removed we get fd=-1.
|
|
|
|
* At that point we remove the device from the data core.
|
|
|
|
* The device will still exist in the device configuration linked list.
|
2014-02-10 13:57:48 +00:00
|
|
|
*/
|
2016-02-19 10:10:16 -08:00
|
|
|
int
|
2016-04-30 07:24:27 +08:00
|
|
|
vhost_set_backend(int vid, struct vhost_vring_file *file)
|
2014-02-10 13:57:48 +00:00
|
|
|
{
|
|
|
|
struct virtio_net *dev;
|
|
|
|
|
2016-04-30 07:24:27 +08:00
|
|
|
dev = get_device(vid);
|
2014-10-09 02:54:57 +08:00
|
|
|
if (dev == NULL)
|
2014-02-10 13:57:48 +00:00
|
|
|
return -1;
|
|
|
|
|
2014-11-06 07:31:41 +08:00
|
|
|
/* file->index refers to the queue index. The txq is 1, rxq is 0. */
|
2014-02-10 13:57:48 +00:00
|
|
|
dev->virtqueue[file->index]->backend = file->fd;
|
|
|
|
|
2014-11-06 07:31:41 +08:00
|
|
|
/*
|
|
|
|
* If the device isn't already running and both backend fds are set,
|
|
|
|
* we add the device.
|
|
|
|
*/
|
2014-02-10 13:57:48 +00:00
|
|
|
if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
|
2016-04-29 11:48:08 +08:00
|
|
|
if (dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED &&
|
|
|
|
dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED) {
|
2016-06-13 17:55:49 +08:00
|
|
|
if (notify_ops->new_device(vid) < 0)
|
2016-04-29 11:59:47 +08:00
|
|
|
return -1;
|
|
|
|
dev->flags |= VIRTIO_DEV_RUNNING;
|
2014-12-18 18:07:07 +00:00
|
|
|
}
|
2016-04-29 11:59:47 +08:00
|
|
|
} else if (file->fd == VIRTIO_DEV_STOPPED) {
|
|
|
|
dev->flags &= ~VIRTIO_DEV_RUNNING;
|
2016-06-13 17:55:49 +08:00
|
|
|
notify_ops->destroy_device(vid);
|
2016-04-29 11:59:47 +08:00
|
|
|
}
|
|
|
|
|
2014-02-10 13:57:48 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-05-11 06:12:57 +08:00
|
|
|
/*
 * Return the NUMA node the given vhost device's memory lives on,
 * or -1 on failure (unknown vid, query error, or NUMA support
 * compiled out).
 */
int
rte_vhost_get_numa_node(int vid)
{
#ifdef RTE_LIBRTE_VHOST_NUMA
	struct virtio_net *dev = get_device(vid);
	int node = -1;
	int err;

	if (!dev)
		return -1;

	/* Ask the kernel which node backs the device structure itself. */
	err = get_mempolicy(&node, NULL, 0, dev,
			MPOL_F_NODE | MPOL_F_ADDR);
	if (err < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) failed to query numa node: %d\n", vid, err);
		return -1;
	}

	return node;
#else
	RTE_SET_USED(vid);
	return -1;
#endif
}
|
|
|
|
|
2016-05-11 06:23:32 +08:00
|
|
|
uint32_t
|
|
|
|
rte_vhost_get_queue_num(int vid)
|
|
|
|
{
|
|
|
|
struct virtio_net *dev = get_device(vid);
|
|
|
|
|
|
|
|
if (dev == NULL)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return dev->virt_qp_nb;
|
|
|
|
}
|
|
|
|
|
2016-05-11 06:38:44 +08:00
|
|
|
int
|
|
|
|
rte_vhost_get_ifname(int vid, char *buf, size_t len)
|
|
|
|
{
|
|
|
|
struct virtio_net *dev = get_device(vid);
|
|
|
|
|
|
|
|
if (dev == NULL)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
len = RTE_MIN(len, sizeof(dev->ifname));
|
|
|
|
|
|
|
|
strncpy(buf, dev->ifname, len);
|
|
|
|
buf[len - 1] = '\0';
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-05-11 06:46:24 +08:00
|
|
|
uint16_t
|
|
|
|
rte_vhost_avail_entries(int vid, uint16_t queue_id)
|
|
|
|
{
|
|
|
|
struct virtio_net *dev;
|
|
|
|
struct vhost_virtqueue *vq;
|
|
|
|
|
|
|
|
dev = get_device(vid);
|
|
|
|
if (!dev)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
vq = dev->virtqueue[queue_id];
|
|
|
|
if (!vq->enabled)
|
|
|
|
return 0;
|
|
|
|
|
2016-06-13 19:52:12 +08:00
|
|
|
return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
|
2016-05-11 06:46:24 +08:00
|
|
|
}
|
|
|
|
|
2016-06-13 17:55:49 +08:00
|
|
|
int
|
|
|
|
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
|
2014-10-09 02:54:51 +08:00
|
|
|
{
|
2016-06-13 17:55:49 +08:00
|
|
|
struct virtio_net *dev = get_device(vid);
|
|
|
|
|
|
|
|
if (dev == NULL)
|
|
|
|
return -1;
|
|
|
|
|
2014-10-09 02:54:51 +08:00
|
|
|
if (enable) {
|
2014-11-06 07:31:41 +08:00
|
|
|
RTE_LOG(ERR, VHOST_CONFIG,
|
|
|
|
"guest notification isn't supported.\n");
|
2014-10-09 02:54:51 +08:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2015-12-11 01:57:20 +08:00
|
|
|
dev->virtqueue[queue_id]->used->flags = VRING_USED_F_NO_NOTIFY;
|
2014-10-09 02:54:51 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the feature bit mask currently advertised to vhost devices. */
uint64_t rte_vhost_feature_get(void)
{
	return VHOST_FEATURES;
}
|
|
|
|
|
|
|
|
int rte_vhost_feature_disable(uint64_t feature_mask)
|
|
|
|
{
|
|
|
|
VHOST_FEATURES = VHOST_FEATURES & ~feature_mask;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int rte_vhost_feature_enable(uint64_t feature_mask)
|
|
|
|
{
|
|
|
|
if ((feature_mask & VHOST_SUPPORTED_FEATURES) == feature_mask) {
|
|
|
|
VHOST_FEATURES = VHOST_FEATURES | feature_mask;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2014-02-10 13:57:48 +00:00
|
|
|
/*
 * Register ops so that we can add/remove device to data core.
 */
int
rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const ops)
{
	/* Store the caller's callback table; invoked on device add/remove. */
	notify_ops = ops;

	return 0;
}
|