2013-09-18 12:00:00 +02:00
|
|
|
/*-
|
|
|
|
* BSD LICENSE
|
2014-06-04 00:42:50 +01:00
|
|
|
*
|
2014-02-10 11:46:50 +00:00
|
|
|
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
|
2013-09-18 12:00:00 +02:00
|
|
|
* All rights reserved.
|
2014-06-04 00:42:50 +01:00
|
|
|
*
|
2013-09-18 12:00:00 +02:00
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
2014-06-04 00:42:50 +01:00
|
|
|
*
|
2013-09-18 12:00:00 +02:00
|
|
|
* * Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* * Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in
|
|
|
|
* the documentation and/or other materials provided with the
|
|
|
|
* distribution.
|
|
|
|
* * Neither the name of Intel Corporation nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
2014-06-04 00:42:50 +01:00
|
|
|
*
|
2013-09-18 12:00:00 +02:00
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
#include <stdint.h>
|
|
|
|
|
2016-02-02 21:48:15 +08:00
|
|
|
#ifdef RTE_EXEC_ENV_LINUXAPP
|
|
|
|
#include <dirent.h>
|
|
|
|
#include <fcntl.h>
|
|
|
|
#endif
|
|
|
|
|
2013-09-18 12:00:00 +02:00
|
|
|
#include "virtio_pci.h"
|
|
|
|
#include "virtio_logs.h"
|
2016-02-02 21:48:14 +08:00
|
|
|
#include "virtqueue.h"
|
2013-09-18 12:00:00 +02:00
|
|
|
|
virtio: support specification 1.0
Modern (v1.0) virtio pci device defines several pci capabilities.
Each cap has a configure structure corresponding to it, and the
cap.bar and cap.offset fields tell us where to find it.
Firstly, we map the pci resources by rte_eal_pci_map_device().
We then could easily locate a cfg structure by:
cfg_addr = dev->mem_resources[cap.bar].addr + cap.offset;
Therefore, the entrance of enabling modern (v1.0) pci device support
is to iterate the pci capability lists, and to locate some configs
we care; and they are:
- common cfg
For generic virtio and virtqueue configuration, such as setting/getting
features, enabling a specific queue, and so on.
- notify cfg
Combining with `queue_notify_off' from common cfg, we could use it to
notify a specific virt queue.
- device cfg
Where virtio_net_config structure is located.
- isr cfg
Where to read isr (interrupt status).
If any of above cap is not found, we fallback to the legacy virtio
handling.
If succeed, hw->vtpci_ops is assigned to modern_ops, where all
operations are implemented by reading/writing a (or few) specific
configuration space from above 4 cfg structures. And that's basically
how this patch works.
Besides those changes, virtio 1.0 introduces a new status field:
FEATURES_OK, which is set after features negotiation is done.
Last, set the VIRTIO_F_VERSION_1 feature flag.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Qian Xu <qian.q.xu@intel.com>
Reviewed-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Tested-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-02-02 21:48:19 +08:00
|
|
|
/*
|
|
|
|
* Following macros are derived from linux/pci_regs.h, however,
|
|
|
|
* we can't simply include that header here, as there is no such
|
|
|
|
* file for non-Linux platform.
|
|
|
|
*/
|
|
|
|
#define PCI_CAPABILITY_LIST 0x34
|
|
|
|
#define PCI_CAP_ID_VNDR 0x09
|
|
|
|
|
2016-02-16 21:37:04 +01:00
|
|
|
/*
|
|
|
|
* The remaining space is defined by each driver as the per-driver
|
|
|
|
* configuration space.
|
|
|
|
*/
|
|
|
|
#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20)
|
2016-02-02 21:48:20 +08:00
|
|
|
|
2016-06-15 09:03:20 +00:00
|
|
|
static inline int
|
|
|
|
check_vq_phys_addr_ok(struct virtqueue *vq)
|
|
|
|
{
|
|
|
|
/* Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
|
|
|
|
* and only accepts 32 bit page frame number.
|
|
|
|
* Check if the allocated physical memory exceeds 16TB.
|
|
|
|
*/
|
|
|
|
if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
|
|
|
|
(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
|
|
|
|
PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2016-05-17 11:59:51 +02:00
|
|
|
/*
|
|
|
|
* Since we are in legacy mode:
|
|
|
|
* http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf
|
|
|
|
*
|
|
|
|
* "Note that this is possible because while the virtio header is PCI (i.e.
|
|
|
|
* little) endian, the device-specific region is encoded in the native endian of
|
|
|
|
* the guest (where such distinction is applicable)."
|
|
|
|
*
|
|
|
|
* For powerpc which supports both, qemu supposes that cpu is big endian and
|
|
|
|
* enforces this for the virtio-net stuff.
|
|
|
|
*/
|
2016-02-02 21:48:14 +08:00
|
|
|
/* Read `length' bytes of device-specific config, starting at `offset',
 * into `dst' through the legacy I/O port window.
 */
static void
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
#ifdef RTE_ARCH_PPC_64
	int size;

	/* On big-endian ppc64 the legacy device-specific region is in guest
	 * (big) endian while ioport data arrives little endian (see the
	 * comment above this function), so read in the largest naturally
	 * aligned chunks possible and byte-swap each one.
	 */
	while (length > 0) {
		if (length >= 4) {
			size = 4;
			rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
		} else if (length >= 2) {
			size = 2;
			rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
		} else {
			/* Single byte: no swap needed. */
			size = 1;
			rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		dst = (char *)dst + size;
		offset += size;
		length -= size;
	}
#else
	/* Little-endian hosts can read the whole range in one shot. */
	rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, length,
				VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}
|
|
|
|
|
2016-02-02 21:48:14 +08:00
|
|
|
/* Write `length' bytes from `src' into the device-specific config,
 * starting at `offset', through the legacy I/O port window.
 */
static void
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
#ifdef RTE_ARCH_PPC_64
	/* Scratch buffer for the byte-swapped chunk; `src' is const so the
	 * swap cannot be done in place.
	 */
	union {
		uint32_t u32;
		uint16_t u16;
	} tmp;
	int size;

	/* Mirror of the read path: the device-specific region is guest
	 * (big) endian on ppc64, so swap each 4/2-byte chunk before writing.
	 */
	while (length > 0) {
		if (length >= 4) {
			size = 4;
			tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
			rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else if (length >= 2) {
			size = 2;
			tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
			rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else {
			/* Single byte: written as-is, no swap needed. */
			size = 1;
			rte_eal_pci_ioport_write(VTPCI_IO(hw), src, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		src = (const char *)src + size;
		offset += size;
		length -= size;
	}
#else
	/* Little-endian hosts can write the whole range in one shot. */
	rte_eal_pci_ioport_write(VTPCI_IO(hw), src, length,
				 VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}
|
|
|
|
|
2016-02-02 21:48:16 +08:00
|
|
|
static uint64_t
|
2016-02-02 21:48:14 +08:00
|
|
|
legacy_get_features(struct virtio_hw *hw)
|
|
|
|
{
|
2016-03-10 15:01:20 +08:00
|
|
|
uint32_t dst;
|
2016-02-16 21:37:04 +01:00
|
|
|
|
2017-01-06 18:16:18 +08:00
|
|
|
rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 4,
|
|
|
|
VIRTIO_PCI_HOST_FEATURES);
|
2016-02-16 21:37:04 +01:00
|
|
|
return dst;
|
2016-02-02 21:48:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2016-02-02 21:48:16 +08:00
|
|
|
legacy_set_features(struct virtio_hw *hw, uint64_t features)
|
2016-02-02 21:48:14 +08:00
|
|
|
{
|
2016-02-02 21:48:16 +08:00
|
|
|
if ((features >> 32) != 0) {
|
|
|
|
PMD_DRV_LOG(ERR,
|
|
|
|
"only 32 bit features are allowed for legacy virtio!");
|
|
|
|
return;
|
|
|
|
}
|
2017-01-06 18:16:18 +08:00
|
|
|
rte_eal_pci_ioport_write(VTPCI_IO(hw), &features, 4,
|
2016-02-16 21:37:04 +01:00
|
|
|
VIRTIO_PCI_GUEST_FEATURES);
|
2016-02-02 21:48:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint8_t
|
|
|
|
legacy_get_status(struct virtio_hw *hw)
|
|
|
|
{
|
2016-02-16 21:37:04 +01:00
|
|
|
uint8_t dst;
|
|
|
|
|
2017-01-06 18:16:18 +08:00
|
|
|
rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
|
2016-02-16 21:37:04 +01:00
|
|
|
return dst;
|
2016-02-02 21:48:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Write the device status byte through the legacy STATUS ioport register. */
static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_eal_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
}
|
|
|
|
|
|
|
|
/* Reset the legacy device by writing VIRTIO_CONFIG_STATUS_RESET (0) to the
 * status register.
 */
static void
legacy_reset(struct virtio_hw *hw)
{
	legacy_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
}
|
|
|
|
|
|
|
|
static uint8_t
|
|
|
|
legacy_get_isr(struct virtio_hw *hw)
|
|
|
|
{
|
2016-02-16 21:37:04 +01:00
|
|
|
uint8_t dst;
|
|
|
|
|
2017-01-06 18:16:18 +08:00
|
|
|
rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
|
2016-02-16 21:37:04 +01:00
|
|
|
return dst;
|
2016-02-02 21:48:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Enable one vector (0) for Link State Intrerrupt */
|
|
|
|
static uint16_t
|
|
|
|
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
|
|
|
|
{
|
2016-02-16 21:37:04 +01:00
|
|
|
uint16_t dst;
|
|
|
|
|
2017-01-06 18:16:18 +08:00
|
|
|
rte_eal_pci_ioport_write(VTPCI_IO(hw), &vec, 2,
|
|
|
|
VIRTIO_MSI_CONFIG_VECTOR);
|
|
|
|
rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2,
|
|
|
|
VIRTIO_MSI_CONFIG_VECTOR);
|
2016-02-16 21:37:04 +01:00
|
|
|
return dst;
|
2016-02-02 21:48:14 +08:00
|
|
|
}
|
|
|
|
|
2017-01-17 07:10:24 +00:00
|
|
|
static uint16_t
|
|
|
|
legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
|
|
|
|
{
|
|
|
|
uint16_t dst;
|
|
|
|
|
|
|
|
rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
|
|
|
|
VIRTIO_PCI_QUEUE_SEL);
|
|
|
|
rte_eal_pci_ioport_write(VTPCI_IO(hw), &vec, 2,
|
|
|
|
VIRTIO_MSI_QUEUE_VECTOR);
|
|
|
|
rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
|
|
|
|
return dst;
|
|
|
|
}
|
|
|
|
|
2016-02-02 21:48:14 +08:00
|
|
|
static uint16_t
|
|
|
|
legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
|
|
|
|
{
|
2016-02-16 21:37:04 +01:00
|
|
|
uint16_t dst;
|
|
|
|
|
2017-01-06 18:16:18 +08:00
|
|
|
rte_eal_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2,
|
|
|
|
VIRTIO_PCI_QUEUE_SEL);
|
|
|
|
rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
|
2016-02-16 21:37:04 +01:00
|
|
|
return dst;
|
2016-02-02 21:48:14 +08:00
|
|
|
}
|
|
|
|
|
2016-06-15 09:03:20 +00:00
|
|
|
static int
|
2016-02-02 21:48:14 +08:00
|
|
|
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
|
|
|
|
{
|
2016-02-16 21:37:04 +01:00
|
|
|
uint32_t src;
|
2016-02-02 21:48:14 +08:00
|
|
|
|
2016-06-15 09:03:20 +00:00
|
|
|
if (!check_vq_phys_addr_ok(vq))
|
|
|
|
return -1;
|
|
|
|
|
2017-01-06 18:16:18 +08:00
|
|
|
rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
|
2016-02-16 21:37:04 +01:00
|
|
|
VIRTIO_PCI_QUEUE_SEL);
|
2016-06-02 00:12:13 +08:00
|
|
|
src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
|
2017-01-06 18:16:18 +08:00
|
|
|
rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
|
2016-06-15 09:03:20 +00:00
|
|
|
|
|
|
|
return 0;
|
2016-02-02 21:48:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
|
|
|
|
{
|
2016-02-16 21:37:04 +01:00
|
|
|
uint32_t src = 0;
|
2016-02-02 21:48:14 +08:00
|
|
|
|
2017-01-06 18:16:18 +08:00
|
|
|
rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
|
2016-02-16 21:37:04 +01:00
|
|
|
VIRTIO_PCI_QUEUE_SEL);
|
2017-01-06 18:16:18 +08:00
|
|
|
rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
|
2016-02-02 21:48:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Kick the device: write the queue index to the NOTIFY ioport register to
 * tell it new buffers are available on `vq'.
 */
static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
				 VIRTIO_PCI_QUEUE_NOTIFY);
}
|
|
|
|
|
2016-02-02 21:48:15 +08:00
|
|
|
#ifdef RTE_EXEC_ENV_LINUXAPP
|
|
|
|
static int
|
|
|
|
legacy_virtio_has_msix(const struct rte_pci_addr *loc)
|
|
|
|
{
|
|
|
|
DIR *d;
|
|
|
|
char dirname[PATH_MAX];
|
|
|
|
|
|
|
|
snprintf(dirname, sizeof(dirname),
|
2016-06-13 17:07:44 +02:00
|
|
|
"%s/" PCI_PRI_FMT "/msi_irqs", pci_get_sysfs_path(),
|
2016-02-02 21:48:15 +08:00
|
|
|
loc->domain, loc->bus, loc->devid, loc->function);
|
|
|
|
|
|
|
|
d = opendir(dirname);
|
|
|
|
if (d)
|
|
|
|
closedir(d);
|
|
|
|
|
2016-01-27 21:58:30 +08:00
|
|
|
return d != NULL;
|
2016-02-02 21:48:15 +08:00
|
|
|
}
|
|
|
|
#else
|
|
|
|
static int
|
2016-02-16 21:37:01 +01:00
|
|
|
legacy_virtio_has_msix(const struct rte_pci_addr *loc __rte_unused)
|
2016-02-02 21:48:15 +08:00
|
|
|
{
|
|
|
|
/* nic_uio does not enable interrupts, return 0 (false). */
|
|
|
|
return 0;
|
|
|
|
}
|
2016-02-16 21:37:04 +01:00
|
|
|
#endif
|
2016-02-02 21:48:15 +08:00
|
|
|
|
|
|
|
static int
|
2016-02-16 21:37:04 +01:00
|
|
|
legacy_virtio_resource_init(struct rte_pci_device *pci_dev,
|
2016-05-09 09:35:57 -07:00
|
|
|
struct virtio_hw *hw, uint32_t *dev_flags)
|
2016-02-02 21:48:15 +08:00
|
|
|
{
|
2017-01-06 18:16:18 +08:00
|
|
|
if (rte_eal_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
|
2016-02-16 21:37:04 +01:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UNKNOWN)
|
2016-05-09 09:35:57 -07:00
|
|
|
*dev_flags |= RTE_ETH_DEV_INTR_LSC;
|
2016-02-16 21:37:04 +01:00
|
|
|
else
|
2016-05-09 09:35:57 -07:00
|
|
|
*dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
|
2016-02-16 21:37:04 +01:00
|
|
|
|
2016-02-02 21:48:15 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2016-02-02 21:48:14 +08:00
|
|
|
|
net/virtio: fix multiple process support
The introduce of virtio 1.0 support brings yet another set of ops, badly,
it's not handled correctly, that it breaks the multiple process support.
The issue is the data/function pointer may vary from different processes,
and the old used to do one time set (for primary process only). That
said, the function pointer the secondary process saw is actually from the
primary process space. Accessing it could likely result to a crash.
Kudos to the last patches, we now be able to maintain those info that may
vary among different process locally, meaning every process could have its
own copy for each of them, with the correct value set. And this is what
this patch does:
- remap the PCI (IO port for legacy device and memory map for modern
device)
- set vtpci_ops correctly
After that, multiple process would work like a charm. (At least, it
passed my fuzzy test)
Fixes: b8f04520ad71 ("virtio: use PCI ioport API")
Fixes: d5bbeefca826 ("virtio: introduce PCI implementation structure")
Fixes: 6ba1f63b5ab0 ("virtio: support specification 1.0")
Cc: stable@dpdk.org
Reported-by: Juho Snellman <jsnell@iki.fi>
Reported-by: Yaron Illouz <yaroni@radcom.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2017-01-06 18:16:19 +08:00
|
|
|
/* Ops table for legacy (pre-1.0) devices: every operation is implemented
 * with ioport accesses to the legacy PCI I/O register layout.
 */
const struct virtio_pci_ops legacy_ops = {
	.read_dev_cfg	= legacy_read_dev_config,
	.write_dev_cfg	= legacy_write_dev_config,
	.reset		= legacy_reset,
	.get_status	= legacy_get_status,
	.set_status	= legacy_set_status,
	.get_features	= legacy_get_features,
	.set_features	= legacy_set_features,
	.get_isr	= legacy_get_isr,
	.set_config_irq	= legacy_set_config_irq,
	.set_queue_irq  = legacy_set_queue_irq,
	.get_queue_num	= legacy_get_queue_num,
	.setup_queue	= legacy_setup_queue,
	.del_queue	= legacy_del_queue,
	.notify_queue	= legacy_notify_queue,
};
|
|
|
|
|
|
|
|
|
virtio: support specification 1.0
Modern (v1.0) virtio pci device defines several pci capabilities.
Each cap has a configure structure corresponding to it, and the
cap.bar and cap.offset fields tell us where to find it.
Firstly, we map the pci resources by rte_eal_pci_map_device().
We then could easily locate a cfg structure by:
cfg_addr = dev->mem_resources[cap.bar].addr + cap.offset;
Therefore, the entrance of enabling modern (v1.0) pci device support
is to iterate the pci capability lists, and to locate some configs
we care; and they are:
- common cfg
For generic virtio and virtqueue configuration, such as setting/getting
features, enabling a specific queue, and so on.
- notify cfg
Combining with `queue_notify_off' from common cfg, we could use it to
notify a specific virt queue.
- device cfg
Where virtio_net_config structure is located.
- isr cfg
Where to read isr (interrupt status).
If any of above cap is not found, we fallback to the legacy virtio
handling.
If succeed, hw->vtpci_ops is assigned to modern_ops, where all
operations are implemented by reading/writing a (or few) specific
configuration space from above 4 cfg structures. And that's basically
how this patch works.
Besides those changes, virtio 1.0 introduces a new status field:
FEATURES_OK, which is set after features negotiation is done.
Last, set the VIRTIO_F_VERSION_1 feature flag.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Qian Xu <qian.q.xu@intel.com>
Reviewed-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Tested-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-02-02 21:48:19 +08:00
|
|
|
/* Load one byte from a memory-mapped config register. */
static inline uint8_t
io_read8(uint8_t *addr)
{
	volatile uint8_t *reg = addr;

	return *reg;
}
|
|
|
|
|
|
|
|
/* Store one byte into a memory-mapped config register. */
static inline void
io_write8(uint8_t val, uint8_t *addr)
{
	volatile uint8_t *reg = addr;

	*reg = val;
}
|
|
|
|
|
|
|
|
/* Load a 16-bit value from a memory-mapped config register. */
static inline uint16_t
io_read16(uint16_t *addr)
{
	volatile uint16_t *reg = addr;

	return *reg;
}
|
|
|
|
|
|
|
|
/* Store a 16-bit value into a memory-mapped config register. */
static inline void
io_write16(uint16_t val, uint16_t *addr)
{
	volatile uint16_t *reg = addr;

	*reg = val;
}
|
|
|
|
|
|
|
|
/* Load a 32-bit value from a memory-mapped config register. */
static inline uint32_t
io_read32(uint32_t *addr)
{
	volatile uint32_t *reg = addr;

	return *reg;
}
|
|
|
|
|
|
|
|
/* Store a 32-bit value into a memory-mapped config register. */
static inline void
io_write32(uint32_t val, uint32_t *addr)
{
	volatile uint32_t *reg = addr;

	*reg = val;
}
|
|
|
|
|
|
|
|
/* Store a 64-bit value as two 32-bit halves (low then high), matching the
 * split lo/hi register pairs of the modern common config layout.
 */
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	*(volatile uint32_t *)lo = (uint32_t)(val & 0xffffffffULL);
	*(volatile uint32_t *)hi = (uint32_t)(val >> 32);
}
|
|
|
|
|
|
|
|
/* Read `length' bytes of device-specific config, starting at `offset',
 * into `dst' from the memory-mapped device cfg region.
 */
static void
modern_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	/* The config generation counter changes whenever the device updates
	 * its config; re-read the whole range until it is stable so we never
	 * return a torn snapshot.
	 */
	do {
		old_gen = io_read8(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = io_read8((uint8_t *)hw->dev_cfg + offset + i);

		new_gen = io_read8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
modern_write_dev_config(struct virtio_hw *hw, size_t offset,
|
|
|
|
const void *src, int length)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
const uint8_t *p = src;
|
|
|
|
|
|
|
|
for (i = 0; i < length; i++)
|
|
|
|
io_write8(*p++, (uint8_t *)hw->dev_cfg + offset + i);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t
|
|
|
|
modern_get_features(struct virtio_hw *hw)
|
|
|
|
{
|
|
|
|
uint32_t features_lo, features_hi;
|
|
|
|
|
|
|
|
io_write32(0, &hw->common_cfg->device_feature_select);
|
|
|
|
features_lo = io_read32(&hw->common_cfg->device_feature);
|
|
|
|
|
|
|
|
io_write32(1, &hw->common_cfg->device_feature_select);
|
|
|
|
features_hi = io_read32(&hw->common_cfg->device_feature);
|
|
|
|
|
|
|
|
return ((uint64_t)features_hi << 32) | features_lo;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
modern_set_features(struct virtio_hw *hw, uint64_t features)
|
|
|
|
{
|
|
|
|
io_write32(0, &hw->common_cfg->guest_feature_select);
|
|
|
|
io_write32(features & ((1ULL << 32) - 1),
|
|
|
|
&hw->common_cfg->guest_feature);
|
|
|
|
|
|
|
|
io_write32(1, &hw->common_cfg->guest_feature_select);
|
|
|
|
io_write32(features >> 32,
|
|
|
|
&hw->common_cfg->guest_feature);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Read the device status byte from the common config structure. */
static uint8_t
modern_get_status(struct virtio_hw *hw)
{
	return io_read8(&hw->common_cfg->device_status);
}
|
|
|
|
|
|
|
|
/* Write the device status byte into the common config structure. */
static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
{
	io_write8(status, &hw->common_cfg->device_status);
}
|
|
|
|
|
|
|
|
/* Reset the modern device by writing VIRTIO_CONFIG_STATUS_RESET (0) to the
 * status register.
 */
static void
modern_reset(struct virtio_hw *hw)
{
	modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* Read the status back -- presumably to flush the posted write /
	 * give the device time to complete the reset; confirm against the
	 * virtio 1.0 spec.
	 */
	modern_get_status(hw);
}
|
|
|
|
|
|
|
|
/* Read the interrupt status byte from the mapped isr cfg region. */
static uint8_t
modern_get_isr(struct virtio_hw *hw)
{
	return io_read8(hw->isr);
}
|
|
|
|
|
|
|
|
/* Bind MSI-X vector `vec' to config change events; returns the value the
 * device actually accepted (read back after the write).
 */
static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	io_write16(vec, &hw->common_cfg->msix_config);
	return io_read16(&hw->common_cfg->msix_config);
}
|
|
|
|
|
2017-01-17 07:10:24 +00:00
|
|
|
/* Bind MSI-X vector `vec' to the queue behind `vq': select the queue,
 * write the vector, and return the value the device accepted.
 */
static uint16_t
modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	io_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
	io_write16(vec, &hw->common_cfg->queue_msix_vector);
	return io_read16(&hw->common_cfg->queue_msix_vector);
}
|
|
|
|
|
virtio: support specification 1.0
Modern (v1.0) virtio pci device defines several pci capabilities.
Each cap has a configure structure corresponding to it, and the
cap.bar and cap.offset fields tell us where to find it.
Firstly, we map the pci resources by rte_eal_pci_map_device().
We then could easily locate a cfg structure by:
cfg_addr = dev->mem_resources[cap.bar].addr + cap.offset;
Therefore, the entrance of enabling modern (v1.0) pci device support
is to iterate the pci capability lists, and to locate some configs
we care; and they are:
- common cfg
For generic virtio and virtqueue configuration, such as setting/getting
features, enabling a specific queue, and so on.
- notify cfg
Combining with `queue_notify_off' from common cfg, we could use it to
notify a specific virt queue.
- device cfg
Where virtio_net_config structure is located.
- isr cfg
Where to read isr (interrupt status).
If any of above cap is not found, we fallback to the legacy virtio
handling.
If succeed, hw->vtpci_ops is assigned to modern_ops, where all
operations are implemented by reading/writing a (or few) specific
configuration space from above 4 cfg structures. And that's basically
how this patch works.
Besides those changes, virtio 1.0 introduces a new status field:
FEATURES_OK, which is set after features negotiation is done.
Last, set the VIRTIO_F_VERSION_1 feature flag.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Qian Xu <qian.q.xu@intel.com>
Reviewed-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Tested-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-02-02 21:48:19 +08:00
|
|
|
/* Return the size (in descriptors) of queue `queue_id'. */
static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	io_write16(queue_id, &hw->common_cfg->queue_select);
	return io_read16(&hw->common_cfg->queue_size);
}
|
|
|
|
|
2016-06-15 09:03:20 +00:00
|
|
|
static int
|
virtio: support specification 1.0
Modern (v1.0) virtio pci device defines several pci capabilities.
Each cap has a configure structure corresponding to it, and the
cap.bar and cap.offset fields tell us where to find it.
Firstly, we map the pci resources by rte_eal_pci_map_device().
We then could easily locate a cfg structure by:
cfg_addr = dev->mem_resources[cap.bar].addr + cap.offset;
Therefore, the entrance of enabling modern (v1.0) pci device support
is to iterate the pci capability lists, and to locate some configs
we care; and they are:
- common cfg
For generic virtio and virtqueue configuration, such as setting/getting
features, enabling a specific queue, and so on.
- nofity cfg
Combining with `queue_notify_off' from common cfg, we could use it to
notify a specific virt queue.
- device cfg
Where virtio_net_config structure is located.
- isr cfg
Where to read isr (interrupt status).
If any of above cap is not found, we fallback to the legacy virtio
handling.
If succeed, hw->vtpci_ops is assigned to modern_ops, where all
operations are implemented by reading/writing a (or few) specific
configuration space from above 4 cfg structures. And that's basically
how this patch works.
Besides those changes, virtio 1.0 introduces a new status field:
FEATURES_OK, which is set after features negotiation is done.
Last, set the VIRTIO_F_VERSION_1 feature flag.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Qian Xu <qian.q.xu@intel.com>
Reviewed-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Tested-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-02-02 21:48:19 +08:00
|
|
|
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
|
|
|
|
{
|
|
|
|
uint64_t desc_addr, avail_addr, used_addr;
|
|
|
|
uint16_t notify_off;
|
|
|
|
|
2016-06-15 09:03:20 +00:00
|
|
|
if (!check_vq_phys_addr_ok(vq))
|
|
|
|
return -1;
|
|
|
|
|
2016-06-02 00:12:13 +08:00
|
|
|
desc_addr = vq->vq_ring_mem;
|
virtio: support specification 1.0
Modern (v1.0) virtio pci device defines several pci capabilities.
Each cap has a configure structure corresponding to it, and the
cap.bar and cap.offset fields tell us where to find it.
Firstly, we map the pci resources by rte_eal_pci_map_device().
We then could easily locate a cfg structure by:
cfg_addr = dev->mem_resources[cap.bar].addr + cap.offset;
Therefore, the entrance of enabling modern (v1.0) pci device support
is to iterate the pci capability lists, and to locate some configs
we care; and they are:
- common cfg
For generic virtio and virtqueue configuration, such as setting/getting
features, enabling a specific queue, and so on.
- nofity cfg
Combining with `queue_notify_off' from common cfg, we could use it to
notify a specific virt queue.
- device cfg
Where virtio_net_config structure is located.
- isr cfg
Where to read isr (interrupt status).
If any of above cap is not found, we fallback to the legacy virtio
handling.
If succeed, hw->vtpci_ops is assigned to modern_ops, where all
operations are implemented by reading/writing a (or few) specific
configuration space from above 4 cfg structures. And that's basically
how this patch works.
Besides those changes, virtio 1.0 introduces a new status field:
FEATURES_OK, which is set after features negotiation is done.
Last, set the VIRTIO_F_VERSION_1 feature flag.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Qian Xu <qian.q.xu@intel.com>
Reviewed-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Tested-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-02-02 21:48:19 +08:00
|
|
|
avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
|
|
|
|
used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
|
|
|
|
ring[vq->vq_nentries]),
|
|
|
|
VIRTIO_PCI_VRING_ALIGN);
|
|
|
|
|
|
|
|
io_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
|
|
|
|
|
|
|
|
io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
|
|
|
|
&hw->common_cfg->queue_desc_hi);
|
|
|
|
io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
|
|
|
|
&hw->common_cfg->queue_avail_hi);
|
|
|
|
io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
|
|
|
|
&hw->common_cfg->queue_used_hi);
|
|
|
|
|
|
|
|
notify_off = io_read16(&hw->common_cfg->queue_notify_off);
|
|
|
|
vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
|
|
|
|
notify_off * hw->notify_off_multiplier);
|
|
|
|
|
|
|
|
io_write16(1, &hw->common_cfg->queue_enable);
|
|
|
|
|
|
|
|
PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
|
|
|
|
PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
|
|
|
|
PMD_INIT_LOG(DEBUG, "\t aval_addr: %" PRIx64, avail_addr);
|
|
|
|
PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
|
|
|
|
PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)",
|
|
|
|
vq->notify_addr, notify_off);
|
2016-06-15 09:03:20 +00:00
|
|
|
|
|
|
|
return 0;
|
virtio: support specification 1.0
Modern (v1.0) virtio pci device defines several pci capabilities.
Each cap has a configure structure corresponding to it, and the
cap.bar and cap.offset fields tell us where to find it.
Firstly, we map the pci resources by rte_eal_pci_map_device().
We then could easily locate a cfg structure by:
cfg_addr = dev->mem_resources[cap.bar].addr + cap.offset;
Therefore, the entrance of enabling modern (v1.0) pci device support
is to iterate the pci capability lists, and to locate some configs
we care; and they are:
- common cfg
For generic virtio and virtqueue configuration, such as setting/getting
features, enabling a specific queue, and so on.
- nofity cfg
Combining with `queue_notify_off' from common cfg, we could use it to
notify a specific virt queue.
- device cfg
Where virtio_net_config structure is located.
- isr cfg
Where to read isr (interrupt status).
If any of above cap is not found, we fallback to the legacy virtio
handling.
If succeed, hw->vtpci_ops is assigned to modern_ops, where all
operations are implemented by reading/writing a (or few) specific
configuration space from above 4 cfg structures. And that's basically
how this patch works.
Besides those changes, virtio 1.0 introduces a new status field:
FEATURES_OK, which is set after features negotiation is done.
Last, set the VIRTIO_F_VERSION_1 feature flag.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Qian Xu <qian.q.xu@intel.com>
Reviewed-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Tested-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-02-02 21:48:19 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Tear down the queue behind `vq': clear all three ring addresses, then
 * disable the queue.
 */
static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	io_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
				  &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
				  &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
				  &hw->common_cfg->queue_used_hi);

	io_write16(0, &hw->common_cfg->queue_enable);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
|
|
|
|
{
|
|
|
|
io_write16(1, vq->notify_addr);
|
|
|
|
}
|
|
|
|
|
net/virtio: fix multiple process support
The introduction of virtio 1.0 support brings yet another set of ops; unfortunately,
it is not handled correctly, and it breaks the multiple process support.
The issue is the data/function pointer may vary from different processes,
and the old used to do one time set (for primary process only). That
said, the function pointer the secondary process saw is actually from the
primary process space. Accessing it could likely result to a crash.
Kudos to the last patches, we now be able to maintain those info that may
vary among different process locally, meaning every process could have its
own copy for each of them, with the correct value set. And this is what
this patch does:
- remap the PCI (IO port for legacy device and memory map for modern
device)
- set vtpci_ops correctly
After that, multiple process would work like a charm. (At least, it
passed my fuzzy test)
Fixes: b8f04520ad71 ("virtio: use PCI ioport API")
Fixes: d5bbeefca826 ("virtio: introduce PCI implementation structure")
Fixes: 6ba1f63b5ab0 ("virtio: support specification 1.0")
Cc: stable@dpdk.org
Reported-by: Juho Snellman <jsnell@iki.fi>
Reported-by: Yaron Illouz <yaroni@radcom.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2017-01-06 18:16:19 +08:00
|
|
|
/*
 * PCI operations for modern (virtio 1.0) devices.  Every op accesses
 * the device through the memory-mapped capability structures
 * (common/notify/device/isr cfg) located by virtio_read_caps().
 * Installed into the per-process virtio_hw_internal table so each
 * process uses function pointers from its own address space.
 */
const struct virtio_pci_ops modern_ops = {
	.read_dev_cfg = modern_read_dev_config,
	.write_dev_cfg = modern_write_dev_config,
	.reset = modern_reset,
	.get_status = modern_get_status,
	.set_status = modern_set_status,
	.get_features = modern_get_features,
	.set_features = modern_set_features,
	.get_isr = modern_get_isr,
	.set_config_irq = modern_set_config_irq,
	.set_queue_irq = modern_set_queue_irq,
	.get_queue_num = modern_get_queue_num,
	.setup_queue = modern_setup_queue,
	.del_queue = modern_del_queue,
	.notify_queue = modern_notify_queue,
};
|
|
|
|
|
|
|
|
|
2016-02-02 21:48:14 +08:00
|
|
|
/*
 * Read `length' bytes of the device-specific config space at `offset'
 * into `dst', dispatching through the per-process ops pointer
 * (VTPCI_OPS) so legacy and modern devices are handled uniformly.
 */
void
vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
		      void *dst, int length)
{
	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}
|
|
|
|
|
|
|
|
/*
 * Write `length' bytes from `src' into the device-specific config
 * space at `offset', dispatching through the per-process ops pointer
 * (VTPCI_OPS) so legacy and modern devices are handled uniformly.
 */
void
vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
		       const void *src, int length)
{
	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}
|
|
|
|
|
2016-02-02 21:48:16 +08:00
|
|
|
/*
 * Negotiate features with the host: accept the intersection of what
 * the host offers (`host_features') and what this driver supports
 * (hw->guest_features), write the accepted set back to the device,
 * and return it.
 */
uint64_t
vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
{
	uint64_t features;

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	VTPCI_OPS(hw)->set_features(hw, features);

	return features;
}
|
|
|
|
|
|
|
|
/*
 * Reset the device by writing 0 (VIRTIO_CONFIG_STATUS_RESET) to the
 * status register, then read the status back to flush the write.
 */
void
vtpci_reset(struct virtio_hw *hw)
{
	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	VTPCI_OPS(hw)->get_status(hw);
}
|
|
|
|
|
|
|
|
/*
 * Signal that (re)initialization is complete by setting the DRIVER_OK
 * status bit, telling the device the driver is ready to operate.
 */
void
vtpci_reinit_complete(struct virtio_hw *hw)
{
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}
|
|
|
|
|
|
|
|
/*
 * Set the device status register.  A RESET (0) must be written as-is;
 * for any other value the new bits are OR'ed into the current status
 * so previously set bits are preserved.
 */
void
vtpci_set_status(struct virtio_hw *hw, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= VTPCI_OPS(hw)->get_status(hw);

	VTPCI_OPS(hw)->set_status(hw, status);
}
|
2015-02-09 09:13:53 +08:00
|
|
|
|
virtio: support specification 1.0
Modern (v1.0) virtio pci device defines several pci capabilities.
Each cap has a configure structure corresponding to it, and the
cap.bar and cap.offset fields tell us where to find it.
Firstly, we map the pci resources by rte_eal_pci_map_device().
We then could easily locate a cfg structure by:
cfg_addr = dev->mem_resources[cap.bar].addr + cap.offset;
Therefore, the entrance of enabling modern (v1.0) pci device support
is to iterate the pci capability lists, and to locate some configs
we care; and they are:
- common cfg
For generic virtio and virtqueue configuration, such as setting/getting
features, enabling a specific queue, and so on.
- notify cfg
Combining with `queue_notify_off' from common cfg, we could use it to
notify a specific virt queue.
- device cfg
Where virtio_net_config structure is located.
- isr cfg
Where to read isr (interrupt status).
If any of above cap is not found, we fallback to the legacy virtio
handling.
If succeed, hw->vtpci_ops is assigned to modern_ops, where all
operations are implemented by reading/writing a (or few) specific
configuration space from above 4 cfg structures. And that's basically
how this patch works.
Besides those changes, virtio 1.0 introduces a new status field:
FEATURES_OK, which is set after features negotiation is done.
Last, set the VIRTIO_F_VERSION_1 feature flag.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Qian Xu <qian.q.xu@intel.com>
Reviewed-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Tested-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-02-02 21:48:19 +08:00
|
|
|
/*
 * Return the current value of the device status register via the
 * per-process ops pointer.
 */
uint8_t
vtpci_get_status(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_status(hw);
}
|
|
|
|
|
2015-02-09 09:13:53 +08:00
|
|
|
/*
 * Read the ISR (interrupt status) register via the per-process ops
 * pointer.  NOTE(review): on virtio devices reading the ISR typically
 * also acknowledges/clears it -- the backend op defines the exact
 * semantics; confirm against legacy/modern implementations.
 */
uint8_t
vtpci_isr(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_isr(hw);
}
|
|
|
|
|
virtio: support specification 1.0
Modern (v1.0) virtio pci device defines several pci capabilities.
Each cap has a configure structure corresponding to it, and the
cap.bar and cap.offset fields tell us where to find it.
Firstly, we map the pci resources by rte_eal_pci_map_device().
We then could easily locate a cfg structure by:
cfg_addr = dev->mem_resources[cap.bar].addr + cap.offset;
Therefore, the entrance of enabling modern (v1.0) pci device support
is to iterate the pci capability lists, and to locate some configs
we care; and they are:
- common cfg
For generic virtio and virtqueue configuration, such as setting/getting
features, enabling a specific queue, and so on.
- nofity cfg
Combining with `queue_notify_off' from common cfg, we could use it to
notify a specific virt queue.
- device cfg
Where virtio_net_config structure is located.
- isr cfg
Where to read isr (interrupt status).
If any of above cap is not found, we fallback to the legacy virtio
handling.
If succeed, hw->vtpci_ops is assigned to modern_ops, where all
operations are implemented by reading/writing a (or few) specific
configuration space from above 4 cfg structures. And that's basically
how this patch works.
Besides those changes, virtio 1.0 introduces a new status field:
FEATURES_OK, which is set after features negotiation is done.
Last, set the VIRTIO_F_VERSION_1 feature flag.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Qian Xu <qian.q.xu@intel.com>
Reviewed-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Tested-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-02-02 21:48:19 +08:00
|
|
|
static void *
|
|
|
|
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
|
|
|
|
{
|
|
|
|
uint8_t bar = cap->bar;
|
|
|
|
uint32_t length = cap->length;
|
|
|
|
uint32_t offset = cap->offset;
|
|
|
|
uint8_t *base;
|
|
|
|
|
|
|
|
if (bar > 5) {
|
|
|
|
PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (offset + length < offset) {
|
|
|
|
PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows",
|
|
|
|
offset, length);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (offset + length > dev->mem_resource[bar].len) {
|
|
|
|
PMD_INIT_LOG(ERR,
|
|
|
|
"invalid cap: overflows bar space: %u > %" PRIu64,
|
|
|
|
offset + length, dev->mem_resource[bar].len);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
base = dev->mem_resource[bar].addr;
|
|
|
|
if (base == NULL) {
|
|
|
|
PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return base + offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Walk the PCI capability list looking for the four virtio 1.0 vendor
 * capabilities (common, notify, device and isr cfg) and record their
 * mapped addresses in `hw'.
 *
 * Returns 0 when all four were found (modern virtio device), -1 on
 * mapping/read failure or when any capability is missing; the caller
 * then falls back to legacy virtio handling.
 */
static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	/* Map the device's BARs so capability offsets can be resolved. */
	if (rte_eal_pci_map_device(dev)) {
		PMD_INIT_LOG(DEBUG, "failed to map pci device!");
		return -1;
	}

	/* Read the offset of the first capability in the list. */
	ret = rte_eal_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret < 0) {
		PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
		return -1;
	}

	/* Iterate the singly-linked capability list (pos == 0 ends it). */
	while (pos) {
		ret = rte_eal_pci_read_config(dev, &cap, sizeof(cap), pos);
		if (ret < 0) {
			PMD_INIT_LOG(ERR,
				"failed to read pci cap at pos: %x", pos);
			break;
		}

		/* Virtio 1.0 structures are exposed as vendor-specific caps. */
		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			PMD_INIT_LOG(DEBUG,
				"[%2x] skipping non VNDR cap id: %02x",
				pos, cap.cap_vndr);
			goto next;
		}

		PMD_INIT_LOG(DEBUG,
			"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			/*
			 * The notify cap carries an extra 32-bit field right
			 * after the generic cap header: the offset multiplier
			 * used to compute per-queue notify addresses.
			 * NOTE(review): this read's return value is unchecked;
			 * a failure would leave notify_off_multiplier stale.
			 */
			rte_eal_pci_read_config(dev, &hw->notify_off_multiplier,
						4, pos + sizeof(cap));
			hw->notify_base = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(dev, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	/* All four structures are required for modern-device operation. */
	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL || hw->isr == NULL) {
		PMD_INIT_LOG(INFO, "no modern virtio pci device found.");
		return -1;
	}

	PMD_INIT_LOG(INFO, "found modern virtio pci device.");

	PMD_INIT_LOG(DEBUG, "common cfg mapped at: %p", hw->common_cfg);
	PMD_INIT_LOG(DEBUG, "device cfg mapped at: %p", hw->dev_cfg);
	PMD_INIT_LOG(DEBUG, "isr cfg mapped at: %p", hw->isr);
	PMD_INIT_LOG(DEBUG, "notify base: %p, notify off multiplier: %u",
		hw->notify_base, hw->notify_off_multiplier);

	return 0;
}
|
|
|
|
|
2016-03-08 23:33:43 +08:00
|
|
|
/*
|
|
|
|
* Return -1:
|
|
|
|
* if there is error mapping with VFIO/UIO.
|
|
|
|
* if port map error when driver type is KDRV_NONE.
|
2016-06-13 22:53:08 +08:00
|
|
|
* if whitelisted but driver type is KDRV_UNKNOWN.
|
2016-03-08 23:33:43 +08:00
|
|
|
* Return 1 if kernel driver is managing the device.
|
|
|
|
* Return 0 on success.
|
|
|
|
*/
|
2016-02-02 21:48:14 +08:00
|
|
|
int
|
2016-05-09 09:35:57 -07:00
|
|
|
vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
|
|
|
|
uint32_t *dev_flags)
|
2016-02-02 21:48:14 +08:00
|
|
|
{
|
virtio: support specification 1.0
Modern (v1.0) virtio pci device defines several pci capabilities.
Each cap has a configure structure corresponding to it, and the
cap.bar and cap.offset fields tell us where to find it.
Firstly, we map the pci resources by rte_eal_pci_map_device().
We then could easily locate a cfg structure by:
cfg_addr = dev->mem_resources[cap.bar].addr + cap.offset;
Therefore, the entrance of enabling modern (v1.0) pci device support
is to iterate the pci capability lists, and to locate some configs
we care; and they are:
- common cfg
For generic virtio and virtqueue configuration, such as setting/getting
features, enabling a specific queue, and so on.
- notify cfg
Combining with `queue_notify_off' from common cfg, we could use it to
notify a specific virt queue.
- device cfg
Where virtio_net_config structure is located.
- isr cfg
Where to read isr (interrupt status).
If any of above cap is not found, we fallback to the legacy virtio
handling.
If succeed, hw->vtpci_ops is assigned to modern_ops, where all
operations are implemented by reading/writing a (or few) specific
configuration space from above 4 cfg structures. And that's basically
how this patch works.
Besides those changes, virtio 1.0 introduces a new status field:
FEATURES_OK, which is set after features negotiation is done.
Last, set the VIRTIO_F_VERSION_1 feature flag.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Qian Xu <qian.q.xu@intel.com>
Reviewed-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Tested-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-02-02 21:48:19 +08:00
|
|
|
/*
|
|
|
|
* Try if we can succeed reading virtio pci caps, which exists
|
|
|
|
* only on modern pci device. If failed, we fallback to legacy
|
|
|
|
* virtio handling.
|
|
|
|
*/
|
|
|
|
if (virtio_read_caps(dev, hw) == 0) {
|
|
|
|
PMD_INIT_LOG(INFO, "modern virtio pci detected.");
|
net/virtio: store PCI operators pointer locally
We used to store the vtpci_ops at virtio_hw structure. The struct,
however, is stored in shared memory. That means only one value is
allowed. For the multiple process model, however, the address of
vtpci_ops should be different among different processes.
Take virtio PMD as example, the vtpci_ops is set by the primary
process, based on its own process space. If we access that address
from the secondary process, that would be an illegal memory access;
a crash might then happen.
To make the multiple process model work, we need store the vtpci_ops
in local memory but not in a shared memory. This is what the patch
does: a local virtio_hw_internal array of size RTE_MAX_ETHPORTS is
allocated. This new structure is used to store all these kind of
info in non-shared memory. Currently, we have:
- vtpci_ops
- rte_pci_ioport
- virtio pci mapped memory, such as common_cfg.
The latter two will be done in coming patches. Later patches would also
set them correctly for secondary process, so that the multiple process
model could work.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2017-01-06 18:16:17 +08:00
|
|
|
virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
|
|
|
|
hw->modern = 1;
|
2016-05-09 09:35:57 -07:00
|
|
|
*dev_flags |= RTE_ETH_DEV_INTR_LSC;
|
virtio: support specification 1.0
Modern (v1.0) virtio pci device defines several pci capabilities.
Each cap has a configure structure corresponding to it, and the
cap.bar and cap.offset fields tell us where to find it.
Firstly, we map the pci resources by rte_eal_pci_map_device().
We then could easily locate a cfg structure by:
cfg_addr = dev->mem_resources[cap.bar].addr + cap.offset;
Therefore, the entrance of enabling modern (v1.0) pci device support
is to iterate the pci capability lists, and to locate some configs
we care; and they are:
- common cfg
For generic virtio and virtqueue configuration, such as setting/getting
features, enabling a specific queue, and so on.
- notify cfg
Combining with `queue_notify_off' from common cfg, we could use it to
notify a specific virt queue.
- device cfg
Where virtio_net_config structure is located.
- isr cfg
Where to read isr (interrupt status).
If any of above cap is not found, we fallback to the legacy virtio
handling.
If it succeeds, hw->vtpci_ops is assigned to modern_ops, where all
operations are implemented by reading/writing a (or few) specific
configuration space from above 4 cfg structures. And that's basically
how this patch works.
Besides those changes, virtio 1.0 introduces a new status field:
FEATURES_OK, which is set after features negotiation is done.
Last, set the VIRTIO_F_VERSION_1 feature flag.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Qian Xu <qian.q.xu@intel.com>
Reviewed-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Tested-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-02-02 21:48:19 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
|
2016-05-09 09:35:57 -07:00
|
|
|
if (legacy_virtio_resource_init(dev, hw, dev_flags) < 0) {
|
2016-03-08 23:33:43 +08:00
|
|
|
if (dev->kdrv == RTE_KDRV_UNKNOWN &&
|
2016-09-20 18:11:36 +05:30
|
|
|
(!dev->device.devargs ||
|
|
|
|
dev->device.devargs->type !=
|
|
|
|
RTE_DEVTYPE_WHITELISTED_PCI)) {
|
2016-03-08 23:33:43 +08:00
|
|
|
PMD_INIT_LOG(INFO,
|
|
|
|
"skip kernel managed virtio device.");
|
|
|
|
return 1;
|
|
|
|
}
|
2016-02-02 21:48:15 +08:00
|
|
|
return -1;
|
2016-03-08 23:33:43 +08:00
|
|
|
}
|
virtio: support specification 1.0
Modern (v1.0) virtio pci device defines several pci capabilities.
Each cap has a configure structure corresponding to it, and the
cap.bar and cap.offset fields tell us where to find it.
Firstly, we map the pci resources by rte_eal_pci_map_device().
We then could easily locate a cfg structure by:
cfg_addr = dev->mem_resources[cap.bar].addr + cap.offset;
Therefore, the entrance of enabling modern (v1.0) pci device support
is to iterate the pci capability lists, and to locate some configs
we care; and they are:
- common cfg
For generic virtio and virtqueue configuration, such as setting/getting
features, enabling a specific queue, and so on.
- notify cfg
Combining with `queue_notify_off' from common cfg, we could use it to
notify a specific virt queue.
- device cfg
Where virtio_net_config structure is located.
- isr cfg
Where to read isr (interrupt status).
If any of above cap is not found, we fallback to the legacy virtio
handling.
If it succeeds, hw->vtpci_ops is assigned to modern_ops, where all
operations are implemented by reading/writing a (or few) specific
configuration space from above 4 cfg structures. And that's basically
how this patch works.
Besides those changes, virtio 1.0 introduces a new status field:
FEATURES_OK, which is set after features negotiation is done.
Last, set the VIRTIO_F_VERSION_1 feature flag.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Qian Xu <qian.q.xu@intel.com>
Reviewed-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Tested-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-02-02 21:48:19 +08:00
|
|
|
|
net/virtio: store PCI operators pointer locally
We used to store the vtpci_ops at virtio_hw structure. The struct,
however, is stored in shared memory. That means only one value is
allowed. For the multiple process model, however, the address of
vtpci_ops should be different among different processes.
Take virtio PMD as example, the vtpci_ops is set by the primary
process, based on its own process space. If we access that address
from the secondary process, that would be an illegal memory access;
a crash might then happen.
To make the multiple process model work, we need store the vtpci_ops
in local memory but not in a shared memory. This is what the patch
does: a local virtio_hw_internal array of size RTE_MAX_ETHPORTS is
allocated. This new structure is used to store all these kind of
info in non-shared memory. Currently, we have:
- vtpci_ops
- rte_pci_ioport
- virtio pci mapped memory, such as common_cfg.
The latter two will be done in coming patches. Later patches would also
set them correctly for secondary process, so that the multiple process
model could work.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2017-01-06 18:16:17 +08:00
|
|
|
virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
|
2016-02-02 21:48:15 +08:00
|
|
|
hw->use_msix = legacy_virtio_has_msix(&dev->addr);
|
virtio: support specification 1.0
Modern (v1.0) virtio pci device defines several pci capabilities.
Each cap has a configure structure corresponding to it, and the
cap.bar and cap.offset fields tell us where to find it.
Firstly, we map the pci resources by rte_eal_pci_map_device().
We then could easily locate a cfg structure by:
cfg_addr = dev->mem_resources[cap.bar].addr + cap.offset;
Therefore, the entrance of enabling modern (v1.0) pci device support
is to iterate the pci capability lists, and to locate some configs
we care; and they are:
- common cfg
For generic virtio and virtqueue configuration, such as setting/getting
features, enabling a specific queue, and so on.
- nofity cfg
Combining with `queue_notify_off' from common cfg, we could use it to
notify a specific virt queue.
- device cfg
Where virtio_net_config structure is located.
- isr cfg
Where to read isr (interrupt status).
If any of above cap is not found, we fallback to the legacy virtio
handling.
If succeed, hw->vtpci_ops is assigned to modern_ops, where all
operations are implemented by reading/writing a (or few) specific
configuration space from above 4 cfg structures. And that's basically
how this patch works.
Besides those changes, virtio 1.0 introduces a new status field:
FEATURES_OK, which is set after features negotiation is done.
Last, set the VIRTIO_F_VERSION_1 feature flag.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Qian Xu <qian.q.xu@intel.com>
Reviewed-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Tested-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-02-02 21:48:19 +08:00
|
|
|
hw->modern = 0;
|
2016-02-02 21:48:15 +08:00
|
|
|
|
2016-02-02 21:48:14 +08:00
|
|
|
return 0;
|
2015-02-09 09:13:53 +08:00
|
|
|
}
|