/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VIRTIO_PCI_H_
#define _VIRTIO_PCI_H_

#include <stdint.h>

#include <rte_pci.h>
#include <rte_ethdev.h>

struct virtqueue;
struct virtnet_ctl;

/* VirtIO PCI vendor/device ID. */
#define VIRTIO_PCI_VENDORID            0x1AF4
#define VIRTIO_PCI_LEGACY_DEVICEID_NET 0x1000
#define VIRTIO_PCI_MODERN_DEVICEID_NET 0x1041

/* VirtIO ABI version, this must match exactly. */
#define VIRTIO_PCI_ABI_VERSION 0

/*
 * VirtIO Header, located in BAR 0.
 */
#define VIRTIO_PCI_HOST_FEATURES  0  /* host's supported features (32bit, RO)*/
#define VIRTIO_PCI_GUEST_FEATURES 4  /* guest's supported features (32, RW) */
#define VIRTIO_PCI_QUEUE_PFN      8  /* physical address of VQ (32, RW) */
#define VIRTIO_PCI_QUEUE_NUM      12 /* number of ring entries (16, RO) */
#define VIRTIO_PCI_QUEUE_SEL      14 /* current VQ selection (16, RW) */
#define VIRTIO_PCI_QUEUE_NOTIFY   16 /* notify host regarding VQ (16, RW) */
#define VIRTIO_PCI_STATUS         18 /* device status register (8, RW) */
#define VIRTIO_PCI_ISR            19 /* interrupt status register, reading
                                      * also clears the register (8, RO) */
/* Only if MSIX is enabled: */
#define VIRTIO_MSI_CONFIG_VECTOR  20 /* configuration change vector (16, RW) */
#define VIRTIO_MSI_QUEUE_VECTOR   22 /* vector for selected VQ notifications
                                      * (16, RW) */
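
/*
 * Illustrative sketch (not part of the register map above): with the legacy
 * layout, a driver typically selects a queue and reads its ring size through
 * an I/O port handle obtained elsewhere (assumed here to be a mapped
 * struct rte_pci_ioport *io), roughly as follows:
 *
 *	uint16_t queue_sel = 0, size;
 *
 *	rte_eal_pci_ioport_write(io, &queue_sel, 2, VIRTIO_PCI_QUEUE_SEL);
 *	rte_eal_pci_ioport_read(io, &size, 2, VIRTIO_PCI_QUEUE_NUM);
 */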

/* The bit of the ISR which indicates a device has an interrupt. */
#define VIRTIO_PCI_ISR_INTR   0x1
/* The bit of the ISR which indicates a device configuration change. */
#define VIRTIO_PCI_ISR_CONFIG 0x2
/* Vector value used to disable MSI for queue. */
#define VIRTIO_MSI_NO_VECTOR 0xFFFF

/* VirtIO device IDs. */
#define VIRTIO_ID_NETWORK  0x01
#define VIRTIO_ID_BLOCK    0x02
#define VIRTIO_ID_CONSOLE  0x03
#define VIRTIO_ID_ENTROPY  0x04
#define VIRTIO_ID_BALLOON  0x05
#define VIRTIO_ID_IOMEMORY 0x06
#define VIRTIO_ID_9P       0x09

/* Status byte for guest to report progress. */
#define VIRTIO_CONFIG_STATUS_RESET       0x00
#define VIRTIO_CONFIG_STATUS_ACK         0x01
#define VIRTIO_CONFIG_STATUS_DRIVER      0x02
#define VIRTIO_CONFIG_STATUS_DRIVER_OK   0x04
#define VIRTIO_CONFIG_STATUS_FEATURES_OK 0x08
#define VIRTIO_CONFIG_STATUS_FAILED      0x80
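
/*
 * Illustrative sketch of the initialization handshake these status bits
 * encode. FEATURES_OK is new in virtio 1.0 and is set once feature
 * negotiation is done; details such as error handling are omitted:
 *
 *	vtpci_reset(hw);                                  status -> RESET
 *	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
 *	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
 *	... negotiate features (FEATURES_OK on modern devices) ...
 *	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
 */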

/*
 * Each virtqueue indirect descriptor list must be physically contiguous.
 * To allow us to malloc(9) each list individually, limit the number
 * supported to what will fit in one page. With 4KB pages, this is a limit
 * of 256 descriptors. If there is ever a need for more, we can switch to
 * contigmalloc(9) for the larger allocations, similar to what
 * bus_dmamem_alloc(9) does.
 *
 * Note the sizeof(struct vring_desc) is 16 bytes.
 */
#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))

/* The feature bitmap for virtio net */
#define VIRTIO_NET_F_CSUM          0  /* Host handles pkts w/ partial csum */
#define VIRTIO_NET_F_GUEST_CSUM    1  /* Guest handles pkts w/ partial csum */
#define VIRTIO_NET_F_MAC           5  /* Host has given MAC address. */
#define VIRTIO_NET_F_GUEST_TSO4    7  /* Guest can handle TSOv4 in. */
#define VIRTIO_NET_F_GUEST_TSO6    8  /* Guest can handle TSOv6 in. */
#define VIRTIO_NET_F_GUEST_ECN     9  /* Guest can handle TSO[6] w/ ECN in. */
#define VIRTIO_NET_F_GUEST_UFO    10  /* Guest can handle UFO in. */
#define VIRTIO_NET_F_HOST_TSO4    11  /* Host can handle TSOv4 in. */
#define VIRTIO_NET_F_HOST_TSO6    12  /* Host can handle TSOv6 in. */
#define VIRTIO_NET_F_HOST_ECN     13  /* Host can handle TSO[6] w/ ECN in. */
#define VIRTIO_NET_F_HOST_UFO     14  /* Host can handle UFO in. */
#define VIRTIO_NET_F_MRG_RXBUF    15  /* Host can merge receive buffers. */
#define VIRTIO_NET_F_STATUS       16  /* virtio_net_config.status available */
#define VIRTIO_NET_F_CTRL_VQ      17  /* Control channel available */
#define VIRTIO_NET_F_CTRL_RX      18  /* Control channel RX mode support */
#define VIRTIO_NET_F_CTRL_VLAN    19  /* Control channel VLAN filtering */
#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */
#define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the
                                        * network */
#define VIRTIO_NET_F_MQ           22  /* Device supports Receive Flow
                                       * Steering */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */

/* Do we get callbacks when the ring is completely used, even if we've
 * suppressed them? */
#define VIRTIO_F_NOTIFY_ON_EMPTY 24

/* Can the device handle any descriptor layout? */
#define VIRTIO_F_ANY_LAYOUT 27

/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC 28

#define VIRTIO_F_VERSION_1      32
#define VIRTIO_F_IOMMU_PLATFORM 33

/*
 * Some VirtIO feature bits (currently bits 28 through 31) are
 * reserved for the transport being used (e.g. virtio_ring), the
 * rest are per-device feature bits.
 */
#define VIRTIO_TRANSPORT_F_START 28
#define VIRTIO_TRANSPORT_F_END   34

/* The Guest publishes the used index for which it expects an interrupt
 * at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
 * at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX 29

#define VIRTIO_NET_S_LINK_UP  1  /* Link is up */
#define VIRTIO_NET_S_ANNOUNCE 2  /* Announcement is needed */

/*
 * Maximum number of virtqueues per device.
 */
#define VIRTIO_MAX_VIRTQUEUES 8

/* Common configuration */
#define VIRTIO_PCI_CAP_COMMON_CFG 1
/* Notifications */
#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
/* ISR Status */
#define VIRTIO_PCI_CAP_ISR_CFG    3
/* Device specific configuration */
#define VIRTIO_PCI_CAP_DEVICE_CFG 4
/* PCI configuration access */
#define VIRTIO_PCI_CAP_PCI_CFG    5

/* This is the PCI capability header: */
struct virtio_pci_cap {
        uint8_t cap_vndr;    /* Generic PCI field: PCI_CAP_ID_VNDR */
        uint8_t cap_next;    /* Generic PCI field: next ptr. */
        uint8_t cap_len;     /* Generic PCI field: capability length */
        uint8_t cfg_type;    /* Identifies the structure. */
        uint8_t bar;         /* Where to find it. */
        uint8_t padding[3];  /* Pad to full dword. */
        uint32_t offset;     /* Offset within bar. */
        uint32_t length;     /* Length of the structure, in bytes. */
};

struct virtio_pci_notify_cap {
        struct virtio_pci_cap cap;
        uint32_t notify_off_multiplier; /* Multiplier for queue_notify_off. */
};
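
/*
 * Illustrative sketch: once the PCI resources have been mapped (e.g. via
 * rte_eal_pci_map_device()), the configuration area described by one of
 * these capabilities can be located from its bar/offset pair, roughly as:
 *
 *	void *cfg_addr = (uint8_t *)dev->mem_resource[cap.bar].addr
 *			 + cap.offset;
 *
 * where `dev' is the struct rte_pci_device and `cap' a virtio_pci_cap read
 * from PCI config space. This is how the modern (v1.0) path finds the
 * common, notify, ISR and device-specific configuration structures; if any
 * of them is missing, the driver falls back to the legacy layout.
 */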

/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
struct virtio_pci_common_cfg {
        /* About the whole device. */
        uint32_t device_feature_select; /* read-write */
        uint32_t device_feature;        /* read-only */
        uint32_t guest_feature_select;  /* read-write */
        uint32_t guest_feature;         /* read-write */
        uint16_t msix_config;           /* read-write */
        uint16_t num_queues;            /* read-only */
        uint8_t device_status;          /* read-write */
        uint8_t config_generation;      /* read-only */

        /* About a specific virtqueue. */
        uint16_t queue_select;          /* read-write */
        uint16_t queue_size;            /* read-write, power of 2. */
        uint16_t queue_msix_vector;     /* read-write */
        uint16_t queue_enable;          /* read-write */
        uint16_t queue_notify_off;      /* read-only */
        uint32_t queue_desc_lo;         /* read-write */
        uint32_t queue_desc_hi;         /* read-write */
        uint32_t queue_avail_lo;        /* read-write */
        uint32_t queue_avail_hi;        /* read-write */
        uint32_t queue_used_lo;         /* read-write */
        uint32_t queue_used_hi;         /* read-write */
};
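
/*
 * Illustrative sketch (assuming `cfg' points to the mapped common config,
 * i.e. hw->common_cfg): the 64-bit device feature word is read as two
 * 32-bit halves by toggling device_feature_select, roughly as:
 *
 *	cfg->device_feature_select = 0;
 *	features  = cfg->device_feature;
 *	cfg->device_feature_select = 1;
 *	features |= (uint64_t)cfg->device_feature << 32;
 *
 * Guest features are written back the same way through
 * guest_feature_select/guest_feature, and 64-bit ring addresses are split
 * into the *_lo/*_hi pairs above.
 */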

struct virtio_hw;

struct virtio_pci_ops {
        void (*read_dev_cfg)(struct virtio_hw *hw, size_t offset,
                             void *dst, int len);
        void (*write_dev_cfg)(struct virtio_hw *hw, size_t offset,
                              const void *src, int len);
        void (*reset)(struct virtio_hw *hw);

        uint8_t (*get_status)(struct virtio_hw *hw);
        void (*set_status)(struct virtio_hw *hw, uint8_t status);

        uint64_t (*get_features)(struct virtio_hw *hw);
        void (*set_features)(struct virtio_hw *hw, uint64_t features);

        uint8_t (*get_isr)(struct virtio_hw *hw);

        uint16_t (*set_config_irq)(struct virtio_hw *hw, uint16_t vec);

        uint16_t (*set_queue_irq)(struct virtio_hw *hw, struct virtqueue *vq,
                                  uint16_t vec);

        uint16_t (*get_queue_num)(struct virtio_hw *hw, uint16_t queue_id);
        int (*setup_queue)(struct virtio_hw *hw, struct virtqueue *vq);
        void (*del_queue)(struct virtio_hw *hw, struct virtqueue *vq);
        void (*notify_queue)(struct virtio_hw *hw, struct virtqueue *vq);
};

struct virtio_net_config;

struct virtio_hw {
        struct virtnet_ctl *cvq;
        uint64_t    req_guest_features;
        uint64_t    guest_features;
        uint32_t    max_queue_pairs;
        uint16_t    vtnet_hdr_size;
        uint8_t     vlan_strip;
        uint8_t     use_msix;
        uint8_t     modern;
        uint8_t     use_simple_rxtx;
        uint8_t     port_id;
        uint8_t     mac_addr[ETHER_ADDR_LEN];
        uint32_t    notify_off_multiplier;
        uint8_t     *isr;
        uint16_t    *notify_base;
        struct virtio_pci_common_cfg *common_cfg;
        struct virtio_net_config *dev_cfg;
        void        *virtio_user_dev;

        struct virtqueue **vqs;
};

/*
 * While virtio_hw is stored in shared memory, this structure holds the
 * state that may differ between processes in the multi-process model and
 * therefore must live in per-process memory: for example, the vtpci_ops
 * pointer.
 */
struct virtio_hw_internal {
        const struct virtio_pci_ops *vtpci_ops;
        struct rte_pci_ioport io;
};

#define VTPCI_OPS(hw)   (virtio_hw_internal[(hw)->port_id].vtpci_ops)
#define VTPCI_IO(hw)    (&virtio_hw_internal[(hw)->port_id].io)

extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
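
/*
 * Illustrative usage sketch: device access always goes through the
 * per-process ops table rather than a pointer stored in shared memory,
 * e.g.
 *
 *	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
 *	uint16_t num = VTPCI_OPS(hw)->get_queue_num(hw, queue_id);
 *
 * so primary and secondary processes each resolve the ops in their own
 * address space.
 */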

/*
 * This structure is just a reference for reading the
 * net device specific config space.
 */
struct virtio_net_config {
        /* The config defining mac address (if VIRTIO_NET_F_MAC) */
        uint8_t    mac[ETHER_ADDR_LEN];
        /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
        uint16_t   status;
        uint16_t   max_virtqueue_pairs;
} __attribute__((packed));

/*
 * How many bits to shift physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to x86 page size.
 */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12

/* The alignment to use between consumer and producer parts of vring. */
#define VIRTIO_PCI_VRING_ALIGN 4096

static inline int
vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
{
        return (hw->guest_features & (1ULL << bit)) != 0;
}
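
/*
 * Example (illustrative): after feature negotiation, the driver can test
 * individual bits of the negotiated set, e.g.
 *
 *	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
 *		... enable mergeable receive buffers ...
 */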

/*
 * Function declarations from virtio_pci.c
 */
int vtpci_init(struct rte_pci_device *, struct virtio_hw *,
               uint32_t *dev_flags);
void vtpci_reset(struct virtio_hw *);

void vtpci_reinit_complete(struct virtio_hw *);

uint8_t vtpci_get_status(struct virtio_hw *);
void vtpci_set_status(struct virtio_hw *, uint8_t);

uint64_t vtpci_negotiate_features(struct virtio_hw *, uint64_t);

void vtpci_write_dev_config(struct virtio_hw *, size_t, const void *, int);

void vtpci_read_dev_config(struct virtio_hw *, size_t, void *, int);

uint8_t vtpci_isr(struct virtio_hw *);
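
/*
 * Illustrative call order (a sketch of how an ethdev driver is expected to
 * use this API; error handling omitted):
 *
 *	vtpci_init(pci_dev, hw, &dev_flags);    maps the device and picks
 *	                                        legacy vs. modern ops
 *	vtpci_reset(hw);
 *	vtpci_negotiate_features(hw, req_features);
 *	vtpci_read_dev_config(hw, offsetof(struct virtio_net_config, mac),
 *	                      &hw->mac_addr, ETHER_ADDR_LEN);
 *	vtpci_reinit_complete(hw);              sets DRIVER_OK
 */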

extern const struct virtio_pci_ops legacy_ops;
extern const struct virtio_pci_ops modern_ops;
extern const struct virtio_pci_ops virtio_user_ops;

#endif /* _VIRTIO_PCI_H_ */