2013-09-18 10:00:00 +00:00
|
|
|
/*-
|
|
|
|
* BSD LICENSE
|
2014-06-03 23:42:50 +00:00
|
|
|
*
|
2016-06-15 15:25:32 +00:00
|
|
|
* Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
|
2013-09-18 10:00:00 +00:00
|
|
|
* All rights reserved.
|
2014-06-03 23:42:50 +00:00
|
|
|
*
|
2013-09-18 10:00:00 +00:00
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
2014-06-03 23:42:50 +00:00
|
|
|
*
|
2013-09-18 10:00:00 +00:00
|
|
|
* * Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* * Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in
|
|
|
|
* the documentation and/or other materials provided with the
|
|
|
|
* distribution.
|
|
|
|
* * Neither the name of Intel Corporation nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
2014-06-03 23:42:50 +00:00
|
|
|
*
|
2013-09-18 10:00:00 +00:00
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <errno.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
|
|
|
|
#include <rte_ethdev.h>
|
|
|
|
#include <rte_memcpy.h>
|
|
|
|
#include <rte_string_fns.h>
|
|
|
|
#include <rte_memzone.h>
|
|
|
|
#include <rte_malloc.h>
|
|
|
|
#include <rte_atomic.h>
|
|
|
|
#include <rte_branch_prediction.h>
|
|
|
|
#include <rte_pci.h>
|
|
|
|
#include <rte_ether.h>
|
|
|
|
#include <rte_common.h>
|
2015-07-15 13:51:00 +00:00
|
|
|
#include <rte_errno.h>
|
2013-09-18 10:00:00 +00:00
|
|
|
|
|
|
|
#include <rte_memory.h>
|
|
|
|
#include <rte_eal.h>
|
2014-04-21 14:59:37 +00:00
|
|
|
#include <rte_dev.h>
|
2013-09-18 10:00:00 +00:00
|
|
|
|
|
|
|
#include "virtio_ethdev.h"
|
|
|
|
#include "virtio_pci.h"
|
|
|
|
#include "virtio_logs.h"
|
|
|
|
#include "virtqueue.h"
|
2015-10-29 14:53:22 +00:00
|
|
|
#include "virtio_rxtx.h"
|
2013-09-18 10:00:00 +00:00
|
|
|
|
2015-07-15 13:51:00 +00:00
|
|
|
/*
 * Forward declarations for the eth_dev_ops callbacks and internal helpers
 * implemented later in this file.
 */

/* Device lifecycle. */
static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
static int virtio_dev_configure(struct rte_eth_dev *dev);
static int virtio_dev_start(struct rte_eth_dev *dev);
static void virtio_dev_stop(struct rte_eth_dev *dev);

/* Rx filtering modes. */
static void virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);

/* Device info and link state. */
static void virtio_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
	__rte_unused int wait_to_complete);

/* MAC address read/write against the device config space. */
static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);

/* Basic and extended statistics. */
static void virtio_dev_stats_get(struct rte_eth_dev *dev,
				 struct rte_eth_stats *stats);
static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
				 struct rte_eth_xstat *xstats, unsigned n);
static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
				       struct rte_eth_xstat_name *xstats_names,
				       unsigned limit);
static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);

/* VLAN and MAC filter table management (via the control queue). */
static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id, int on);
static void virtio_mac_addr_add(struct rte_eth_dev *dev,
				struct ether_addr *mac_addr,
				uint32_t index, uint32_t vmdq __rte_unused);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static void virtio_mac_addr_set(struct rte_eth_dev *dev,
				struct ether_addr *mac_addr);

/* Queue-to-stats mapping is not supported by virtio; stub only. */
static int virtio_dev_queue_stats_mapping_set(
	__rte_unused struct rte_eth_dev *eth_dev,
	__rte_unused uint16_t queue_id,
	__rte_unused uint8_t stat_idx,
	__rte_unused uint8_t is_rx);
|
|
|
|
|
2013-09-18 10:00:00 +00:00
|
|
|
/*
 * The set of PCI devices this driver supports: both the legacy (0.95)
 * and modern (1.0) virtio-net device IDs, terminated by a zeroed sentinel.
 */
static const struct rte_pci_id pci_id_virtio_map[] = {
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
	{ .vendor_id = 0, /* sentinel */ },
};
|
|
|
|
|
2015-11-02 10:19:00 +00:00
|
|
|
/*
 * Maps a human-readable xstats counter name to the byte offset of the
 * matching field inside the per-queue stats structure (virtnet_rx /
 * virtnet_tx), so counters can be read generically via pointer arithmetic.
 */
struct rte_virtio_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
	{"good_packets", offsetof(struct virtnet_rx, stats.packets)},
	{"good_bytes", offsetof(struct virtnet_rx, stats.bytes)},
	{"errors", offsetof(struct virtnet_rx, stats.errors)},
	{"multicast_packets", offsetof(struct virtnet_rx, stats.multicast)},
	{"broadcast_packets", offsetof(struct virtnet_rx, stats.broadcast)},
	/* Packet-size histogram bins follow. */
	{"undersize_packets", offsetof(struct virtnet_rx, stats.size_bins[0])},
	{"size_64_packets", offsetof(struct virtnet_rx, stats.size_bins[1])},
	{"size_65_127_packets", offsetof(struct virtnet_rx, stats.size_bins[2])},
	{"size_128_255_packets", offsetof(struct virtnet_rx, stats.size_bins[3])},
	{"size_256_511_packets", offsetof(struct virtnet_rx, stats.size_bins[4])},
	{"size_512_1023_packets", offsetof(struct virtnet_rx, stats.size_bins[5])},
	{"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
	{"size_1519_max_packets", offsetof(struct virtnet_rx, stats.size_bins[7])},
};

/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
	{"good_packets", offsetof(struct virtnet_tx, stats.packets)},
	{"good_bytes", offsetof(struct virtnet_tx, stats.bytes)},
	{"errors", offsetof(struct virtnet_tx, stats.errors)},
	{"multicast_packets", offsetof(struct virtnet_tx, stats.multicast)},
	{"broadcast_packets", offsetof(struct virtnet_tx, stats.broadcast)},
	/* Packet-size histogram bins follow (same layout as the Rx table). */
	{"undersize_packets", offsetof(struct virtnet_tx, stats.size_bins[0])},
	{"size_64_packets", offsetof(struct virtnet_tx, stats.size_bins[1])},
	{"size_65_127_packets", offsetof(struct virtnet_tx, stats.size_bins[2])},
	{"size_128_255_packets", offsetof(struct virtnet_tx, stats.size_bins[3])},
	{"size_256_511_packets", offsetof(struct virtnet_tx, stats.size_bins[4])},
	{"size_512_1023_packets", offsetof(struct virtnet_tx, stats.size_bins[5])},
	{"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
	{"size_1519_max_packets", offsetof(struct virtnet_tx, stats.size_bins[7])},
};

/* Number of entries in each xstats table above. */
#define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
			    sizeof(rte_virtio_rxq_stat_strings[0]))
#define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
			    sizeof(rte_virtio_txq_stat_strings[0]))
|
2015-11-02 10:19:00 +00:00
|
|
|
|
2014-05-29 07:18:20 +00:00
|
|
|
/*
 * Send one command over the control virtqueue and busy-wait for the ack.
 *
 * The command layout in the control-queue descriptor chain is (as enforced
 * by qemu): one read-only descriptor for the virtio_net_ctrl_hdr, one
 * read-only descriptor per data element in dlen[], and one device-writable
 * descriptor for the status byte.
 *
 * @param cvq      control queue context (must have a valid vq)
 * @param ctrl     command header + data; ctrl->status receives the ack
 * @param dlen     array of data-element lengths, pkt_num entries
 * @param pkt_num  number of data elements (must be >= 1)
 * @return the device's status byte on success, -1 on invalid arguments
 *         or if the ring lacks pkt_num + 2 free descriptors
 */
static int
virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
		int *dlen, int pkt_num)
{
	uint32_t head, i;
	int k, sum = 0;
	virtio_net_ctrl_ack status = ~0;	/* pre-set to "no answer yet" */
	struct virtio_pmd_ctrl result;
	struct virtqueue *vq;

	ctrl->status = status;

	if (!cvq || !cvq->vq) {
		PMD_INIT_LOG(ERR, "Control queue is not supported.");
		return -1;
	}
	vq = cvq->vq;
	head = vq->vq_desc_head_idx;

	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
		"vq->hw->cvq = %p vq = %p",
		vq->vq_desc_head_idx, status, vq->hw->cvq, vq);

	/* Need header + pkt_num data descriptors + status descriptor. */
	if ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1))
		return -1;

	/* Stage the whole command in the shared header memzone; the
	 * descriptors below point into this buffer. */
	memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
		sizeof(struct virtio_pmd_ctrl));

	/*
	 * Format is enforced in qemu code:
	 * One TX packet for header;
	 * At least one TX packet per argument;
	 * One RX packet for ACK.
	 */
	vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
	vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
	vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_free_cnt--;
	i = vq->vq_ring.desc[head].next;

	/* One read-only descriptor per data element, each addressed at its
	 * running offset (sum) past the header and status byte. */
	for (k = 0; k < pkt_num; k++) {
		vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
			+ sizeof(struct virtio_net_ctrl_hdr)
			+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
		vq->vq_ring.desc[i].len = dlen[k];
		sum += dlen[k];
		vq->vq_free_cnt--;
		i = vq->vq_ring.desc[i].next;
	}

	/* Final descriptor is device-writable: the status/ack byte. */
	vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
	vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
			+ sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_ring.desc[i].len = sizeof(ctrl->status);
	vq->vq_free_cnt--;

	vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;

	vq_update_avail_ring(vq, head);
	vq_update_avail_idx(vq);

	PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);

	virtqueue_notify(vq);

	/* Busy-wait (with 100us sleeps) until the device marks the chain
	 * used; read barriers order the used-index loads. */
	rte_rmb();
	while (VIRTQUEUE_NUSED(vq) == 0) {
		rte_rmb();
		usleep(100);
	}

	/* Reclaim every used descriptor chain back onto the free list. */
	while (VIRTQUEUE_NUSED(vq)) {
		uint32_t idx, desc_idx, used_idx;
		struct vring_used_elem *uep;

		used_idx = (uint32_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		idx = (uint32_t) uep->id;
		desc_idx = idx;

		/* Walk the chain, counting intermediate descriptors freed. */
		while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
			desc_idx = vq->vq_ring.desc[desc_idx].next;
			vq->vq_free_cnt++;
		}

		/* Link the reclaimed chain in front of the free list. */
		vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
		vq->vq_desc_head_idx = idx;

		vq->vq_used_cons_idx++;
		vq->vq_free_cnt++;
	}

	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
			vq->vq_free_cnt, vq->vq_desc_head_idx);

	/* Copy the command buffer back to read the device-written status. */
	memcpy(&result, cvq->virtio_net_hdr_mz->addr,
			sizeof(struct virtio_pmd_ctrl));

	return result.status;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
|
|
|
|
{
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2014-05-29 07:18:20 +00:00
|
|
|
struct virtio_pmd_ctrl ctrl;
|
|
|
|
int dlen[1];
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
|
|
|
|
ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
|
|
|
|
memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));
|
|
|
|
|
|
|
|
dlen[0] = sizeof(uint16_t);
|
|
|
|
|
|
|
|
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
|
|
|
|
if (ret) {
|
|
|
|
PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
|
2014-06-14 01:06:19 +00:00
|
|
|
"failed, this is too late now...");
|
2014-05-29 07:18:20 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-07-15 13:51:02 +00:00
|
|
|
void
|
2016-04-29 00:48:46 +00:00
|
|
|
virtio_dev_queue_release(struct virtqueue *vq)
|
|
|
|
{
|
2015-10-20 15:37:41 +00:00
|
|
|
struct virtio_hw *hw;
|
2015-07-15 13:51:02 +00:00
|
|
|
|
|
|
|
if (vq) {
|
2015-10-20 15:37:41 +00:00
|
|
|
hw = vq->hw;
|
2016-04-29 00:48:46 +00:00
|
|
|
if (vq->configured)
|
|
|
|
hw->vtpci_ops->del_queue(hw, vq);
|
|
|
|
|
2015-10-29 14:53:22 +00:00
|
|
|
rte_free(vq->sw_ring);
|
2015-07-15 13:51:02 +00:00
|
|
|
rte_free(vq);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-09-18 10:00:00 +00:00
|
|
|
int virtio_dev_queue_setup(struct rte_eth_dev *dev,
|
|
|
|
int queue_type,
|
|
|
|
uint16_t queue_idx,
|
2015-07-20 18:40:46 +00:00
|
|
|
uint16_t vtpci_queue_idx,
|
2013-09-18 10:00:00 +00:00
|
|
|
uint16_t nb_desc,
|
|
|
|
unsigned int socket_id,
|
2016-06-01 16:12:13 +00:00
|
|
|
void **pvq)
|
2013-09-18 10:00:00 +00:00
|
|
|
{
|
|
|
|
char vq_name[VIRTQUEUE_MAX_NAME_SZ];
|
2016-06-01 16:12:13 +00:00
|
|
|
char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
|
|
|
|
const struct rte_memzone *mz = NULL, *hdr_mz = NULL;
|
2015-08-28 16:23:38 +00:00
|
|
|
unsigned int vq_size, size;
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
net/virtio: allow virtual address to fill vring descriptors
This patch is related to how to calculate relative address for vhost
backend.
The principle is that: based on one or multiple shared memory regions,
vhost maintains a reference system with the frontend start address,
backend start address, and length for each segment, so that each
frontend address (GPA, Guest Physical Address) can be translated into
vhost-recognizable backend address. To make the address translation
efficient, we need to maintain as few regions as possible. In the case
of VM, GPA is always locally continuous. But for some other case, like
virtio-user, GPA continuous is not guaranteed, therefore, we use virtual
address here.
It basically means:
a. when set_base_addr, VA address is used;
b. when preparing RX's descriptors, VA address is used;
c. when transmitting packets, VA is filled in TX's descriptors;
d. in TX and CQ's header, VA is used.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-06-15 09:03:21 +00:00
|
|
|
struct virtnet_rx *rxvq = NULL;
|
|
|
|
struct virtnet_tx *txvq = NULL;
|
|
|
|
struct virtnet_ctl *cvq = NULL;
|
2016-06-01 16:12:13 +00:00
|
|
|
struct virtqueue *vq;
|
2016-04-29 00:48:45 +00:00
|
|
|
const char *queue_names[] = {"rvq", "txq", "cvq"};
|
2016-06-01 16:12:13 +00:00
|
|
|
size_t sz_vq, sz_q = 0, sz_hdr_mz = 0;
|
|
|
|
void *sw_ring = NULL;
|
|
|
|
int ret;
|
2013-09-18 10:00:00 +00:00
|
|
|
|
2016-02-02 13:48:14 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);
|
2013-09-18 10:00:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Read the virtqueue size from the Queue Size field
|
|
|
|
* Always power of 2 and if 0 virtqueue does not exist
|
|
|
|
*/
|
2016-02-02 13:48:14 +00:00
|
|
|
vq_size = hw->vtpci_ops->get_queue_num(hw, vtpci_queue_idx);
|
2015-08-28 16:23:38 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "vq_size: %u nb_desc:%u", vq_size, nb_desc);
|
2013-09-18 10:00:00 +00:00
|
|
|
if (vq_size == 0) {
|
2016-02-10 16:08:54 +00:00
|
|
|
PMD_INIT_LOG(ERR, "virtqueue does not exist");
|
2014-06-13 01:32:40 +00:00
|
|
|
return -EINVAL;
|
2015-06-11 15:53:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!rte_is_power_of_2(vq_size)) {
|
2016-02-10 16:08:54 +00:00
|
|
|
PMD_INIT_LOG(ERR, "virtqueue size is not powerof 2");
|
2014-06-13 01:32:40 +00:00
|
|
|
return -EINVAL;
|
2015-06-11 15:53:27 +00:00
|
|
|
}
|
|
|
|
|
2016-04-29 00:48:45 +00:00
|
|
|
snprintf(vq_name, sizeof(vq_name), "port%d_%s%d",
|
|
|
|
dev->data->port_id, queue_names[queue_type], queue_idx);
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
sz_vq = RTE_ALIGN_CEIL(sizeof(*vq) +
|
|
|
|
vq_size * sizeof(struct vq_desc_extra),
|
|
|
|
RTE_CACHE_LINE_SIZE);
|
2016-04-29 00:48:45 +00:00
|
|
|
if (queue_type == VTNET_RQ) {
|
2016-06-01 16:12:13 +00:00
|
|
|
sz_q = sz_vq + sizeof(*rxvq);
|
|
|
|
} else if (queue_type == VTNET_TQ) {
|
|
|
|
sz_q = sz_vq + sizeof(*txvq);
|
|
|
|
/*
|
|
|
|
* For each xmit packet, allocate a virtio_net_hdr
|
|
|
|
* and indirect ring elements
|
|
|
|
*/
|
|
|
|
sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
|
|
|
|
} else if (queue_type == VTNET_CQ) {
|
|
|
|
sz_q = sz_vq + sizeof(*cvq);
|
|
|
|
/* Allocate a page for control vq command, data and status */
|
|
|
|
sz_hdr_mz = PAGE_SIZE;
|
2015-10-29 14:53:22 +00:00
|
|
|
}
|
2014-05-29 07:18:19 +00:00
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
vq = rte_zmalloc_socket(vq_name, sz_q, RTE_CACHE_LINE_SIZE, socket_id);
|
|
|
|
if (vq == NULL) {
|
|
|
|
PMD_INIT_LOG(ERR, "can not allocate vq");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2013-09-18 10:00:00 +00:00
|
|
|
vq->hw = hw;
|
|
|
|
vq->vq_queue_index = vtpci_queue_idx;
|
|
|
|
vq->vq_nentries = vq_size;
|
2015-07-20 18:40:45 +00:00
|
|
|
|
|
|
|
if (nb_desc == 0 || nb_desc > vq_size)
|
|
|
|
nb_desc = vq_size;
|
|
|
|
vq->vq_free_cnt = nb_desc;
|
2013-09-18 10:00:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Reserve a memzone for vring elements
|
|
|
|
*/
|
|
|
|
size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
|
|
|
|
vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
|
2016-06-01 16:12:13 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
|
|
|
|
size, vq->vq_ring_size);
|
2013-09-18 10:00:00 +00:00
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, socket_id,
|
|
|
|
0, VIRTIO_PCI_VRING_ALIGN);
|
2013-09-18 10:00:00 +00:00
|
|
|
if (mz == NULL) {
|
2015-07-15 13:51:00 +00:00
|
|
|
if (rte_errno == EEXIST)
|
|
|
|
mz = rte_memzone_lookup(vq_name);
|
|
|
|
if (mz == NULL) {
|
2016-06-01 16:12:13 +00:00
|
|
|
ret = -ENOMEM;
|
|
|
|
goto fail_q_alloc;
|
2015-07-15 13:51:00 +00:00
|
|
|
}
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
2014-05-29 07:18:19 +00:00
|
|
|
|
2013-09-18 10:00:00 +00:00
|
|
|
memset(mz->addr, 0, sizeof(mz->len));
|
2016-06-01 16:12:13 +00:00
|
|
|
|
2013-09-18 10:00:00 +00:00
|
|
|
vq->vq_ring_mem = mz->phys_addr;
|
|
|
|
vq->vq_ring_virt_mem = mz->addr;
|
2016-06-01 16:12:13 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
|
|
|
|
(uint64_t)mz->phys_addr);
|
|
|
|
PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
|
|
|
|
(uint64_t)(uintptr_t)mz->addr);
|
|
|
|
|
|
|
|
if (sz_hdr_mz) {
|
|
|
|
snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_%s%d_hdr",
|
|
|
|
dev->data->port_id, queue_names[queue_type],
|
|
|
|
queue_idx);
|
|
|
|
hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
|
2016-03-04 18:19:19 +00:00
|
|
|
socket_id, 0,
|
|
|
|
RTE_CACHE_LINE_SIZE);
|
|
|
|
if (hdr_mz == NULL) {
|
2015-07-15 13:51:00 +00:00
|
|
|
if (rte_errno == EEXIST)
|
2016-06-01 16:12:13 +00:00
|
|
|
hdr_mz = rte_memzone_lookup(vq_hdr_name);
|
2016-03-04 18:19:19 +00:00
|
|
|
if (hdr_mz == NULL) {
|
2016-06-01 16:12:13 +00:00
|
|
|
ret = -ENOMEM;
|
|
|
|
goto fail_q_alloc;
|
2015-07-15 13:51:00 +00:00
|
|
|
}
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
2016-06-01 16:12:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (queue_type == VTNET_RQ) {
|
|
|
|
size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
|
|
|
|
sizeof(vq->sw_ring[0]);
|
|
|
|
|
|
|
|
sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
|
|
|
|
RTE_CACHE_LINE_SIZE, socket_id);
|
|
|
|
if (!sw_ring) {
|
|
|
|
PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto fail_q_alloc;
|
|
|
|
}
|
|
|
|
|
|
|
|
vq->sw_ring = sw_ring;
|
|
|
|
rxvq = (struct virtnet_rx *)RTE_PTR_ADD(vq, sz_vq);
|
|
|
|
rxvq->vq = vq;
|
|
|
|
rxvq->port_id = dev->data->port_id;
|
|
|
|
rxvq->queue_id = queue_idx;
|
|
|
|
rxvq->mz = mz;
|
|
|
|
*pvq = rxvq;
|
|
|
|
} else if (queue_type == VTNET_TQ) {
|
|
|
|
txvq = (struct virtnet_tx *)RTE_PTR_ADD(vq, sz_vq);
|
|
|
|
txvq->vq = vq;
|
|
|
|
txvq->port_id = dev->data->port_id;
|
|
|
|
txvq->queue_id = queue_idx;
|
|
|
|
txvq->mz = mz;
|
|
|
|
txvq->virtio_net_hdr_mz = hdr_mz;
|
|
|
|
txvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
|
2016-03-04 18:19:19 +00:00
|
|
|
|
net/virtio: allow virtual address to fill vring descriptors
This patch is related to how to calculate relative address for vhost
backend.
The principle is that: based on one or multiple shared memory regions,
vhost maintains a reference system with the frontend start address,
backend start address, and length for each segment, so that each
frontend address (GPA, Guest Physical Address) can be translated into
vhost-recognizable backend address. To make the address translation
efficient, we need to maintain as few regions as possible. In the case
of VM, GPA is always locally continuous. But for some other case, like
virtio-user, GPA continuous is not guaranteed, therefore, we use virtual
address here.
It basically means:
a. when set_base_addr, VA address is used;
b. when preparing RX's descriptors, VA address is used;
c. when transmitting packets, VA is filled in TX's descriptors;
d. in TX and CQ's header, VA is used.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-06-15 09:03:21 +00:00
|
|
|
*pvq = txvq;
|
|
|
|
} else if (queue_type == VTNET_CQ) {
|
|
|
|
cvq = (struct virtnet_ctl *)RTE_PTR_ADD(vq, sz_vq);
|
|
|
|
cvq->vq = vq;
|
|
|
|
cvq->mz = mz;
|
|
|
|
cvq->virtio_net_hdr_mz = hdr_mz;
|
|
|
|
cvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
|
|
|
|
memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
|
|
|
|
*pvq = cvq;
|
|
|
|
}
|
|
|
|
|
2016-07-22 02:24:47 +00:00
|
|
|
/* For virtio_user case (that is when dev->pci_dev is NULL), we use
|
net/virtio: allow virtual address to fill vring descriptors
This patch is related to how to calculate relative address for vhost
backend.
The principle is that: based on one or multiple shared memory regions,
vhost maintains a reference system with the frontend start address,
backend start address, and length for each segment, so that each
frontend address (GPA, Guest Physical Address) can be translated into
vhost-recognizable backend address. To make the address translation
efficient, we need to maintain as few regions as possible. In the case
of VM, GPA is always locally continuous. But for some other case, like
virtio-user, GPA continuous is not guaranteed, therefore, we use virtual
address here.
It basically means:
a. when set_base_addr, VA address is used;
b. when preparing RX's descriptors, VA address is used;
c. when transmitting packets, VA is filled in TX's descriptors;
d. in TX and CQ's header, VA is used.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-06-15 09:03:21 +00:00
|
|
|
* virtual address. And we need properly set _offset_, please see
|
2016-07-19 12:31:59 +00:00
|
|
|
* VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
|
net/virtio: allow virtual address to fill vring descriptors
This patch is related to how to calculate relative address for vhost
backend.
The principle is that: based on one or multiple shared memory regions,
vhost maintains a reference system with the frontend start address,
backend start address, and length for each segment, so that each
frontend address (GPA, Guest Physical Address) can be translated into
vhost-recognizable backend address. To make the address translation
efficient, we need to maintain as few regions as possible. In the case
of VM, GPA is always locally continuous. But for some other case, like
virtio-user, GPA continuous is not guaranteed, therefore, we use virtual
address here.
It basically means:
a. when set_base_addr, VA address is used;
b. when preparing RX's descriptors, VA address is used;
c. when transmitting packets, VA is filled in TX's descriptors;
d. in TX and CQ's header, VA is used.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-06-15 09:03:21 +00:00
|
|
|
*/
|
|
|
|
if (dev->pci_dev)
|
|
|
|
vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
|
|
|
|
else {
|
|
|
|
vq->vq_ring_mem = (uintptr_t)mz->addr;
|
|
|
|
vq->offset = offsetof(struct rte_mbuf, buf_addr);
|
|
|
|
if (queue_type == VTNET_TQ)
|
|
|
|
txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
|
|
|
|
else if (queue_type == VTNET_CQ)
|
|
|
|
cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (queue_type == VTNET_TQ) {
|
|
|
|
struct virtio_tx_region *txr;
|
|
|
|
unsigned int i;
|
|
|
|
|
2016-03-04 18:19:19 +00:00
|
|
|
txr = hdr_mz->addr;
|
|
|
|
memset(txr, 0, vq_size * sizeof(*txr));
|
|
|
|
for (i = 0; i < vq_size; i++) {
|
|
|
|
struct vring_desc *start_dp = txr[i].tx_indir;
|
|
|
|
|
|
|
|
vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));
|
|
|
|
|
|
|
|
/* first indirect descriptor is always the tx header */
|
2016-06-01 16:12:13 +00:00
|
|
|
start_dp->addr = txvq->virtio_net_hdr_mem
|
2016-03-04 18:19:19 +00:00
|
|
|
+ i * sizeof(*txr)
|
|
|
|
+ offsetof(struct virtio_tx_region, tx_hdr);
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
start_dp->len = hw->vtnet_hdr_size;
|
2016-03-04 18:19:19 +00:00
|
|
|
start_dp->flags = VRING_DESC_F_NEXT;
|
|
|
|
}
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
2016-06-15 09:03:20 +00:00
|
|
|
if (hw->vtpci_ops->setup_queue(hw, vq) < 0) {
|
|
|
|
PMD_INIT_LOG(ERR, "setup_queue failed");
|
|
|
|
virtio_dev_queue_release(vq);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2016-04-29 00:48:46 +00:00
|
|
|
vq->configured = 1;
|
2014-05-29 07:18:19 +00:00
|
|
|
return 0;
|
2016-06-01 16:12:13 +00:00
|
|
|
|
|
|
|
fail_q_alloc:
|
|
|
|
rte_free(sw_ring);
|
|
|
|
rte_memzone_free(hdr_mz);
|
|
|
|
rte_memzone_free(mz);
|
|
|
|
rte_free(vq);
|
|
|
|
|
|
|
|
return ret;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2014-05-29 07:18:20 +00:00
|
|
|
virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx,
|
|
|
|
uint32_t socket_id)
|
2013-09-18 10:00:00 +00:00
|
|
|
{
|
2016-06-01 16:12:13 +00:00
|
|
|
struct virtnet_ctl *cvq;
|
2013-09-18 10:00:00 +00:00
|
|
|
int ret;
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2013-09-18 10:00:00 +00:00
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
2014-05-29 07:18:20 +00:00
|
|
|
ret = virtio_dev_queue_setup(dev, VTNET_CQ, VTNET_SQ_CQ_QUEUE_IDX,
|
2016-06-01 16:12:13 +00:00
|
|
|
vtpci_queue_idx, 0, socket_id, (void **)&cvq);
|
2013-09-18 10:00:00 +00:00
|
|
|
if (ret < 0) {
|
2014-06-14 01:06:19 +00:00
|
|
|
PMD_INIT_LOG(ERR, "control vq initialization failed");
|
2013-09-18 10:00:00 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
hw->cvq = cvq;
|
2014-05-29 07:18:19 +00:00
|
|
|
return 0;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
2015-07-15 13:51:03 +00:00
|
|
|
static void
|
|
|
|
virtio_free_queues(struct rte_eth_dev *dev)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < dev->data->nb_rx_queues; i++)
|
|
|
|
virtio_dev_rx_queue_release(dev->data->rx_queues[i]);
|
|
|
|
|
|
|
|
dev->data->nb_rx_queues = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < dev->data->nb_tx_queues; i++)
|
|
|
|
virtio_dev_tx_queue_release(dev->data->tx_queues[i]);
|
|
|
|
|
|
|
|
dev->data->nb_tx_queues = 0;
|
|
|
|
}
|
|
|
|
|
2014-02-12 16:44:44 +00:00
|
|
|
/*
 * ethdev .dev_close callback: stop the datapath if still running,
 * quiesce interrupts, reset the device and free all queue resources.
 */
static void
virtio_dev_close(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	PMD_INIT_LOG(DEBUG, "virtio_dev_close");

	/* Stop the datapath first so nothing touches the rings below. */
	if (hw->started == 1)
		virtio_dev_stop(dev);

	/* reset the NIC */
	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		vtpci_irq_config(hw, VIRTIO_MSI_NO_VECTOR);
	vtpci_reset(hw);
	virtio_dev_free_mbufs(dev);
	virtio_free_queues(dev);
}
|
|
|
|
|
2014-11-08 04:26:15 +00:00
|
|
|
static void
|
|
|
|
virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
|
|
|
|
{
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2014-11-08 04:26:15 +00:00
|
|
|
struct virtio_pmd_ctrl ctrl;
|
|
|
|
int dlen[1];
|
|
|
|
int ret;
|
|
|
|
|
2015-06-11 15:53:24 +00:00
|
|
|
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
|
|
|
|
PMD_INIT_LOG(INFO, "host does not support rx control\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-11-08 04:26:15 +00:00
|
|
|
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
|
|
|
|
ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
|
|
|
|
ctrl.data[0] = 1;
|
|
|
|
dlen[0] = 1;
|
|
|
|
|
|
|
|
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
|
|
|
|
if (ret)
|
|
|
|
PMD_INIT_LOG(ERR, "Failed to enable promisc");
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
|
|
|
|
{
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2014-11-08 04:26:15 +00:00
|
|
|
struct virtio_pmd_ctrl ctrl;
|
|
|
|
int dlen[1];
|
|
|
|
int ret;
|
|
|
|
|
2015-06-11 15:53:24 +00:00
|
|
|
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
|
|
|
|
PMD_INIT_LOG(INFO, "host does not support rx control\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-11-08 04:26:15 +00:00
|
|
|
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
|
|
|
|
ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
|
|
|
|
ctrl.data[0] = 0;
|
|
|
|
dlen[0] = 1;
|
|
|
|
|
|
|
|
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
|
|
|
|
if (ret)
|
|
|
|
PMD_INIT_LOG(ERR, "Failed to disable promisc");
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
|
|
|
|
{
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2014-11-08 04:26:15 +00:00
|
|
|
struct virtio_pmd_ctrl ctrl;
|
|
|
|
int dlen[1];
|
|
|
|
int ret;
|
|
|
|
|
2015-06-11 15:53:24 +00:00
|
|
|
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
|
|
|
|
PMD_INIT_LOG(INFO, "host does not support rx control\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-11-08 04:26:15 +00:00
|
|
|
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
|
|
|
|
ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
|
|
|
|
ctrl.data[0] = 1;
|
|
|
|
dlen[0] = 1;
|
|
|
|
|
|
|
|
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
|
|
|
|
if (ret)
|
|
|
|
PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
|
|
|
|
{
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2014-11-08 04:26:15 +00:00
|
|
|
struct virtio_pmd_ctrl ctrl;
|
|
|
|
int dlen[1];
|
|
|
|
int ret;
|
|
|
|
|
2015-06-11 15:53:24 +00:00
|
|
|
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
|
|
|
|
PMD_INIT_LOG(INFO, "host does not support rx control\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-11-08 04:26:15 +00:00
|
|
|
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
|
|
|
|
ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
|
|
|
|
ctrl.data[0] = 0;
|
|
|
|
dlen[0] = 1;
|
|
|
|
|
|
|
|
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
|
|
|
|
if (ret)
|
|
|
|
PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
|
|
|
|
}
|
|
|
|
|
2016-10-09 03:38:26 +00:00
|
|
|
#define VLAN_TAG_LEN 4 /* 802.3ac tag (not DMA'd) */
|
|
|
|
static int
|
|
|
|
virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
|
|
|
{
|
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
|
|
|
uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
|
|
|
|
hw->vtnet_hdr_size;
|
|
|
|
uint32_t frame_size = mtu + ether_hdr_len;
|
|
|
|
|
|
|
|
if (mtu < ETHER_MIN_MTU || frame_size > VIRTIO_MAX_RX_PKTLEN) {
|
|
|
|
PMD_INIT_LOG(ERR, "MTU should be between %d and %d\n",
|
|
|
|
ETHER_MIN_MTU, VIRTIO_MAX_RX_PKTLEN - ether_hdr_len);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-09-18 10:00:00 +00:00
|
|
|
/*
|
|
|
|
* dev_ops for virtio, bare necessities for basic operation
|
|
|
|
*/
|
2015-04-07 21:21:03 +00:00
|
|
|
static const struct eth_dev_ops virtio_eth_dev_ops = {
	.dev_configure = virtio_dev_configure,
	.dev_start = virtio_dev_start,
	.dev_stop = virtio_dev_stop,
	.dev_close = virtio_dev_close,
	.promiscuous_enable = virtio_dev_promiscuous_enable,
	.promiscuous_disable = virtio_dev_promiscuous_disable,
	.allmulticast_enable = virtio_dev_allmulticast_enable,
	.allmulticast_disable = virtio_dev_allmulticast_disable,
	.mtu_set = virtio_mtu_set,
	.dev_infos_get = virtio_dev_info_get,
	.stats_get = virtio_dev_stats_get,
	.xstats_get = virtio_dev_xstats_get,
	.xstats_get_names = virtio_dev_xstats_get_names,
	.stats_reset = virtio_dev_stats_reset,
	/* xstats share the same reset handler as the basic stats */
	.xstats_reset = virtio_dev_stats_reset,
	.link_update = virtio_dev_link_update,
	.rx_queue_setup = virtio_dev_rx_queue_setup,
	.rx_queue_release = virtio_dev_rx_queue_release,
	.tx_queue_setup = virtio_dev_tx_queue_setup,
	.tx_queue_release = virtio_dev_tx_queue_release,
	/* collect stats per queue */
	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
	.vlan_filter_set = virtio_vlan_filter_set,
	.mac_addr_add = virtio_mac_addr_add,
	.mac_addr_remove = virtio_mac_addr_remove,
	.mac_addr_set = virtio_mac_addr_set,
};
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev,
|
|
|
|
struct rte_eth_link *link)
|
|
|
|
{
|
|
|
|
struct rte_eth_link *dst = link;
|
|
|
|
struct rte_eth_link *src = &(dev->data->dev_link);
|
|
|
|
|
|
|
|
if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
|
|
|
|
*(uint64_t *)src) == 0)
|
2014-06-13 01:32:40 +00:00
|
|
|
return -1;
|
2013-09-18 10:00:00 +00:00
|
|
|
|
2014-06-13 01:32:40 +00:00
|
|
|
return 0;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Atomically writes the link status information into global
|
|
|
|
* structure rte_eth_dev.
|
|
|
|
*
|
|
|
|
* @param dev
|
|
|
|
* - Pointer to the structure rte_eth_dev to read from.
|
|
|
|
* - Pointer to the buffer to be saved with the link status.
|
|
|
|
*
|
|
|
|
* @return
|
|
|
|
* - On success, zero.
|
|
|
|
* - On failure, negative value.
|
|
|
|
*/
|
|
|
|
static inline int
|
|
|
|
virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
|
|
|
|
struct rte_eth_link *link)
|
|
|
|
{
|
|
|
|
struct rte_eth_link *dst = &(dev->data->dev_link);
|
|
|
|
struct rte_eth_link *src = link;
|
|
|
|
|
|
|
|
if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
|
|
|
|
*(uint64_t *)src) == 0)
|
2014-06-13 01:32:40 +00:00
|
|
|
return -1;
|
2013-09-18 10:00:00 +00:00
|
|
|
|
2014-06-13 01:32:40 +00:00
|
|
|
return 0;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2015-11-02 10:19:00 +00:00
|
|
|
virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
|
2013-09-18 10:00:00 +00:00
|
|
|
{
|
2014-06-14 01:06:18 +00:00
|
|
|
unsigned i;
|
|
|
|
|
|
|
|
for (i = 0; i < dev->data->nb_tx_queues; i++) {
|
2016-06-01 16:12:13 +00:00
|
|
|
const struct virtnet_tx *txvq = dev->data->tx_queues[i];
|
2014-06-14 01:06:18 +00:00
|
|
|
if (txvq == NULL)
|
|
|
|
continue;
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
stats->opackets += txvq->stats.packets;
|
|
|
|
stats->obytes += txvq->stats.bytes;
|
|
|
|
stats->oerrors += txvq->stats.errors;
|
2014-06-14 01:06:18 +00:00
|
|
|
|
|
|
|
if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
|
2016-06-01 16:12:13 +00:00
|
|
|
stats->q_opackets[i] = txvq->stats.packets;
|
|
|
|
stats->q_obytes[i] = txvq->stats.bytes;
|
2014-06-14 01:06:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < dev->data->nb_rx_queues; i++) {
|
2016-06-01 16:12:13 +00:00
|
|
|
const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
|
2014-06-14 01:06:18 +00:00
|
|
|
if (rxvq == NULL)
|
|
|
|
continue;
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
stats->ipackets += rxvq->stats.packets;
|
|
|
|
stats->ibytes += rxvq->stats.bytes;
|
|
|
|
stats->ierrors += rxvq->stats.errors;
|
2014-06-14 01:06:18 +00:00
|
|
|
|
|
|
|
if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
|
2016-06-01 16:12:13 +00:00
|
|
|
stats->q_ipackets[i] = rxvq->stats.packets;
|
|
|
|
stats->q_ibytes[i] = rxvq->stats.bytes;
|
2014-06-14 01:06:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
2016-06-15 15:25:32 +00:00
|
|
|
static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
|
|
|
|
struct rte_eth_xstat_name *xstats_names,
|
|
|
|
__rte_unused unsigned limit)
|
|
|
|
{
|
|
|
|
unsigned i;
|
|
|
|
unsigned count = 0;
|
|
|
|
unsigned t;
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
|
|
|
|
dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
|
2016-06-15 15:25:32 +00:00
|
|
|
|
2016-06-20 10:43:32 +00:00
|
|
|
if (xstats_names != NULL) {
|
2016-06-15 15:25:32 +00:00
|
|
|
/* Note: limit checked in rte_eth_xstats_names() */
|
|
|
|
|
|
|
|
for (i = 0; i < dev->data->nb_rx_queues; i++) {
|
|
|
|
struct virtqueue *rxvq = dev->data->rx_queues[i];
|
|
|
|
if (rxvq == NULL)
|
|
|
|
continue;
|
2016-06-01 16:12:13 +00:00
|
|
|
for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
|
2016-06-15 15:25:32 +00:00
|
|
|
snprintf(xstats_names[count].name,
|
|
|
|
sizeof(xstats_names[count].name),
|
|
|
|
"rx_q%u_%s", i,
|
2016-06-01 16:12:13 +00:00
|
|
|
rte_virtio_rxq_stat_strings[t].name);
|
2016-06-15 15:25:32 +00:00
|
|
|
count++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < dev->data->nb_tx_queues; i++) {
|
|
|
|
struct virtqueue *txvq = dev->data->tx_queues[i];
|
|
|
|
if (txvq == NULL)
|
|
|
|
continue;
|
2016-06-01 16:12:13 +00:00
|
|
|
for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
|
2016-06-15 15:25:32 +00:00
|
|
|
snprintf(xstats_names[count].name,
|
|
|
|
sizeof(xstats_names[count].name),
|
|
|
|
"tx_q%u_%s", i,
|
2016-06-01 16:12:13 +00:00
|
|
|
rte_virtio_txq_stat_strings[t].name);
|
2016-06-15 15:25:32 +00:00
|
|
|
count++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
return nstats;
|
|
|
|
}
|
|
|
|
|
2015-11-02 10:19:00 +00:00
|
|
|
/*
 * ethdev .xstats_get callback.  If the caller's array is too small
 * (n < total entry count) the required size is returned without
 * filling anything.  Values are pulled out of each queue's stats
 * struct via the byte offsets recorded in the
 * rte_virtio_{rxq,txq}_stat_strings tables, in the same order as
 * virtio_dev_xstats_get_names() reports the names.
 */
static int
virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		      unsigned n)
{
	unsigned i;
	unsigned count = 0;

	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
		dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;

	if (n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct virtnet_rx *rxvq = dev->data->rx_queues[i];

		if (rxvq == NULL)
			continue;

		unsigned t;

		for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
			/* offset is relative to the start of the queue struct */
			xstats[count].value = *(uint64_t *)(((char *)rxvq) +
				rte_virtio_rxq_stat_strings[t].offset);
			count++;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct virtnet_tx *txvq = dev->data->tx_queues[i];

		if (txvq == NULL)
			continue;

		unsigned t;

		for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
			/* offset is relative to the start of the queue struct */
			xstats[count].value = *(uint64_t *)(((char *)txvq) +
				rte_virtio_txq_stat_strings[t].offset);
			count++;
		}
	}

	return count;
}
|
|
|
|
|
|
|
|
/* ethdev .stats_get callback: thin wrapper around virtio_update_stats(). */
static void
virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	virtio_update_stats(dev, stats);
}
|
|
|
|
|
2013-09-18 10:00:00 +00:00
|
|
|
static void
|
|
|
|
virtio_dev_stats_reset(struct rte_eth_dev *dev)
|
|
|
|
{
|
2014-06-14 01:06:18 +00:00
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < dev->data->nb_tx_queues; i++) {
|
2016-06-01 16:12:13 +00:00
|
|
|
struct virtnet_tx *txvq = dev->data->tx_queues[i];
|
2014-06-14 01:06:18 +00:00
|
|
|
if (txvq == NULL)
|
|
|
|
continue;
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
txvq->stats.packets = 0;
|
|
|
|
txvq->stats.bytes = 0;
|
|
|
|
txvq->stats.errors = 0;
|
|
|
|
txvq->stats.multicast = 0;
|
|
|
|
txvq->stats.broadcast = 0;
|
|
|
|
memset(txvq->stats.size_bins, 0,
|
|
|
|
sizeof(txvq->stats.size_bins[0]) * 8);
|
2014-06-14 01:06:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < dev->data->nb_rx_queues; i++) {
|
2016-06-01 16:12:13 +00:00
|
|
|
struct virtnet_rx *rxvq = dev->data->rx_queues[i];
|
2014-06-14 01:06:18 +00:00
|
|
|
if (rxvq == NULL)
|
|
|
|
continue;
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
rxvq->stats.packets = 0;
|
|
|
|
rxvq->stats.bytes = 0;
|
|
|
|
rxvq->stats.errors = 0;
|
|
|
|
rxvq->stats.multicast = 0;
|
|
|
|
rxvq->stats.broadcast = 0;
|
|
|
|
memset(rxvq->stats.size_bins, 0,
|
|
|
|
sizeof(rxvq->stats.size_bins[0]) * 8);
|
2014-06-14 01:06:18 +00:00
|
|
|
}
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Write hw->mac_addr into the device's config-space MAC field. */
static void
virtio_set_hwaddr(struct virtio_hw *hw)
{
	vtpci_write_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&hw->mac_addr, ETHER_ADDR_LEN);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
virtio_get_hwaddr(struct virtio_hw *hw)
|
|
|
|
{
|
|
|
|
if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
|
|
|
|
vtpci_read_dev_config(hw,
|
|
|
|
offsetof(struct virtio_net_config, mac),
|
|
|
|
&hw->mac_addr, ETHER_ADDR_LEN);
|
|
|
|
} else {
|
|
|
|
eth_random_addr(&hw->mac_addr[0]);
|
|
|
|
virtio_set_hwaddr(hw);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-11 15:53:25 +00:00
|
|
|
static void
|
2015-02-09 01:14:03 +00:00
|
|
|
virtio_mac_table_set(struct virtio_hw *hw,
|
|
|
|
const struct virtio_net_ctrl_mac *uc,
|
|
|
|
const struct virtio_net_ctrl_mac *mc)
|
|
|
|
{
|
|
|
|
struct virtio_pmd_ctrl ctrl;
|
|
|
|
int err, len[2];
|
|
|
|
|
2015-06-11 15:53:25 +00:00
|
|
|
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
|
2016-04-19 05:22:37 +00:00
|
|
|
PMD_DRV_LOG(INFO, "host does not support mac table");
|
2015-06-11 15:53:25 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2015-02-09 01:14:03 +00:00
|
|
|
ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
|
|
|
|
ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
|
|
|
|
|
|
|
|
len[0] = uc->entries * ETHER_ADDR_LEN + sizeof(uc->entries);
|
|
|
|
memcpy(ctrl.data, uc, len[0]);
|
|
|
|
|
|
|
|
len[1] = mc->entries * ETHER_ADDR_LEN + sizeof(mc->entries);
|
|
|
|
memcpy(ctrl.data + len[0], mc, len[1]);
|
|
|
|
|
|
|
|
err = virtio_send_command(hw->cvq, &ctrl, len, 2);
|
|
|
|
if (err != 0)
|
|
|
|
PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
|
|
|
|
uint32_t index, uint32_t vmdq __rte_unused)
|
|
|
|
{
|
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
|
|
|
const struct ether_addr *addrs = dev->data->mac_addrs;
|
|
|
|
unsigned int i;
|
|
|
|
struct virtio_net_ctrl_mac *uc, *mc;
|
|
|
|
|
|
|
|
if (index >= VIRTIO_MAX_MAC_ADDRS) {
|
|
|
|
PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
|
|
|
|
uc->entries = 0;
|
|
|
|
mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
|
|
|
|
mc->entries = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
|
|
|
|
const struct ether_addr *addr
|
|
|
|
= (i == index) ? mac_addr : addrs + i;
|
|
|
|
struct virtio_net_ctrl_mac *tbl
|
|
|
|
= is_multicast_ether_addr(addr) ? mc : uc;
|
|
|
|
|
|
|
|
memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
|
|
|
|
}
|
|
|
|
|
|
|
|
virtio_mac_table_set(hw, uc, mc);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
|
|
|
|
{
|
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
|
|
|
struct ether_addr *addrs = dev->data->mac_addrs;
|
|
|
|
struct virtio_net_ctrl_mac *uc, *mc;
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
if (index >= VIRTIO_MAX_MAC_ADDRS) {
|
|
|
|
PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
|
|
|
|
uc->entries = 0;
|
|
|
|
mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
|
|
|
|
mc->entries = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
|
|
|
|
struct virtio_net_ctrl_mac *tbl;
|
|
|
|
|
|
|
|
if (i == index || is_zero_ether_addr(addrs + i))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
tbl = is_multicast_ether_addr(addrs + i) ? mc : uc;
|
|
|
|
memcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN);
|
|
|
|
}
|
|
|
|
|
|
|
|
virtio_mac_table_set(hw, uc, mc);
|
|
|
|
}
|
|
|
|
|
2015-02-09 01:14:04 +00:00
|
|
|
/*
 * ethdev .mac_addr_set callback: replace the primary MAC address.
 * Prefers the atomic VIRTIO_NET_CTRL_MAC_ADDR_SET control command when
 * the host offers it; otherwise falls back to writing the config space
 * directly (only possible when VIRTIO_NET_F_MAC was negotiated).
 */
static void
virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct virtio_hw *hw = dev->data->dev_private;

	memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);

	/* Use atomic update if available */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		struct virtio_pmd_ctrl ctrl;
		int len = ETHER_ADDR_LEN;

		ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
		ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;

		memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
		virtio_send_command(hw->cvq, &ctrl, &len, 1);
	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
		virtio_set_hwaddr(hw);
}
|
|
|
|
|
2015-02-09 01:14:02 +00:00
|
|
|
static int
|
|
|
|
virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
|
|
|
|
{
|
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
|
|
|
struct virtio_pmd_ctrl ctrl;
|
|
|
|
int len;
|
|
|
|
|
|
|
|
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
|
|
|
|
return -ENOTSUP;
|
|
|
|
|
|
|
|
ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
|
|
|
|
ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
|
|
|
|
memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
|
|
|
|
len = sizeof(vlan_id);
|
|
|
|
|
|
|
|
return virtio_send_command(hw->cvq, &ctrl, &len, 1);
|
|
|
|
}
|
2013-09-18 10:00:00 +00:00
|
|
|
|
virtio: support specification 1.0
Modern (v1.0) virtio pci device defines several pci capabilities.
Each cap has a configure structure corresponding to it, and the
cap.bar and cap.offset fields tell us where to find it.
Firstly, we map the pci resources by rte_eal_pci_map_device().
We then could easily locate a cfg structure by:
cfg_addr = dev->mem_resources[cap.bar].addr + cap.offset;
Therefore, the entrance of enabling modern (v1.0) pci device support
is to iterate the pci capability lists, and to locate some configs
we care; and they are:
- common cfg
For generic virtio and virtqueue configuration, such as setting/getting
features, enabling a specific queue, and so on.
- nofity cfg
Combining with `queue_notify_off' from common cfg, we could use it to
notify a specific virt queue.
- device cfg
Where virtio_net_config structure is located.
- isr cfg
Where to read isr (interrupt status).
If any of above cap is not found, we fallback to the legacy virtio
handling.
If succeed, hw->vtpci_ops is assigned to modern_ops, where all
operations are implemented by reading/writing a (or few) specific
configuration space from above 4 cfg structures. And that's basically
how this patch works.
Besides those changes, virtio 1.0 introduces a new status field:
FEATURES_OK, which is set after features negotiation is done.
Last, set the VIRTIO_F_VERSION_1 feature flag.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Qian Xu <qian.q.xu@intel.com>
Reviewed-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Tested-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-02-02 13:48:19 +00:00
|
|
|
/*
 * Negotiate the feature set with the host.  For modern (virtio 1.0)
 * devices the spec additionally requires VIRTIO_F_VERSION_1 to be part
 * of the negotiated set, and the FEATURES_OK status bit must be set and
 * then read back to confirm the host accepted the features.
 *
 * Returns 0 on success, -1 on any 1.0 negotiation failure.
 */
static int
virtio_negotiate_features(struct virtio_hw *hw)
{
	uint64_t host_features;

	/* Prepare guest_features: feature that driver wants to support */
	hw->guest_features = VIRTIO_PMD_GUEST_FEATURES;
	PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
		hw->guest_features);

	/* Read device(host) feature bits */
	host_features = hw->vtpci_ops->get_features(hw);
	PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
		host_features);

	/*
	 * Negotiate features: Subset of device feature bits are written back
	 * guest feature bits.
	 */
	hw->guest_features = vtpci_negotiate_features(hw, host_features);
	PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
		hw->guest_features);

	if (hw->modern) {
		/* Virtio 1.0 mandates the VERSION_1 feature bit. */
		if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
			PMD_INIT_LOG(ERR,
				"VIRTIO_F_VERSION_1 features is not enabled.");
			return -1;
		}
		/* Latch the features, then read back to confirm acceptance. */
		vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
		if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
			PMD_INIT_LOG(ERR,
				"failed to set FEATURES_OK status!");
			return -1;
		}
	}

	return 0;
}
|
|
|
|
|
2015-02-09 01:13:53 +00:00
|
|
|
/*
 * Process Virtio Config changed interrupt and call the callback
 * if link state changed.
 */
static void
virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
			 void *param)
{
	struct rte_eth_dev *dev = param;
	struct virtio_hw *hw = dev->data->dev_private;
	uint8_t isr;

	/* Read interrupt status which clears interrupt */
	isr = vtpci_isr(hw);
	PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);

	/* Re-arm the interrupt before handling, so no event is lost. */
	if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0)
		PMD_DRV_LOG(ERR, "interrupt enable failed");

	/* Only the config-change bit triggers an LSC callback. */
	if (isr & VIRTIO_PCI_ISR_CONFIG) {
		if (virtio_dev_link_update(dev, 0) == 0)
			_rte_eth_dev_callback_process(dev,
						      RTE_ETH_EVENT_INTR_LSC);
	}

}
|
|
|
|
|
2015-03-27 13:23:15 +00:00
|
|
|
static void
|
|
|
|
rx_func_get(struct rte_eth_dev *eth_dev)
|
|
|
|
{
|
|
|
|
struct virtio_hw *hw = eth_dev->data->dev_private;
|
|
|
|
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
|
|
|
|
eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
|
|
|
|
else
|
|
|
|
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
|
|
|
|
}
|
|
|
|
|
2016-10-13 14:16:00 +00:00
|
|
|
/*
 * Reset the device and bring it to a usable state: perform the virtio
 * status handshake, negotiate features, read the device configuration
 * (MAC, link status, queue count) and size the Rx/Tx queue limits.
 *
 * Returns 0 on success, -1 if feature negotiation fails.
 */
static int
virtio_init_device(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;
	struct virtio_net_config *config;
	struct virtio_net_config local_config;
	/* May be NULL for the virtio-user virtual device (see the
	 * `if (pci_dev)` guard before the vendor/device-id log below). */
	struct rte_pci_device *pci_dev = eth_dev->pci_dev;

	/* Reset the device although not necessary at startup */
	vtpci_reset(hw);

	/* Tell the host we've noticed this device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we've known how to drive the device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);

	if (virtio_negotiate_features(hw) < 0)
		return -1;

	/* If host does not support status then disable LSC */
	if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
		eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
	else
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Pick the Rx burst function matching the negotiated features. */
	rx_func_get(eth_dev);

	/* Setting up rx_header size for the device.
	 * Mergeable buffers and virtio 1.0 both use the larger header
	 * (virtio_net_hdr_mrg_rxbuf). */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	/* Copy the permanent MAC address to: virtio_hw */
	virtio_get_hwaddr(hw);
	ether_addr_copy((struct ether_addr *) hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);
	PMD_INIT_LOG(DEBUG,
		     "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
		/* Control VQ present: read the real device config fields. */
		config = &local_config;

		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&config->mac, sizeof(config->mac));

		if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&config->status, sizeof(config->status));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "VIRTIO_NET_F_STATUS is not supported");
			config->status = 0;
		}

		if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, max_virtqueue_pairs),
				&config->max_virtqueue_pairs,
				sizeof(config->max_virtqueue_pairs));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "VIRTIO_NET_F_MQ is not supported");
			config->max_virtqueue_pairs = 1;
		}

		/* Clamp the advertised queue-pair count to the PMD limits. */
		hw->max_rx_queues =
			(VIRTIO_MAX_RX_QUEUES < config->max_virtqueue_pairs) ?
			VIRTIO_MAX_RX_QUEUES : config->max_virtqueue_pairs;
		hw->max_tx_queues =
			(VIRTIO_MAX_TX_QUEUES < config->max_virtqueue_pairs) ?
			VIRTIO_MAX_TX_QUEUES : config->max_virtqueue_pairs;

		/* Control queue sits after all data queues: index = pairs*2. */
		virtio_dev_cq_queue_setup(eth_dev,
					config->max_virtqueue_pairs * 2,
					SOCKET_ID_ANY);

		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
				config->max_virtqueue_pairs);
		PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
		PMD_INIT_LOG(DEBUG,
				"PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
				config->mac[0], config->mac[1],
				config->mac[2], config->mac[3],
				config->mac[4], config->mac[5]);
	} else {
		/* No control VQ: single queue pair only. */
		hw->max_rx_queues = 1;
		hw->max_tx_queues = 1;
	}

	PMD_INIT_LOG(DEBUG, "hw->max_rx_queues=%d hw->max_tx_queues=%d",
			hw->max_rx_queues, hw->max_tx_queues);
	if (pci_dev)
		PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	virtio_dev_cq_start(eth_dev);

	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This function is based on probe() function in virtio_pci.c
|
|
|
|
* It returns 0 on success.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
|
|
|
|
{
|
|
|
|
struct virtio_hw *hw = eth_dev->data->dev_private;
|
|
|
|
struct rte_pci_device *pci_dev;
|
|
|
|
uint32_t dev_flags = RTE_ETH_DEV_DETACHABLE;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
|
|
|
|
|
|
|
|
eth_dev->dev_ops = &virtio_eth_dev_ops;
|
|
|
|
eth_dev->tx_pkt_burst = &virtio_xmit_pkts;
|
|
|
|
|
|
|
|
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
|
|
|
|
rx_func_get(eth_dev);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Allocate memory for storing MAC addresses */
|
|
|
|
eth_dev->data->mac_addrs = rte_zmalloc("virtio", VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
|
|
|
|
if (eth_dev->data->mac_addrs == NULL) {
|
|
|
|
PMD_INIT_LOG(ERR,
|
|
|
|
"Failed to allocate %d bytes needed to store MAC addresses",
|
|
|
|
VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
pci_dev = eth_dev->pci_dev;
|
|
|
|
|
|
|
|
if (pci_dev) {
|
|
|
|
ret = vtpci_init(pci_dev, hw, &dev_flags);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
eth_dev->data->dev_flags = dev_flags;
|
|
|
|
|
|
|
|
/* reset device and negotiate features */
|
|
|
|
ret = virtio_init_device(eth_dev);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
2015-02-09 01:13:53 +00:00
|
|
|
/* Setup interrupt callback */
|
2016-05-09 16:35:57 +00:00
|
|
|
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
|
2015-02-09 01:14:06 +00:00
|
|
|
rte_intr_callback_register(&pci_dev->intr_handle,
|
2016-10-13 14:16:00 +00:00
|
|
|
virtio_interrupt_handler, eth_dev);
|
2015-02-09 01:13:58 +00:00
|
|
|
|
2014-05-29 07:18:19 +00:00
|
|
|
return 0;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
2015-07-15 13:51:00 +00:00
|
|
|
static int
|
|
|
|
eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
|
|
|
|
{
|
|
|
|
struct rte_pci_device *pci_dev;
|
|
|
|
struct virtio_hw *hw = eth_dev->data->dev_private;
|
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
|
|
|
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
|
|
|
|
return -EPERM;
|
|
|
|
|
2016-01-11 06:16:13 +00:00
|
|
|
/* Close it anyway since there's no way to know if closed */
|
|
|
|
virtio_dev_close(eth_dev);
|
|
|
|
|
2015-07-15 13:51:00 +00:00
|
|
|
pci_dev = eth_dev->pci_dev;
|
|
|
|
|
|
|
|
eth_dev->dev_ops = NULL;
|
|
|
|
eth_dev->tx_pkt_burst = NULL;
|
|
|
|
eth_dev->rx_pkt_burst = NULL;
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
if (hw->cvq)
|
|
|
|
virtio_dev_queue_release(hw->cvq->vq);
|
2015-07-15 13:51:00 +00:00
|
|
|
|
|
|
|
rte_free(eth_dev->data->mac_addrs);
|
|
|
|
eth_dev->data->mac_addrs = NULL;
|
|
|
|
|
|
|
|
/* reset interrupt callback */
|
2016-05-09 16:35:57 +00:00
|
|
|
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
|
2015-07-15 13:51:00 +00:00
|
|
|
rte_intr_callback_unregister(&pci_dev->intr_handle,
|
|
|
|
virtio_interrupt_handler,
|
|
|
|
eth_dev);
|
virtio: support specification 1.0
Modern (v1.0) virtio pci device defines several pci capabilities.
Each cap has a configure structure corresponding to it, and the
cap.bar and cap.offset fields tell us where to find it.
Firstly, we map the pci resources by rte_eal_pci_map_device().
We then could easily locate a cfg structure by:
cfg_addr = dev->mem_resources[cap.bar].addr + cap.offset;
Therefore, the entrance of enabling modern (v1.0) pci device support
is to iterate the pci capability lists, and to locate some configs
we care; and they are:
- common cfg
For generic virtio and virtqueue configuration, such as setting/getting
features, enabling a specific queue, and so on.
- nofity cfg
Combining with `queue_notify_off' from common cfg, we could use it to
notify a specific virt queue.
- device cfg
Where virtio_net_config structure is located.
- isr cfg
Where to read isr (interrupt status).
If any of above cap is not found, we fallback to the legacy virtio
handling.
If succeed, hw->vtpci_ops is assigned to modern_ops, where all
operations are implemented by reading/writing a (or few) specific
configuration space from above 4 cfg structures. And that's basically
how this patch works.
Besides those changes, virtio 1.0 introduces a new status field:
FEATURES_OK, which is set after features negotiation is done.
Last, set the VIRTIO_F_VERSION_1 feature flag.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Qian Xu <qian.q.xu@intel.com>
Reviewed-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Tested-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-02-02 13:48:19 +00:00
|
|
|
rte_eal_pci_unmap_device(pci_dev);
|
2015-07-15 13:51:00 +00:00
|
|
|
|
|
|
|
PMD_INIT_LOG(DEBUG, "dev_uninit completed");
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-09-18 10:00:00 +00:00
|
|
|
/* Ethernet driver descriptor for the virtio PCI PMD: binds the PCI id
 * table to the per-port init/uninit callbacks and sizes the private
 * data area (one struct virtio_hw per port). */
static struct eth_driver rte_virtio_pmd = {
	.pci_drv = {
		.driver = {
			.name = "net_virtio",
		},
		.id_table = pci_id_virtio_map,
		.drv_flags = RTE_PCI_DRV_DETACHABLE,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_virtio_dev_init,
	.eth_dev_uninit = eth_virtio_dev_uninit,
	.dev_private_size = sizeof(struct virtio_hw),
};
|
|
|
|
|
2016-09-20 12:41:20 +00:00
|
|
|
/* Constructor: request I/O port access (needed for legacy virtio PIO)
 * and register the PMD with the PCI subsystem. Registration is skipped
 * entirely when IOPL cannot be acquired. */
RTE_INIT(rte_virtio_pmd_init);
static void
rte_virtio_pmd_init(void)
{
	if (rte_eal_iopl_init() != 0) {
		PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
		return;
	}

	rte_eal_pci_register(&rte_virtio_pmd.pci_drv);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Configure virtio device
|
|
|
|
* It returns 0 on success.
|
|
|
|
*/
|
|
|
|
static int
|
2014-06-14 01:06:22 +00:00
|
|
|
virtio_dev_configure(struct rte_eth_dev *dev)
|
2013-09-18 10:00:00 +00:00
|
|
|
{
|
2014-06-14 01:06:22 +00:00
|
|
|
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2014-06-14 01:06:22 +00:00
|
|
|
|
|
|
|
PMD_INIT_LOG(DEBUG, "configure");
|
|
|
|
|
|
|
|
if (rxmode->hw_ip_checksum) {
|
|
|
|
PMD_DRV_LOG(ERR, "HW IP checksum not supported");
|
2016-01-27 13:58:30 +00:00
|
|
|
return -EINVAL;
|
2014-06-14 01:06:22 +00:00
|
|
|
}
|
|
|
|
|
2015-02-09 01:13:55 +00:00
|
|
|
hw->vlan_strip = rxmode->hw_vlan_strip;
|
|
|
|
|
2015-02-09 01:14:02 +00:00
|
|
|
if (rxmode->hw_vlan_filter
|
|
|
|
&& !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
|
|
|
|
PMD_DRV_LOG(NOTICE,
|
|
|
|
"vlan filtering not available on this host");
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
2016-05-09 16:35:57 +00:00
|
|
|
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
|
2015-02-09 01:14:06 +00:00
|
|
|
if (vtpci_irq_config(hw, 0) == VIRTIO_MSI_NO_VECTOR) {
|
|
|
|
PMD_DRV_LOG(ERR, "failed to set config vector");
|
|
|
|
return -EBUSY;
|
|
|
|
}
|
2015-02-09 01:13:53 +00:00
|
|
|
|
2015-02-09 01:13:58 +00:00
|
|
|
return 0;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Start the device: enable the LSC interrupt if requested, refresh the
 * link state, finish queue setup on a first start, and kick every Rx
 * queue so the backend knows buffers are available.
 *
 * Returns 0 on success, -ENOTSUP/-EIO/-EINVAL on failure.
 */
static int
virtio_dev_start(struct rte_eth_dev *dev)
{
	uint16_t nb_queues, i;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtnet_rx *rxvq;
	struct virtnet_tx *txvq __rte_unused;

	/* check if lsc interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc) {
		if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
			PMD_DRV_LOG(ERR, "link status not supported by host");
			return -ENOTSUP;
		}

		if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -EIO;
		}
	}

	/* Initialize Link state */
	virtio_dev_link_update(dev, 0);

	/* On restart after stop do not touch queues: they were left intact
	 * by virtio_dev_stop(), which only clears hw->started. */
	if (hw->started)
		return 0;

	/* Do final configuration before rx/tx engine starts */
	virtio_dev_rxtx_start(dev);
	vtpci_reinit_complete(hw);

	hw->started = 1;

	/* Notify the backend.
	 * Otherwise the tap backend might already stop its queue due to
	 * fullness, and the vhost backend would have no chance to be
	 * woken up. */
	nb_queues = dev->data->nb_rx_queues;
	if (nb_queues > 1) {
		/* Multi-queue needs an explicit control-queue command. */
		if (virtio_set_multiple_queues(dev, nb_queues) != 0)
			return -EINVAL;
	}

	PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);

	/* Kick every Rx queue so the backend starts consuming. */
	for (i = 0; i < nb_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		virtqueue_notify(rxvq->vq);
	}

	PMD_INIT_LOG(DEBUG, "Notified backend at initialization");

	/* Debug dumps of the final queue state (no-ops unless enabled). */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxvq = dev->data->rx_queues[i];
		VIRTQUEUE_DUMP(rxvq->vq);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txvq = dev->data->tx_queues[i];
		VIRTQUEUE_DUMP(txvq->vq);
	}

	return 0;
}
|
|
|
|
|
|
|
|
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
|
|
|
|
{
|
2014-05-29 07:18:20 +00:00
|
|
|
struct rte_mbuf *buf;
|
|
|
|
int i, mbuf_num = 0;
|
2014-06-13 01:32:40 +00:00
|
|
|
|
2014-05-29 07:18:20 +00:00
|
|
|
for (i = 0; i < dev->data->nb_rx_queues; i++) {
|
2016-06-01 16:12:13 +00:00
|
|
|
struct virtnet_rx *rxvq = dev->data->rx_queues[i];
|
|
|
|
|
2014-06-13 01:32:40 +00:00
|
|
|
PMD_INIT_LOG(DEBUG,
|
2014-06-14 01:06:19 +00:00
|
|
|
"Before freeing rxq[%d] used and unused buf", i);
|
2016-06-01 16:12:13 +00:00
|
|
|
VIRTQUEUE_DUMP(rxvq->vq);
|
2014-05-29 07:18:20 +00:00
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p", i, rxvq);
|
|
|
|
while ((buf = virtqueue_detatch_unused(rxvq->vq)) != NULL) {
|
2014-08-14 08:54:35 +00:00
|
|
|
rte_pktmbuf_free(buf);
|
2014-05-29 07:18:20 +00:00
|
|
|
mbuf_num++;
|
|
|
|
}
|
|
|
|
|
2014-06-14 01:06:19 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
|
2014-06-13 01:32:40 +00:00
|
|
|
PMD_INIT_LOG(DEBUG,
|
2014-06-14 01:06:19 +00:00
|
|
|
"After freeing rxq[%d] used and unused buf", i);
|
2016-06-01 16:12:13 +00:00
|
|
|
VIRTQUEUE_DUMP(rxvq->vq);
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
2014-05-29 07:18:20 +00:00
|
|
|
|
|
|
|
for (i = 0; i < dev->data->nb_tx_queues; i++) {
|
2016-06-01 16:12:13 +00:00
|
|
|
struct virtnet_tx *txvq = dev->data->tx_queues[i];
|
|
|
|
|
2014-06-13 01:32:40 +00:00
|
|
|
PMD_INIT_LOG(DEBUG,
|
2014-06-14 01:06:19 +00:00
|
|
|
"Before freeing txq[%d] used and unused bufs",
|
2014-06-13 01:32:40 +00:00
|
|
|
i);
|
2016-06-01 16:12:13 +00:00
|
|
|
VIRTQUEUE_DUMP(txvq->vq);
|
2014-05-29 07:18:20 +00:00
|
|
|
|
|
|
|
mbuf_num = 0;
|
2016-06-01 16:12:13 +00:00
|
|
|
while ((buf = virtqueue_detatch_unused(txvq->vq)) != NULL) {
|
2014-08-14 08:54:35 +00:00
|
|
|
rte_pktmbuf_free(buf);
|
2014-05-29 07:18:20 +00:00
|
|
|
mbuf_num++;
|
|
|
|
}
|
|
|
|
|
2014-06-14 01:06:19 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
|
|
|
|
PMD_INIT_LOG(DEBUG,
|
|
|
|
"After freeing txq[%d] used and unused buf", i);
|
2016-06-01 16:12:13 +00:00
|
|
|
VIRTQUEUE_DUMP(txvq->vq);
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2015-02-09 01:13:58 +00:00
|
|
|
* Stop device: disable interrupt and mark link down
|
2013-09-18 10:00:00 +00:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
virtio_dev_stop(struct rte_eth_dev *dev)
|
|
|
|
{
|
2015-02-09 01:13:58 +00:00
|
|
|
struct rte_eth_link link;
|
2016-01-11 06:16:13 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2013-09-18 10:00:00 +00:00
|
|
|
|
2015-02-09 01:13:58 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "stop");
|
|
|
|
|
2016-01-11 06:16:13 +00:00
|
|
|
hw->started = 0;
|
|
|
|
|
2015-02-09 01:13:58 +00:00
|
|
|
if (dev->data->dev_conf.intr_conf.lsc)
|
|
|
|
rte_intr_disable(&dev->pci_dev->intr_handle);
|
|
|
|
|
|
|
|
memset(&link, 0, sizeof(link));
|
|
|
|
virtio_dev_atomic_write_link_status(dev, &link);
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
|
|
|
|
{
|
|
|
|
struct rte_eth_link link, old;
|
|
|
|
uint16_t status;
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2013-09-18 10:00:00 +00:00
|
|
|
memset(&link, 0, sizeof(link));
|
|
|
|
virtio_dev_atomic_read_link_status(dev, &link);
|
|
|
|
old = link;
|
2016-03-31 22:12:25 +00:00
|
|
|
link.link_duplex = ETH_LINK_FULL_DUPLEX;
|
2014-06-13 01:32:40 +00:00
|
|
|
link.link_speed = SPEED_10G;
|
2015-02-09 01:13:53 +00:00
|
|
|
|
2014-06-13 01:32:40 +00:00
|
|
|
if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
|
2014-06-14 01:06:19 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "Get link status from hw");
|
2013-09-18 10:00:00 +00:00
|
|
|
vtpci_read_dev_config(hw,
|
|
|
|
offsetof(struct virtio_net_config, status),
|
|
|
|
&status, sizeof(status));
|
2014-06-13 01:32:40 +00:00
|
|
|
if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
|
2016-03-31 22:12:24 +00:00
|
|
|
link.link_status = ETH_LINK_DOWN;
|
2014-06-14 01:06:19 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "Port %d is down",
|
2014-06-13 01:32:40 +00:00
|
|
|
dev->data->port_id);
|
2013-09-18 10:00:00 +00:00
|
|
|
} else {
|
2016-03-31 22:12:24 +00:00
|
|
|
link.link_status = ETH_LINK_UP;
|
2014-06-14 01:06:19 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "Port %d is up",
|
2014-06-13 01:32:40 +00:00
|
|
|
dev->data->port_id);
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
} else {
|
2016-03-31 22:12:24 +00:00
|
|
|
link.link_status = ETH_LINK_UP;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
virtio_dev_atomic_write_link_status(dev, &link);
|
2015-02-09 01:13:53 +00:00
|
|
|
|
|
|
|
return (old.link_status == link.link_status) ? -1 : 0;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Fill dev_info with the PMD's static capabilities: driver name
 * (PCI name, or a fixed string for the virtio-user device), queue and
 * MAC limits, and default Tx configuration (no offloads).
 */
static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct virtio_hw *hw = dev->data->dev_private;

	/* pci_dev is NULL for the virtio-user virtual device. */
	if (dev->pci_dev)
		dev_info->driver_name = dev->driver->pci_drv.driver.name;
	else
		dev_info->driver_name = "virtio_user PMD";
	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
	dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
	dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
	dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
	dev_info->default_txconf = (struct rte_eth_txconf) {
		.txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
	};
}
|
2014-04-21 14:59:37 +00:00
|
|
|
|
2014-05-29 07:18:20 +00:00
|
|
|
/*
 * It enables testpmd to collect per queue stats.
 *
 * Intentional no-op: the callback only needs to exist (and return
 * success) for per-queue stats collection to be allowed.
 */
static int
virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
__rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
__rte_unused uint8_t is_rx)
{
	return 0;
}
|
|
|
|
|
2016-09-20 12:41:20 +00:00
|
|
|
/* Export the PMD name and associate its PCI id table for the EAL
 * driver registry. */
DRIVER_EXPORT_NAME(net_virtio, __COUNTER__);
DRIVER_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
|