2017-12-19 15:49:01 +00:00
|
|
|
/* SPDX-License-Identifier: BSD-3-Clause
|
|
|
|
* Copyright(c) 2010-2016 Intel Corporation
|
2013-09-18 10:00:00 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <errno.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
|
2018-01-22 00:16:22 +00:00
|
|
|
#include <rte_ethdev_driver.h>
|
2017-04-11 15:44:24 +00:00
|
|
|
#include <rte_ethdev_pci.h>
|
2013-09-18 10:00:00 +00:00
|
|
|
#include <rte_memcpy.h>
|
|
|
|
#include <rte_string_fns.h>
|
|
|
|
#include <rte_memzone.h>
|
|
|
|
#include <rte_malloc.h>
|
|
|
|
#include <rte_branch_prediction.h>
|
|
|
|
#include <rte_pci.h>
|
2017-10-26 10:06:08 +00:00
|
|
|
#include <rte_bus_pci.h>
|
2013-09-18 10:00:00 +00:00
|
|
|
#include <rte_ether.h>
|
2018-01-18 02:20:38 +00:00
|
|
|
#include <rte_ip.h>
|
|
|
|
#include <rte_arp.h>
|
2013-09-18 10:00:00 +00:00
|
|
|
#include <rte_common.h>
|
2015-07-15 13:51:00 +00:00
|
|
|
#include <rte_errno.h>
|
2017-09-07 12:13:44 +00:00
|
|
|
#include <rte_cpuflags.h>
|
2013-09-18 10:00:00 +00:00
|
|
|
|
|
|
|
#include <rte_memory.h>
|
|
|
|
#include <rte_eal.h>
|
2014-04-21 14:59:37 +00:00
|
|
|
#include <rte_dev.h>
|
2018-01-10 01:23:53 +00:00
|
|
|
#include <rte_cycles.h>
|
2018-04-17 07:06:22 +00:00
|
|
|
#include <rte_kvargs.h>
|
2013-09-18 10:00:00 +00:00
|
|
|
|
|
|
|
#include "virtio_ethdev.h"
|
|
|
|
#include "virtio_pci.h"
|
|
|
|
#include "virtio_logs.h"
|
|
|
|
#include "virtqueue.h"
|
2015-10-29 14:53:22 +00:00
|
|
|
#include "virtio_rxtx.h"
|
2019-06-05 09:43:41 +00:00
|
|
|
#include "virtio_user/virtio_user_dev.h"
|
2013-09-18 10:00:00 +00:00
|
|
|
|
2015-07-15 13:51:00 +00:00
|
|
|
static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
|
2013-09-18 10:00:00 +00:00
|
|
|
static int virtio_dev_configure(struct rte_eth_dev *dev);
|
|
|
|
static int virtio_dev_start(struct rte_eth_dev *dev);
|
|
|
|
static void virtio_dev_stop(struct rte_eth_dev *dev);
|
2019-09-14 11:37:24 +00:00
|
|
|
static int virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
|
|
|
|
static int virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
|
2019-09-24 12:56:10 +00:00
|
|
|
static int virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
|
|
|
|
static int virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
|
2019-09-12 16:42:28 +00:00
|
|
|
static int virtio_dev_info_get(struct rte_eth_dev *dev,
|
2013-09-18 10:00:00 +00:00
|
|
|
struct rte_eth_dev_info *dev_info);
|
|
|
|
static int virtio_dev_link_update(struct rte_eth_dev *dev,
|
2017-05-12 10:33:03 +00:00
|
|
|
int wait_to_complete);
|
2017-09-01 02:36:28 +00:00
|
|
|
static int virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
|
2013-09-18 10:00:00 +00:00
|
|
|
|
|
|
|
static void virtio_set_hwaddr(struct virtio_hw *hw);
|
|
|
|
static void virtio_get_hwaddr(struct virtio_hw *hw);
|
|
|
|
|
2017-10-10 20:20:18 +00:00
|
|
|
static int virtio_dev_stats_get(struct rte_eth_dev *dev,
|
2015-11-02 10:19:00 +00:00
|
|
|
struct rte_eth_stats *stats);
|
|
|
|
static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
|
2016-06-15 15:25:33 +00:00
|
|
|
struct rte_eth_xstat *xstats, unsigned n);
|
2016-06-15 15:25:32 +00:00
|
|
|
static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
|
|
|
|
struct rte_eth_xstat_name *xstats_names,
|
|
|
|
unsigned limit);
|
2019-09-06 14:34:54 +00:00
|
|
|
static int virtio_dev_stats_reset(struct rte_eth_dev *dev);
|
2013-09-18 10:00:00 +00:00
|
|
|
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
|
2015-02-09 01:14:02 +00:00
|
|
|
static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
|
|
|
|
uint16_t vlan_id, int on);
|
2017-05-05 00:40:00 +00:00
|
|
|
static int virtio_mac_addr_add(struct rte_eth_dev *dev,
|
2019-05-21 16:13:03 +00:00
|
|
|
struct rte_ether_addr *mac_addr,
|
2017-05-12 10:33:03 +00:00
|
|
|
uint32_t index, uint32_t vmdq);
|
2015-02-09 01:14:03 +00:00
|
|
|
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
|
2018-04-11 16:32:51 +00:00
|
|
|
static int virtio_mac_addr_set(struct rte_eth_dev *dev,
|
2019-05-21 16:13:03 +00:00
|
|
|
struct rte_ether_addr *mac_addr);
|
2013-09-18 10:00:00 +00:00
|
|
|
|
2017-11-09 09:21:24 +00:00
|
|
|
static int virtio_intr_disable(struct rte_eth_dev *dev);
|
|
|
|
|
2014-05-29 07:18:20 +00:00
|
|
|
static int virtio_dev_queue_stats_mapping_set(
|
2017-05-12 10:33:03 +00:00
|
|
|
struct rte_eth_dev *eth_dev,
|
|
|
|
uint16_t queue_id,
|
|
|
|
uint8_t stat_idx,
|
|
|
|
uint8_t is_rx);
|
2014-05-29 07:18:20 +00:00
|
|
|
|
2017-12-19 06:38:35 +00:00
|
|
|
int virtio_logtype_init;
|
|
|
|
int virtio_logtype_driver;
|
|
|
|
|
2018-01-18 02:20:38 +00:00
|
|
|
static void virtio_notify_peers(struct rte_eth_dev *dev);
|
|
|
|
static void virtio_ack_link_announce(struct rte_eth_dev *dev);
|
|
|
|
|
2013-09-18 10:00:00 +00:00
|
|
|
/*
 * The set of PCI devices this driver supports: the legacy (transitional)
 * and the modern virtio-net device IDs, both under the virtio vendor ID.
 */
static const struct rte_pci_id pci_id_virtio_map[] = {
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
	{ .vendor_id = 0, /* sentinel */ },
};
|
|
|
|
|
2015-11-02 10:19:00 +00:00
|
|
|
/*
 * Maps an xstat display-name suffix to the byte offset of the matching
 * counter inside the per-queue stats structure (virtnet_rx / virtnet_tx).
 */
struct rte_virtio_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];	/* xstat name suffix */
	unsigned offset;			/* offsetof() into the queue struct */
};
|
|
|
|
|
|
|
|
/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
	{"good_packets", offsetof(struct virtnet_rx, stats.packets)},
	{"good_bytes", offsetof(struct virtnet_rx, stats.bytes)},
	{"errors", offsetof(struct virtnet_rx, stats.errors)},
	{"multicast_packets", offsetof(struct virtnet_rx, stats.multicast)},
	{"broadcast_packets", offsetof(struct virtnet_rx, stats.broadcast)},
	/* size_bins[] buckets packets by frame size; bin boundaries below */
	{"undersize_packets", offsetof(struct virtnet_rx, stats.size_bins[0])},
	{"size_64_packets", offsetof(struct virtnet_rx, stats.size_bins[1])},
	{"size_65_127_packets", offsetof(struct virtnet_rx, stats.size_bins[2])},
	{"size_128_255_packets", offsetof(struct virtnet_rx, stats.size_bins[3])},
	{"size_256_511_packets", offsetof(struct virtnet_rx, stats.size_bins[4])},
	{"size_512_1023_packets", offsetof(struct virtnet_rx, stats.size_bins[5])},
	{"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
	{"size_1519_max_packets", offsetof(struct virtnet_rx, stats.size_bins[7])},
};
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
	{"good_packets", offsetof(struct virtnet_tx, stats.packets)},
	{"good_bytes", offsetof(struct virtnet_tx, stats.bytes)},
	{"multicast_packets", offsetof(struct virtnet_tx, stats.multicast)},
	{"broadcast_packets", offsetof(struct virtnet_tx, stats.broadcast)},
	/* size_bins[] buckets packets by frame size; same bins as the RX table */
	{"undersize_packets", offsetof(struct virtnet_tx, stats.size_bins[0])},
	{"size_64_packets", offsetof(struct virtnet_tx, stats.size_bins[1])},
	{"size_65_127_packets", offsetof(struct virtnet_tx, stats.size_bins[2])},
	{"size_128_255_packets", offsetof(struct virtnet_tx, stats.size_bins[3])},
	{"size_256_511_packets", offsetof(struct virtnet_tx, stats.size_bins[4])},
	{"size_512_1023_packets", offsetof(struct virtnet_tx, stats.size_bins[5])},
	{"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
	{"size_1519_max_packets", offsetof(struct virtnet_tx, stats.size_bins[7])},
};
|
|
|
|
|
|
|
|
/* Number of per-RX-queue / per-TX-queue extended statistics entries. */
#define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
			      sizeof(rte_virtio_rxq_stat_strings[0]))
#define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
			      sizeof(rte_virtio_txq_stat_strings[0]))
|
2015-11-02 10:19:00 +00:00
|
|
|
|
net/virtio: store PCI operators pointer locally
We used to store the vtpci_ops at virtio_hw structure. The struct,
however, is stored in shared memory. That means only one value is
allowed. For the multiple process model, however, the address of
vtpci_ops should be different among different processes.
Take virtio PMD as example, the vtpci_ops is set by the primary
process, based on its own process space. If we access that address
from the secondary process, that would be an illegal memory access,
A crash then might happen.
To make the multiple process model work, we need store the vtpci_ops
in local memory but not in a shared memory. This is what the patch
does: a local virtio_hw_internal array of size RTE_MAX_ETHPORTS is
allocated. This new structure is used to store all these kind of
info in a non-shared memory. Current, we have:
- vtpci_ops
- rte_pci_ioport
- virtio pci mapped memory, such as common_cfg.
The later two will be done in coming patches. Later patches would also
set them correctly for secondary process, so that the multiple process
model could work.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2017-01-06 10:16:17 +00:00
|
|
|
/*
 * Per-port driver state (vtpci_ops, ioport, mapped PCI regions) kept in
 * process-local memory rather than in the shared virtio_hw struct, so
 * that each process in a multi-process deployment holds its own pointers.
 */
struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
|
|
|
|
|
2018-12-17 21:31:36 +00:00
|
|
|
/*
 * Send a control command on a packed virtqueue and busy-wait for the
 * device's acknowledgement.
 *
 * Caller (virtio_send_command) holds cvq->lock, has copied the command
 * into cvq->virtio_net_hdr_mz, and has verified there is enough free
 * room in the ring.  Returns a pointer into the shared header memzone
 * where the device wrote its status.
 */
static struct virtio_pmd_ctrl *
virtio_send_command_packed(struct virtnet_ctl *cvq,
			   struct virtio_pmd_ctrl *ctrl,
			   int *dlen, int pkt_num)
{
	struct virtqueue *vq = cvq->vq;
	int head;
	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
	struct virtio_pmd_ctrl *result;
	uint16_t flags;
	int sum = 0;		/* running byte offset of the data arguments */
	int nb_descs = 0;	/* descriptors consumed, reclaimed at the end */
	int k;

	/*
	 * Format is enforced in qemu code:
	 * One TX packet for header;
	 * At least one TX packet per argument;
	 * One RX packet for ACK.
	 */
	head = vq->vq_avail_idx;
	/* Snapshot the avail/used flags for the head descriptor; it is
	 * written last (after the barrier) to publish the whole chain.
	 */
	flags = vq->vq_packed.cached_flags;
	desc[head].addr = cvq->virtio_net_hdr_mem;
	desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_free_cnt--;
	nb_descs++;
	if (++vq->vq_avail_idx >= vq->vq_nentries) {
		/* Ring wrapped: flip the avail/used wrap flags. */
		vq->vq_avail_idx -= vq->vq_nentries;
		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
	}

	/* One descriptor per command argument, laid out back-to-back in
	 * the header memzone after the ctrl header and status byte.
	 */
	for (k = 0; k < pkt_num; k++) {
		desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
			+ sizeof(struct virtio_net_ctrl_hdr)
			+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
		desc[vq->vq_avail_idx].len = dlen[k];
		desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
			vq->vq_packed.cached_flags;
		sum += dlen[k];
		vq->vq_free_cnt--;
		nb_descs++;
		if (++vq->vq_avail_idx >= vq->vq_nentries) {
			vq->vq_avail_idx -= vq->vq_nentries;
			vq->vq_packed.cached_flags ^=
				VRING_PACKED_DESC_F_AVAIL_USED;
		}
	}

	/* Final, device-writable descriptor for the ack/status byte. */
	desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
		+ sizeof(struct virtio_net_ctrl_hdr);
	desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
	desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
		vq->vq_packed.cached_flags;
	vq->vq_free_cnt--;
	nb_descs++;
	if (++vq->vq_avail_idx >= vq->vq_nentries) {
		vq->vq_avail_idx -= vq->vq_nentries;
		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
	}

	/* Make all descriptor writes visible before publishing the head
	 * descriptor's flags, then barrier again before notifying.
	 */
	virtio_wmb(vq->hw->weak_barriers);
	desc[head].flags = VRING_DESC_F_NEXT | flags;

	virtio_wmb(vq->hw->weak_barriers);
	virtqueue_notify(vq);

	/* wait for used descriptors in virtqueue */
	while (!desc_is_used(&desc[head], vq))
		usleep(100);

	virtio_rmb(vq->hw->weak_barriers);

	/* now get used descriptors */
	vq->vq_free_cnt += nb_descs;
	vq->vq_used_cons_idx += nb_descs;
	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
		vq->vq_used_cons_idx -= vq->vq_nentries;
		vq->vq_packed.used_wrap_counter ^= 1;
	}

	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
			"vq->vq_avail_idx=%d\n"
			"vq->vq_used_cons_idx=%d\n"
			"vq->vq_packed.cached_flags=0x%x\n"
			"vq->vq_packed.used_wrap_counter=%d\n",
			vq->vq_free_cnt,
			vq->vq_avail_idx,
			vq->vq_used_cons_idx,
			vq->vq_packed.cached_flags,
			vq->vq_packed.used_wrap_counter);

	/* Status was written by the device into the shared memzone. */
	result = cvq->virtio_net_hdr_mz->addr;
	return result;
}
|
|
|
|
|
2019-03-19 06:43:11 +00:00
|
|
|
/*
 * Send a control command on a split virtqueue and busy-wait for the
 * device's acknowledgement.
 *
 * Caller (virtio_send_command) holds cvq->lock, has copied the command
 * into cvq->virtio_net_hdr_mz, and has verified there is enough free
 * room in the ring.  Returns a pointer into the shared header memzone
 * where the device wrote its status.
 */
static struct virtio_pmd_ctrl *
virtio_send_command_split(struct virtnet_ctl *cvq,
			  struct virtio_pmd_ctrl *ctrl,
			  int *dlen, int pkt_num)
{
	struct virtio_pmd_ctrl *result;
	struct virtqueue *vq = cvq->vq;
	uint32_t head, i;
	int k, sum = 0;	/* sum: running byte offset of the data arguments */

	head = vq->vq_desc_head_idx;

	/*
	 * Format is enforced in qemu code:
	 * One TX packet for header;
	 * At least one TX packet per argument;
	 * One RX packet for ACK.
	 */
	vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
	vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
	vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_free_cnt--;
	i = vq->vq_split.ring.desc[head].next;

	/* One chained descriptor per command argument; data lives in the
	 * header memzone after the ctrl header and status byte.
	 */
	for (k = 0; k < pkt_num; k++) {
		vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
			+ sizeof(struct virtio_net_ctrl_hdr)
			+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
		vq->vq_split.ring.desc[i].len = dlen[k];
		sum += dlen[k];
		vq->vq_free_cnt--;
		i = vq->vq_split.ring.desc[i].next;
	}

	/* Final, device-writable descriptor for the ack/status byte. */
	vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
	vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
		+ sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
	vq->vq_free_cnt--;

	/* Detach the chain from the free list. */
	vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;

	vq_update_avail_ring(vq, head);
	vq_update_avail_idx(vq);

	PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);

	virtqueue_notify(vq);

	/* Poll until the device marks at least one descriptor as used. */
	rte_rmb();
	while (VIRTQUEUE_NUSED(vq) == 0) {
		rte_rmb();
		usleep(100);
	}

	/* Walk every used element, returning its descriptor chain to the
	 * free list (prepend the chain, then make its tail the new head).
	 */
	while (VIRTQUEUE_NUSED(vq)) {
		uint32_t idx, desc_idx, used_idx;
		struct vring_used_elem *uep;

		used_idx = (uint32_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_split.ring.used->ring[used_idx];
		idx = (uint32_t) uep->id;
		desc_idx = idx;

		/* Count the chained descriptors back into the free count. */
		while (vq->vq_split.ring.desc[desc_idx].flags &
				VRING_DESC_F_NEXT) {
			desc_idx = vq->vq_split.ring.desc[desc_idx].next;
			vq->vq_free_cnt++;
		}

		/* Link the reclaimed chain in front of the free list. */
		vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
		vq->vq_desc_head_idx = idx;

		vq->vq_used_cons_idx++;
		vq->vq_free_cnt++;
	}

	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
			vq->vq_free_cnt, vq->vq_desc_head_idx);

	/* Status was written by the device into the shared memzone. */
	result = cvq->virtio_net_hdr_mz->addr;
	return result;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
|
|
|
|
int *dlen, int pkt_num)
|
|
|
|
{
|
|
|
|
virtio_net_ctrl_ack status = ~0;
|
|
|
|
struct virtio_pmd_ctrl *result;
|
|
|
|
struct virtqueue *vq;
|
|
|
|
|
|
|
|
ctrl->status = status;
|
|
|
|
|
|
|
|
if (!cvq || !cvq->vq) {
|
|
|
|
PMD_INIT_LOG(ERR, "Control queue is not supported.");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
rte_spinlock_lock(&cvq->lock);
|
|
|
|
vq = cvq->vq;
|
|
|
|
|
|
|
|
PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
|
|
|
|
"vq->hw->cvq = %p vq = %p",
|
|
|
|
vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
|
|
|
|
|
|
|
|
if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
|
|
|
|
rte_spinlock_unlock(&cvq->lock);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
|
|
|
|
sizeof(struct virtio_pmd_ctrl));
|
|
|
|
|
|
|
|
if (vtpci_packed_queue(vq->hw))
|
|
|
|
result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
|
|
|
|
else
|
|
|
|
result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);
|
2014-05-29 07:18:20 +00:00
|
|
|
|
2018-01-10 01:23:52 +00:00
|
|
|
rte_spinlock_unlock(&cvq->lock);
|
2017-08-11 02:13:18 +00:00
|
|
|
return result->status;
|
2014-05-29 07:18:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
|
|
|
|
{
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2014-05-29 07:18:20 +00:00
|
|
|
struct virtio_pmd_ctrl ctrl;
|
|
|
|
int dlen[1];
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
|
|
|
|
ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
|
|
|
|
memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));
|
|
|
|
|
|
|
|
dlen[0] = sizeof(uint16_t);
|
|
|
|
|
|
|
|
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
|
|
|
|
if (ret) {
|
|
|
|
PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
|
2014-06-14 01:06:19 +00:00
|
|
|
"failed, this is too late now...");
|
2014-05-29 07:18:20 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring addreess) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more. Leading to a state
that the vring info mismatches between virtio PMD driver and vhost-backend:
the driver switches to the new address has just been allocated, while the
vhost-backend still sticks to the old address has been assigned in the init
stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way to do is allocate the queues in the init stage, so that the
vring info could be persistent with the vhost-user backend.
Besides that, we should allocate max_queue pairs the device supports, but
not nr queue pairs firstly configured, to make following case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is switched to init stage, the free should also
moved from the rx/tx_queue_release to dev close stage. That leading we
could do nothing an empty rx/tx_queue_release() implementation.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
/*
 * Per-queue release callback: intentionally a no-op.  Queues are
 * allocated once at device init and freed at device close (see the
 * queue-allocation rework), so there is nothing to free here.
 */
static void
virtio_dev_queue_release(void *queue __rte_unused)
{
	/* do nothing */
}
|
2015-07-15 13:51:02 +00:00
|
|
|
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring addreess) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more. Leading to a state
that the vring info mismatches between virtio PMD driver and vhost-backend:
the driver switches to the new address has just been allocated, while the
vhost-backend still sticks to the old address has been assigned in the init
stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way to do is allocate the queues in the init stage, so that the
vring info could be persistent with the vhost-user backend.
Besides that, we should allocate max_queue pairs the device supports, but
not nr queue pairs firstly configured, to make following case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is switched to init stage, the free should also
moved from the rx/tx_queue_release to dev close stage. That leading we
could do nothing an empty rx/tx_queue_release() implementation.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
static uint16_t
|
|
|
|
virtio_get_nr_vq(struct virtio_hw *hw)
|
|
|
|
{
|
|
|
|
uint16_t nr_vq = hw->max_queue_pairs * 2;
|
|
|
|
|
|
|
|
if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
|
|
|
|
nr_vq += 1;
|
|
|
|
|
|
|
|
return nr_vq;
|
2015-07-15 13:51:02 +00:00
|
|
|
}
|
|
|
|
|
2016-11-05 09:41:00 +00:00
|
|
|
/*
 * (Re)initialize a virtqueue's ring memory and software bookkeeping:
 * zero the ring, reset all indices and the free count, clear the
 * per-descriptor extra state, then lay out the ring structures for
 * either the packed or the split format.
 */
static void
virtio_init_vring(struct virtqueue *vq)
{
	int size = vq->vq_nentries;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	PMD_INIT_FUNC_TRACE();

	memset(ring_mem, 0, vq->vq_ring_size);

	/* Reset software state: consume/produce indices and free list. */
	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;
	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
	if (vtpci_packed_queue(vq->hw)) {
		vring_init_packed(&vq->vq_packed.ring, ring_mem,
				  VIRTIO_PCI_VRING_ALIGN, size);
		vring_desc_init_packed(vq, size);
	} else {
		struct vring *vr = &vq->vq_split.ring;

		vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
		vring_desc_init_split(vr->desc, size);
	}
	/*
	 * Disable device(host) interrupting guest
	 */
	virtqueue_disable_intr(vq);
}
|
|
|
|
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring addreess) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more. Leading to a state
that the vring info mismatches between virtio PMD driver and vhost-backend:
the driver switches to the new address has just been allocated, while the
vhost-backend still sticks to the old address has been assigned in the init
stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way to do is allocate the queues in the init stage, so that the
vring info could be persistent with the vhost-user backend.
Besides that, we should allocate max_queue pairs the device supports, but
not nr queue pairs firstly configured, to make following case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is switched to init stage, the free should also
moved from the rx/tx_queue_release to dev close stage. That leading we
could do nothing an empty rx/tx_queue_release() implementation.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
static int
|
|
|
|
virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
|
2013-09-18 10:00:00 +00:00
|
|
|
{
|
|
|
|
char vq_name[VIRTQUEUE_MAX_NAME_SZ];
|
2016-06-01 16:12:13 +00:00
|
|
|
char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
|
|
|
|
const struct rte_memzone *mz = NULL, *hdr_mz = NULL;
|
2015-08-28 16:23:38 +00:00
|
|
|
unsigned int vq_size, size;
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
net/virtio: allow virtual address to fill vring descriptors
This patch is related to how to calculate relative address for vhost
backend.
The principle is that: based on one or multiple shared memory regions,
vhost maintains a reference system with the frontend start address,
backend start address, and length for each segment, so that each
frontend address (GPA, Guest Physical Address) can be translated into
vhost-recognizable backend address. To make the address translation
efficient, we need to maintain as few regions as possible. In the case
of VM, GPA is always locally continuous. But for some other case, like
virtio-user, GPA continuous is not guaranteed, therefore, we use virtual
address here.
It basically means:
a. when set_base_addr, VA address is used;
b. when preparing RX's descriptors, VA address is used;
c. when transmitting packets, VA is filled in TX's descriptors;
d. in TX and CQ's header, VA is used.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-06-15 09:03:21 +00:00
|
|
|
struct virtnet_rx *rxvq = NULL;
|
|
|
|
struct virtnet_tx *txvq = NULL;
|
|
|
|
struct virtnet_ctl *cvq = NULL;
|
2016-06-01 16:12:13 +00:00
|
|
|
struct virtqueue *vq;
|
2016-11-05 09:40:58 +00:00
|
|
|
size_t sz_hdr_mz = 0;
|
2016-06-01 16:12:13 +00:00
|
|
|
void *sw_ring = NULL;
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring addreess) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more. Leading to a state
that the vring info mismatches between virtio PMD driver and vhost-backend:
the driver switches to the new address has just been allocated, while the
vhost-backend still sticks to the old address has been assigned in the init
stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way to do is allocate the queues in the init stage, so that the
vring info could be persistent with the vhost-user backend.
Besides that, we should allocate max_queue pairs the device supports, but
not nr queue pairs firstly configured, to make following case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is switched to init stage, the free should also
moved from the rx/tx_queue_release to dev close stage. That leading we
could do nothing an empty rx/tx_queue_release() implementation.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
|
2016-06-01 16:12:13 +00:00
|
|
|
int ret;
|
2018-11-27 10:54:27 +00:00
|
|
|
int numa_node = dev->device->numa_node;
|
2013-09-18 10:00:00 +00:00
|
|
|
|
2018-11-27 10:54:27 +00:00
|
|
|
PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
|
|
|
|
vtpci_queue_idx, numa_node);
|
2013-09-18 10:00:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Read the virtqueue size from the Queue Size field
|
|
|
|
* Always power of 2 and if 0 virtqueue does not exist
|
|
|
|
*/
|
net/virtio: store PCI operators pointer locally
We used to store the vtpci_ops in the virtio_hw structure. The struct,
however, is stored in shared memory. That means only one value is
allowed. For the multiple process model, however, the address of
vtpci_ops should be different among different processes.
Take the virtio PMD as an example: the vtpci_ops is set by the primary
process, based on its own process space. If we access that address
from the secondary process, that would be an illegal memory access,
and a crash might then happen.
To make the multiple process model work, we need to store the vtpci_ops
in local memory and not in shared memory. This is what the patch
does: a local virtio_hw_internal array of size RTE_MAX_ETHPORTS is
allocated. This new structure is used to store all this kind of
info in non-shared memory. Currently, we have:
- vtpci_ops
- rte_pci_ioport
- virtio pci mapped memory, such as common_cfg.
The latter two will be done in coming patches. Later patches will also
set them correctly for the secondary process, so that the multiple process
model can work.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2017-01-06 10:16:17 +00:00
|
|
|
vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring address) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more, leading to a state
where the vring info mismatches between the virtio PMD driver and the
vhost backend: the driver switches to the new address that has just been
allocated, while the vhost backend still sticks to the old address that
was assigned in the init stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way is to allocate the queues in the init stage, so that the
vring info can remain persistent with the vhost-user backend.
Besides that, we should allocate the max queue pairs the device supports,
not just the number of queue pairs first configured, to make the following
case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is moved to the init stage, the free should also be
moved from rx/tx_queue_release to the dev close stage. As a result, the
rx/tx_queue_release() implementations can be empty.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
|
2013-09-18 10:00:00 +00:00
|
|
|
if (vq_size == 0) {
|
2016-02-10 16:08:54 +00:00
|
|
|
PMD_INIT_LOG(ERR, "virtqueue does not exist");
|
2014-06-13 01:32:40 +00:00
|
|
|
return -EINVAL;
|
2015-06-11 15:53:27 +00:00
|
|
|
}
|
|
|
|
|
2019-10-30 09:24:20 +00:00
|
|
|
if (!vtpci_packed_queue(hw) && !rte_is_power_of_2(vq_size)) {
|
|
|
|
PMD_INIT_LOG(ERR, "split virtqueue size is not powerof 2");
|
2014-06-13 01:32:40 +00:00
|
|
|
return -EINVAL;
|
2015-06-11 15:53:27 +00:00
|
|
|
}
|
|
|
|
|
2016-11-05 09:40:57 +00:00
|
|
|
snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
|
|
|
|
dev->data->port_id, vtpci_queue_idx);
|
2016-04-29 00:48:45 +00:00
|
|
|
|
2016-11-05 09:40:58 +00:00
|
|
|
size = RTE_ALIGN_CEIL(sizeof(*vq) +
|
2016-06-01 16:12:13 +00:00
|
|
|
vq_size * sizeof(struct vq_desc_extra),
|
|
|
|
RTE_CACHE_LINE_SIZE);
|
2016-11-05 09:40:58 +00:00
|
|
|
if (queue_type == VTNET_TQ) {
|
2016-06-01 16:12:13 +00:00
|
|
|
/*
|
|
|
|
* For each xmit packet, allocate a virtio_net_hdr
|
|
|
|
* and indirect ring elements
|
|
|
|
*/
|
|
|
|
sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
|
|
|
|
} else if (queue_type == VTNET_CQ) {
|
|
|
|
/* Allocate a page for control vq command, data and status */
|
|
|
|
sz_hdr_mz = PAGE_SIZE;
|
2015-10-29 14:53:22 +00:00
|
|
|
}
|
2014-05-29 07:18:19 +00:00
|
|
|
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring address) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more, leading to a state
where the vring info mismatches between the virtio PMD driver and the
vhost backend: the driver switches to the new address that has just been
allocated, while the vhost backend still sticks to the old address that
was assigned in the init stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way is to allocate the queues in the init stage, so that the
vring info can remain persistent with the vhost-user backend.
Besides that, we should allocate the max queue pairs the device supports,
not just the number of queue pairs first configured, to make the following
case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is moved to the init stage, the free should also be
moved from rx/tx_queue_release to the dev close stage. As a result, the
rx/tx_queue_release() implementations can be empty.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
|
2018-11-27 10:54:27 +00:00
|
|
|
numa_node);
|
2016-06-01 16:12:13 +00:00
|
|
|
if (vq == NULL) {
|
|
|
|
PMD_INIT_LOG(ERR, "can not allocate vq");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring address) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more, leading to a state
where the vring info mismatches between the virtio PMD driver and the
vhost backend: the driver switches to the new address that has just been
allocated, while the vhost backend still sticks to the old address that
was assigned in the init stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way is to allocate the queues in the init stage, so that the
vring info can remain persistent with the vhost-user backend.
Besides that, we should allocate the max queue pairs the device supports,
not just the number of queue pairs first configured, to make the following
case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is moved to the init stage, the free should also be
moved from rx/tx_queue_release to the dev close stage. As a result, the
rx/tx_queue_release() implementations can be empty.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
hw->vqs[vtpci_queue_idx] = vq;
|
|
|
|
|
2013-09-18 10:00:00 +00:00
|
|
|
vq->hw = hw;
|
|
|
|
vq->vq_queue_index = vtpci_queue_idx;
|
|
|
|
vq->vq_nentries = vq_size;
|
2018-12-17 21:31:32 +00:00
|
|
|
if (vtpci_packed_queue(hw)) {
|
2019-03-19 06:43:07 +00:00
|
|
|
vq->vq_packed.used_wrap_counter = 1;
|
2019-03-25 05:27:16 +00:00
|
|
|
vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
|
2019-03-19 06:43:07 +00:00
|
|
|
vq->vq_packed.event_flags_shadow = 0;
|
2019-03-19 06:43:06 +00:00
|
|
|
if (queue_type == VTNET_RQ)
|
2019-03-19 06:43:07 +00:00
|
|
|
vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
|
2018-12-17 21:31:32 +00:00
|
|
|
}
|
2013-09-18 10:00:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Reserve a memzone for vring elements
|
|
|
|
*/
|
2018-12-17 21:31:32 +00:00
|
|
|
size = vring_size(hw, vq_size, VIRTIO_PCI_VRING_ALIGN);
|
2013-09-18 10:00:00 +00:00
|
|
|
vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
|
2016-06-01 16:12:13 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
|
|
|
|
size, vq->vq_ring_size);
|
2013-09-18 10:00:00 +00:00
|
|
|
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring address) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more, leading to a state
where the vring info mismatches between the virtio PMD driver and the
vhost backend: the driver switches to the new address that has just been
allocated, while the vhost backend still sticks to the old address that
was assigned in the init stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way is to allocate the queues in the init stage, so that the
vring info can remain persistent with the vhost-user backend.
Besides that, we should allocate the max queue pairs the device supports,
not just the number of queue pairs first configured, to make the following
case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is moved to the init stage, the free should also be
moved from rx/tx_queue_release to the dev close stage. As a result, the
rx/tx_queue_release() implementations can be empty.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
|
2018-11-27 10:54:27 +00:00
|
|
|
numa_node, RTE_MEMZONE_IOVA_CONTIG,
|
2018-04-11 12:29:57 +00:00
|
|
|
VIRTIO_PCI_VRING_ALIGN);
|
2013-09-18 10:00:00 +00:00
|
|
|
if (mz == NULL) {
|
2015-07-15 13:51:00 +00:00
|
|
|
if (rte_errno == EEXIST)
|
|
|
|
mz = rte_memzone_lookup(vq_name);
|
|
|
|
if (mz == NULL) {
|
2016-06-01 16:12:13 +00:00
|
|
|
ret = -ENOMEM;
|
|
|
|
goto fail_q_alloc;
|
2015-07-15 13:51:00 +00:00
|
|
|
}
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
2014-05-29 07:18:19 +00:00
|
|
|
|
2017-06-12 04:34:30 +00:00
|
|
|
memset(mz->addr, 0, mz->len);
|
2016-06-01 16:12:13 +00:00
|
|
|
|
2017-11-04 01:22:28 +00:00
|
|
|
vq->vq_ring_mem = mz->iova;
|
2013-09-18 10:00:00 +00:00
|
|
|
vq->vq_ring_virt_mem = mz->addr;
|
2016-06-01 16:12:13 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
|
2017-11-04 01:22:28 +00:00
|
|
|
(uint64_t)mz->iova);
|
2016-06-01 16:12:13 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
|
|
|
|
(uint64_t)(uintptr_t)mz->addr);
|
|
|
|
|
2016-11-05 09:41:00 +00:00
|
|
|
virtio_init_vring(vq);
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
if (sz_hdr_mz) {
|
2016-11-05 09:40:57 +00:00
|
|
|
snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
|
|
|
|
dev->data->port_id, vtpci_queue_idx);
|
2016-06-01 16:12:13 +00:00
|
|
|
hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
|
2018-11-27 10:54:27 +00:00
|
|
|
numa_node, RTE_MEMZONE_IOVA_CONTIG,
|
2018-04-11 12:29:57 +00:00
|
|
|
RTE_CACHE_LINE_SIZE);
|
2016-03-04 18:19:19 +00:00
|
|
|
if (hdr_mz == NULL) {
|
2015-07-15 13:51:00 +00:00
|
|
|
if (rte_errno == EEXIST)
|
2016-06-01 16:12:13 +00:00
|
|
|
hdr_mz = rte_memzone_lookup(vq_hdr_name);
|
2016-03-04 18:19:19 +00:00
|
|
|
if (hdr_mz == NULL) {
|
2016-06-01 16:12:13 +00:00
|
|
|
ret = -ENOMEM;
|
|
|
|
goto fail_q_alloc;
|
2015-07-15 13:51:00 +00:00
|
|
|
}
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
2016-06-01 16:12:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (queue_type == VTNET_RQ) {
|
|
|
|
size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
|
|
|
|
sizeof(vq->sw_ring[0]);
|
|
|
|
|
|
|
|
sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
|
2018-11-27 10:54:27 +00:00
|
|
|
RTE_CACHE_LINE_SIZE, numa_node);
|
2016-06-01 16:12:13 +00:00
|
|
|
if (!sw_ring) {
|
|
|
|
PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto fail_q_alloc;
|
|
|
|
}
|
|
|
|
|
|
|
|
vq->sw_ring = sw_ring;
|
2016-11-05 09:40:58 +00:00
|
|
|
rxvq = &vq->rxq;
|
2016-06-01 16:12:13 +00:00
|
|
|
rxvq->vq = vq;
|
|
|
|
rxvq->port_id = dev->data->port_id;
|
|
|
|
rxvq->mz = mz;
|
|
|
|
} else if (queue_type == VTNET_TQ) {
|
2016-11-05 09:40:58 +00:00
|
|
|
txvq = &vq->txq;
|
2016-06-01 16:12:13 +00:00
|
|
|
txvq->vq = vq;
|
|
|
|
txvq->port_id = dev->data->port_id;
|
|
|
|
txvq->mz = mz;
|
|
|
|
txvq->virtio_net_hdr_mz = hdr_mz;
|
2017-11-04 01:22:28 +00:00
|
|
|
txvq->virtio_net_hdr_mem = hdr_mz->iova;
|
net/virtio: allow virtual address to fill vring descriptors
This patch is related to how to calculate relative address for vhost
backend.
The principle is that: based on one or multiple shared memory regions,
vhost maintains a reference system with the frontend start address,
backend start address, and length for each segment, so that each
frontend address (GPA, Guest Physical Address) can be translated into
vhost-recognizable backend address. To make the address translation
efficient, we need to maintain as few regions as possible. In the case
of a VM, GPA is always locally continuous. But in some other cases, like
virtio-user, GPA continuity is not guaranteed; therefore, we use virtual
addresses here.
It basically means:
a. when set_base_addr, VA address is used;
b. when preparing RX's descriptors, VA address is used;
c. when transmitting packets, VA is filled in TX's descriptors;
d. in TX and CQ's header, VA is used.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-06-15 09:03:21 +00:00
|
|
|
} else if (queue_type == VTNET_CQ) {
|
2016-11-05 09:40:58 +00:00
|
|
|
cvq = &vq->cq;
|
net/virtio: allow virtual address to fill vring descriptors
This patch is related to how to calculate relative address for vhost
backend.
The principle is that: based on one or multiple shared memory regions,
vhost maintains a reference system with the frontend start address,
backend start address, and length for each segment, so that each
frontend address (GPA, Guest Physical Address) can be translated into
vhost-recognizable backend address. To make the address translation
efficient, we need to maintain as few regions as possible. In the case
of a VM, GPA is always locally continuous. But in some other cases, like
virtio-user, GPA continuity is not guaranteed; therefore, we use virtual
addresses here.
It basically means:
a. when set_base_addr, VA address is used;
b. when preparing RX's descriptors, VA address is used;
c. when transmitting packets, VA is filled in TX's descriptors;
d. in TX and CQ's header, VA is used.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-06-15 09:03:21 +00:00
|
|
|
cvq->vq = vq;
|
|
|
|
cvq->mz = mz;
|
|
|
|
cvq->virtio_net_hdr_mz = hdr_mz;
|
2017-11-04 01:22:28 +00:00
|
|
|
cvq->virtio_net_hdr_mem = hdr_mz->iova;
|
net/virtio: allow virtual address to fill vring descriptors
This patch is related to how to calculate relative address for vhost
backend.
The principle is that: based on one or multiple shared memory regions,
vhost maintains a reference system with the frontend start address,
backend start address, and length for each segment, so that each
frontend address (GPA, Guest Physical Address) can be translated into
vhost-recognizable backend address. To make the address translation
efficient, we need to maintain as few regions as possible. In the case
of a VM, GPA is always locally continuous. But in some other cases, like
virtio-user, GPA continuity is not guaranteed; therefore, we use virtual
addresses here.
It basically means:
a. when set_base_addr, VA address is used;
b. when preparing RX's descriptors, VA address is used;
c. when transmitting packets, VA is filled in TX's descriptors;
d. in TX and CQ's header, VA is used.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-06-15 09:03:21 +00:00
|
|
|
memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring address) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more, leading to a state
where the vring info mismatches between the virtio PMD driver and the
vhost backend: the driver switches to the new address that has just been
allocated, while the vhost backend still sticks to the old address that
was assigned in the init stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way is to allocate the queues in the init stage, so that the
vring info can remain persistent with the vhost-user backend.
Besides that, we should allocate the max queue pairs the device supports,
not just the number of queue pairs first configured, to make the following
case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is moved to the init stage, the free should also be
moved from rx/tx_queue_release to the dev close stage. As a result, the
rx/tx_queue_release() implementations can be empty.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
|
|
|
|
hw->cvq = cvq;
|
net/virtio: allow virtual address to fill vring descriptors
This patch is related to how to calculate relative address for vhost
backend.
The principle is that: based on one or multiple shared memory regions,
vhost maintains a reference system with the frontend start address,
backend start address, and length for each segment, so that each
frontend address (GPA, Guest Physical Address) can be translated into
vhost-recognizable backend address. To make the address translation
efficient, we need to maintain as few regions as possible. In the case
of a VM, GPA is always locally continuous. But in some other cases, like
virtio-user, GPA continuity is not guaranteed; therefore, we use virtual
addresses here.
It basically means:
a. when set_base_addr, VA address is used;
b. when preparing RX's descriptors, VA address is used;
c. when transmitting packets, VA is filled in TX's descriptors;
d. in TX and CQ's header, VA is used.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-06-15 09:03:21 +00:00
|
|
|
}
|
|
|
|
|
2016-12-23 15:58:02 +00:00
|
|
|
/* For virtio_user case (that is when hw->dev is NULL), we use
|
net/virtio: allow virtual address to fill vring descriptors
This patch is related to how to calculate relative address for vhost
backend.
The principle is that: based on one or multiple shared memory regions,
vhost maintains a reference system with the frontend start address,
backend start address, and length for each segment, so that each
frontend address (GPA, Guest Physical Address) can be translated into
vhost-recognizable backend address. To make the address translation
efficient, we need to maintain as few regions as possible. In the case
of a VM, GPA is always locally continuous. But in some other cases, like
virtio-user, GPA continuity is not guaranteed; therefore, we use virtual
addresses here.
It basically means:
a. when set_base_addr, VA address is used;
b. when preparing RX's descriptors, VA address is used;
c. when transmitting packets, VA is filled in TX's descriptors;
d. in TX and CQ's header, VA is used.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-06-15 09:03:21 +00:00
|
|
|
* virtual address. And we need properly set _offset_, please see
|
2016-07-19 12:31:59 +00:00
|
|
|
* VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
|
net/virtio: allow virtual address to fill vring descriptors
This patch is related to how to calculate relative address for vhost
backend.
The principle is that: based on one or multiple shared memory regions,
vhost maintains a reference system with the frontend start address,
backend start address, and length for each segment, so that each
frontend address (GPA, Guest Physical Address) can be translated into
vhost-recognizable backend address. To make the address translation
efficient, we need to maintain as few regions as possible. In the case
of a VM, GPA is always locally continuous. But in some other cases, like
virtio-user, GPA continuity is not guaranteed; therefore, we use virtual
addresses here.
It basically means:
a. when set_base_addr, VA address is used;
b. when preparing RX's descriptors, VA address is used;
c. when transmitting packets, VA is filled in TX's descriptors;
d. in TX and CQ's header, VA is used.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-06-15 09:03:21 +00:00
|
|
|
*/
|
2017-01-12 05:37:00 +00:00
|
|
|
if (!hw->virtio_user_dev)
|
2017-10-20 12:31:32 +00:00
|
|
|
vq->offset = offsetof(struct rte_mbuf, buf_iova);
|
net/virtio: allow virtual address to fill vring descriptors
This patch is related to how to calculate relative address for vhost
backend.
The principle is that: based on one or multiple shared memory regions,
vhost maintains a reference system with the frontend start address,
backend start address, and length for each segment, so that each
frontend address (GPA, Guest Physical Address) can be translated into
vhost-recognizable backend address. To make the address translation
efficient, we need to maintain as few regions as possible. In the case
of a VM, GPA is always locally continuous. But in some other cases, like
virtio-user, GPA continuity is not guaranteed; therefore, we use virtual
addresses here.
It basically means:
a. when set_base_addr, VA address is used;
b. when preparing RX's descriptors, VA address is used;
c. when transmitting packets, VA is filled in TX's descriptors;
d. in TX and CQ's header, VA is used.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
Signed-off-by: Jianfeng Tan <jianfeng.tan@intel.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-06-15 09:03:21 +00:00
|
|
|
else {
|
|
|
|
vq->vq_ring_mem = (uintptr_t)mz->addr;
|
|
|
|
vq->offset = offsetof(struct rte_mbuf, buf_addr);
|
|
|
|
if (queue_type == VTNET_TQ)
|
|
|
|
txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
|
|
|
|
else if (queue_type == VTNET_CQ)
|
|
|
|
cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (queue_type == VTNET_TQ) {
|
|
|
|
struct virtio_tx_region *txr;
|
|
|
|
unsigned int i;
|
|
|
|
|
2016-03-04 18:19:19 +00:00
|
|
|
txr = hdr_mz->addr;
|
|
|
|
memset(txr, 0, vq_size * sizeof(*txr));
|
|
|
|
for (i = 0; i < vq_size; i++) {
|
|
|
|
struct vring_desc *start_dp = txr[i].tx_indir;
|
|
|
|
|
|
|
|
/* first indirect descriptor is always the tx header */
|
2019-03-19 06:43:09 +00:00
|
|
|
if (!vtpci_packed_queue(hw)) {
|
2018-12-17 21:31:34 +00:00
|
|
|
vring_desc_init_split(start_dp,
|
|
|
|
RTE_DIM(txr[i].tx_indir));
|
|
|
|
start_dp->addr = txvq->virtio_net_hdr_mem
|
|
|
|
+ i * sizeof(*txr)
|
|
|
|
+ offsetof(struct virtio_tx_region,
|
|
|
|
tx_hdr);
|
|
|
|
start_dp->len = hw->vtnet_hdr_size;
|
|
|
|
start_dp->flags = VRING_DESC_F_NEXT;
|
|
|
|
}
|
2016-03-04 18:19:19 +00:00
|
|
|
}
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
net/virtio: store PCI operators pointer locally
We used to store the vtpci_ops in the virtio_hw structure. The struct,
however, is stored in shared memory. That means only one value is
allowed. For the multiple process model, however, the address of
vtpci_ops should be different among different processes.
Take the virtio PMD as an example: the vtpci_ops is set by the primary
process, based on its own process space. If we access that address
from the secondary process, that would be an illegal memory access,
and a crash might then happen.
To make the multiple process model work, we need to store the vtpci_ops
in local memory and not in shared memory. This is what the patch
does: a local virtio_hw_internal array of size RTE_MAX_ETHPORTS is
allocated. This new structure is used to store all this kind of
info in non-shared memory. Currently, we have:
- vtpci_ops
- rte_pci_ioport
- virtio pci mapped memory, such as common_cfg.
The latter two will be done in coming patches. Later patches will also
set them correctly for the secondary process, so that the multiple process
model can work.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2017-01-06 10:16:17 +00:00
|
|
|
if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
|
2016-06-15 09:03:20 +00:00
|
|
|
PMD_INIT_LOG(ERR, "setup_queue failed");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2014-05-29 07:18:19 +00:00
|
|
|
return 0;
|
2016-06-01 16:12:13 +00:00
|
|
|
|
|
|
|
fail_q_alloc:
|
|
|
|
rte_free(sw_ring);
|
|
|
|
rte_memzone_free(hdr_mz);
|
|
|
|
rte_memzone_free(mz);
|
|
|
|
rte_free(vq);
|
|
|
|
|
|
|
|
return ret;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring address) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more, leading to a state
where the vring info mismatches between the virtio PMD driver and the
vhost backend: the driver switches to the new address that has just been
allocated, while the vhost backend still sticks to the old address that
was assigned in the init stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way is to allocate the queues in the init stage, so that the
vring info can remain persistent with the vhost-user backend.
Besides that, we should allocate the max queue pairs the device supports,
not just the number of queue pairs first configured, to make the following
case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is moved to the init stage, the free should also be
moved from rx/tx_queue_release to the dev close stage. As a result, the
rx/tx_queue_release() implementations can be empty.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
static void
|
|
|
|
virtio_free_queues(struct virtio_hw *hw)
|
2013-09-18 10:00:00 +00:00
|
|
|
{
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring address) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more, leading to a state
where the vring info mismatches between the virtio PMD driver and the
vhost backend: the driver switches to the new address that has just been
allocated, while the vhost backend still sticks to the old address that
was assigned in the init stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way is to allocate the queues in the init stage, so that the
vring info can remain persistent with the vhost-user backend.
Besides that, we should allocate the max queue pairs the device supports,
not just the number of queue pairs first configured, to make the following
case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is moved to the init stage, the free should also be
moved from rx/tx_queue_release to the dev close stage. As a result, the
rx/tx_queue_release() implementations can be empty.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
uint16_t nr_vq = virtio_get_nr_vq(hw);
|
|
|
|
struct virtqueue *vq;
|
|
|
|
int queue_type;
|
|
|
|
uint16_t i;
|
2013-09-18 10:00:00 +00:00
|
|
|
|
2017-02-20 14:04:46 +00:00
|
|
|
if (hw->vqs == NULL)
|
|
|
|
return;
|
|
|
|
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring addreess) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more. Leading to a state
that the vring info mismatches between virtio PMD driver and vhost-backend:
the driver switches to the new address has just been allocated, while the
vhost-backend still sticks to the old address has been assigned in the init
stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way to do is allocate the queues in the init stage, so that the
vring info could be persistent with the vhost-user backend.
Besides that, we should allocate max_queue pairs the device supports, but
not nr queue pairs firstly configured, to make following case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is switched to init stage, the free should also
moved from the rx/tx_queue_release to dev close stage. That leading we
could do nothing an empty rx/tx_queue_release() implementation.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
for (i = 0; i < nr_vq; i++) {
|
|
|
|
vq = hw->vqs[i];
|
|
|
|
if (!vq)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
queue_type = virtio_get_queue_type(hw, i);
|
|
|
|
if (queue_type == VTNET_RQ) {
|
|
|
|
rte_free(vq->sw_ring);
|
|
|
|
rte_memzone_free(vq->rxq.mz);
|
|
|
|
} else if (queue_type == VTNET_TQ) {
|
|
|
|
rte_memzone_free(vq->txq.mz);
|
|
|
|
rte_memzone_free(vq->txq.virtio_net_hdr_mz);
|
|
|
|
} else {
|
|
|
|
rte_memzone_free(vq->cq.mz);
|
|
|
|
rte_memzone_free(vq->cq.virtio_net_hdr_mz);
|
|
|
|
}
|
|
|
|
|
|
|
|
rte_free(vq);
|
2017-02-20 14:04:46 +00:00
|
|
|
hw->vqs[i] = NULL;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring addreess) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more. Leading to a state
that the vring info mismatches between virtio PMD driver and vhost-backend:
the driver switches to the new address has just been allocated, while the
vhost-backend still sticks to the old address has been assigned in the init
stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way to do is allocate the queues in the init stage, so that the
vring info could be persistent with the vhost-user backend.
Besides that, we should allocate max_queue pairs the device supports, but
not nr queue pairs firstly configured, to make following case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is switched to init stage, the free should also
moved from the rx/tx_queue_release to dev close stage. That leading we
could do nothing an empty rx/tx_queue_release() implementation.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
rte_free(hw->vqs);
|
2017-02-20 14:04:46 +00:00
|
|
|
hw->vqs = NULL;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring addreess) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more. Leading to a state
that the vring info mismatches between virtio PMD driver and vhost-backend:
the driver switches to the new address has just been allocated, while the
vhost-backend still sticks to the old address has been assigned in the init
stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way to do is allocate the queues in the init stage, so that the
vring info could be persistent with the vhost-user backend.
Besides that, we should allocate max_queue pairs the device supports, but
not nr queue pairs firstly configured, to make following case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is switched to init stage, the free should also
moved from the rx/tx_queue_release to dev close stage. That leading we
could do nothing an empty rx/tx_queue_release() implementation.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
static int
|
|
|
|
virtio_alloc_queues(struct rte_eth_dev *dev)
|
2015-07-15 13:51:03 +00:00
|
|
|
{
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring addreess) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more. Leading to a state
that the vring info mismatches between virtio PMD driver and vhost-backend:
the driver switches to the new address has just been allocated, while the
vhost-backend still sticks to the old address has been assigned in the init
stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way to do is allocate the queues in the init stage, so that the
vring info could be persistent with the vhost-user backend.
Besides that, we should allocate max_queue pairs the device supports, but
not nr queue pairs firstly configured, to make following case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is switched to init stage, the free should also
moved from the rx/tx_queue_release to dev close stage. That leading we
could do nothing an empty rx/tx_queue_release() implementation.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
|
|
|
uint16_t nr_vq = virtio_get_nr_vq(hw);
|
|
|
|
uint16_t i;
|
|
|
|
int ret;
|
2015-07-15 13:51:03 +00:00
|
|
|
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring addreess) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more. Leading to a state
that the vring info mismatches between virtio PMD driver and vhost-backend:
the driver switches to the new address has just been allocated, while the
vhost-backend still sticks to the old address has been assigned in the init
stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way to do is allocate the queues in the init stage, so that the
vring info could be persistent with the vhost-user backend.
Besides that, we should allocate max_queue pairs the device supports, but
not nr queue pairs firstly configured, to make following case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is switched to init stage, the free should also
moved from the rx/tx_queue_release to dev close stage. That leading we
could do nothing an empty rx/tx_queue_release() implementation.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
|
|
|
|
if (!hw->vqs) {
|
|
|
|
PMD_INIT_LOG(ERR, "failed to allocate vqs");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2015-07-15 13:51:03 +00:00
|
|
|
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring addreess) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more. Leading to a state
that the vring info mismatches between virtio PMD driver and vhost-backend:
the driver switches to the new address has just been allocated, while the
vhost-backend still sticks to the old address has been assigned in the init
stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way to do is allocate the queues in the init stage, so that the
vring info could be persistent with the vhost-user backend.
Besides that, we should allocate max_queue pairs the device supports, but
not nr queue pairs firstly configured, to make following case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is switched to init stage, the free should also
moved from the rx/tx_queue_release to dev close stage. That leading we
could do nothing an empty rx/tx_queue_release() implementation.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
for (i = 0; i < nr_vq; i++) {
|
|
|
|
ret = virtio_init_queue(dev, i);
|
|
|
|
if (ret < 0) {
|
|
|
|
virtio_free_queues(hw);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
2015-07-15 13:51:03 +00:00
|
|
|
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring addreess) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more. Leading to a state
that the vring info mismatches between virtio PMD driver and vhost-backend:
the driver switches to the new address has just been allocated, while the
vhost-backend still sticks to the old address has been assigned in the init
stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way to do is allocate the queues in the init stage, so that the
vring info could be persistent with the vhost-user backend.
Besides that, we should allocate max_queue pairs the device supports, but
not nr queue pairs firstly configured, to make following case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is switched to init stage, the free should also
moved from the rx/tx_queue_release to dev close stage. That leading we
could do nothing an empty rx/tx_queue_release() implementation.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
return 0;
|
2015-07-15 13:51:03 +00:00
|
|
|
}
|
|
|
|
|
2017-01-17 07:10:28 +00:00
|
|
|
static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);
|
|
|
|
|
2014-02-12 16:44:44 +00:00
|
|
|
/*
 * Close the device: disable interrupts, reset the device, free mbufs
 * and virtqueues, and unmap PCI resources (or uninit the virtio-user
 * backend).  Idempotent via the hw->opened flag.
 */
static void
virtio_dev_close(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;

	PMD_INIT_LOG(DEBUG, "virtio_dev_close");

	/* Only tear down once; hw->opened is set when the device starts. */
	if (!hw->opened)
		return;
	hw->opened = false;

	/* reset the NIC */
	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		VTPCI_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
	if (intr_conf->rxq)
		virtio_queues_unbind_intr(dev);

	/* Disable and release interrupt resources if any were configured. */
	if (intr_conf->lsc || intr_conf->rxq) {
		virtio_intr_disable(dev);
		rte_intr_efd_disable(dev->intr_handle);
		rte_free(dev->intr_handle->intr_vec);
		dev->intr_handle->intr_vec = NULL;
	}

	/* Device reset must precede freeing queues the device may still use. */
	vtpci_reset(hw);
	virtio_dev_free_mbufs(dev);
	virtio_free_queues(hw);

	/* virtio-user devices have no PCI resources to unmap; the #ifdef'd
	 * else chains the PCI path so exactly one branch runs.
	 */
#ifdef RTE_VIRTIO_USER
	if (hw->virtio_user_dev)
		virtio_user_dev_uninit(hw->virtio_user_dev);
	else
#endif
	if (dev->device) {
		rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(dev));
		/* Legacy (non-modern) devices additionally use an I/O port BAR. */
		if (!hw->modern)
			rte_pci_ioport_unmap(VTPCI_IO(hw));
	}
}
|
|
|
|
|
2019-09-14 11:37:24 +00:00
|
|
|
static int
|
2014-11-08 04:26:15 +00:00
|
|
|
virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
|
|
|
|
{
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2014-11-08 04:26:15 +00:00
|
|
|
struct virtio_pmd_ctrl ctrl;
|
|
|
|
int dlen[1];
|
|
|
|
int ret;
|
|
|
|
|
2015-06-11 15:53:24 +00:00
|
|
|
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
|
2017-01-27 15:16:32 +00:00
|
|
|
PMD_INIT_LOG(INFO, "host does not support rx control");
|
2019-09-14 11:37:24 +00:00
|
|
|
return -ENOTSUP;
|
2015-06-11 15:53:24 +00:00
|
|
|
}
|
|
|
|
|
2014-11-08 04:26:15 +00:00
|
|
|
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
|
|
|
|
ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
|
|
|
|
ctrl.data[0] = 1;
|
|
|
|
dlen[0] = 1;
|
|
|
|
|
|
|
|
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
|
2019-09-14 11:37:24 +00:00
|
|
|
if (ret) {
|
2014-11-08 04:26:15 +00:00
|
|
|
PMD_INIT_LOG(ERR, "Failed to enable promisc");
|
2019-09-14 11:37:24 +00:00
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2014-11-08 04:26:15 +00:00
|
|
|
}
|
|
|
|
|
2019-09-14 11:37:24 +00:00
|
|
|
static int
|
2014-11-08 04:26:15 +00:00
|
|
|
virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
|
|
|
|
{
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2014-11-08 04:26:15 +00:00
|
|
|
struct virtio_pmd_ctrl ctrl;
|
|
|
|
int dlen[1];
|
|
|
|
int ret;
|
|
|
|
|
2015-06-11 15:53:24 +00:00
|
|
|
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
|
2017-01-27 15:16:32 +00:00
|
|
|
PMD_INIT_LOG(INFO, "host does not support rx control");
|
2019-09-14 11:37:24 +00:00
|
|
|
return -ENOTSUP;
|
2015-06-11 15:53:24 +00:00
|
|
|
}
|
|
|
|
|
2014-11-08 04:26:15 +00:00
|
|
|
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
|
|
|
|
ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
|
|
|
|
ctrl.data[0] = 0;
|
|
|
|
dlen[0] = 1;
|
|
|
|
|
|
|
|
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
|
2019-09-14 11:37:24 +00:00
|
|
|
if (ret) {
|
2014-11-08 04:26:15 +00:00
|
|
|
PMD_INIT_LOG(ERR, "Failed to disable promisc");
|
2019-09-14 11:37:24 +00:00
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2014-11-08 04:26:15 +00:00
|
|
|
}
|
|
|
|
|
2019-09-24 12:56:10 +00:00
|
|
|
static int
|
2014-11-08 04:26:15 +00:00
|
|
|
virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
|
|
|
|
{
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2014-11-08 04:26:15 +00:00
|
|
|
struct virtio_pmd_ctrl ctrl;
|
|
|
|
int dlen[1];
|
|
|
|
int ret;
|
|
|
|
|
2015-06-11 15:53:24 +00:00
|
|
|
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
|
2017-01-27 15:16:32 +00:00
|
|
|
PMD_INIT_LOG(INFO, "host does not support rx control");
|
2019-09-24 12:56:10 +00:00
|
|
|
return -ENOTSUP;
|
2015-06-11 15:53:24 +00:00
|
|
|
}
|
|
|
|
|
2014-11-08 04:26:15 +00:00
|
|
|
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
|
|
|
|
ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
|
|
|
|
ctrl.data[0] = 1;
|
|
|
|
dlen[0] = 1;
|
|
|
|
|
|
|
|
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
|
2019-09-24 12:56:10 +00:00
|
|
|
if (ret) {
|
2014-11-08 04:26:15 +00:00
|
|
|
PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
|
2019-09-24 12:56:10 +00:00
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2014-11-08 04:26:15 +00:00
|
|
|
}
|
|
|
|
|
2019-09-24 12:56:10 +00:00
|
|
|
static int
|
2014-11-08 04:26:15 +00:00
|
|
|
virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
|
|
|
|
{
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2014-11-08 04:26:15 +00:00
|
|
|
struct virtio_pmd_ctrl ctrl;
|
|
|
|
int dlen[1];
|
|
|
|
int ret;
|
|
|
|
|
2015-06-11 15:53:24 +00:00
|
|
|
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
|
2017-01-27 15:16:32 +00:00
|
|
|
PMD_INIT_LOG(INFO, "host does not support rx control");
|
2019-09-24 12:56:10 +00:00
|
|
|
return -ENOTSUP;
|
2015-06-11 15:53:24 +00:00
|
|
|
}
|
|
|
|
|
2014-11-08 04:26:15 +00:00
|
|
|
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
|
|
|
|
ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
|
|
|
|
ctrl.data[0] = 0;
|
|
|
|
dlen[0] = 1;
|
|
|
|
|
|
|
|
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
|
2019-09-24 12:56:10 +00:00
|
|
|
if (ret) {
|
2014-11-08 04:26:15 +00:00
|
|
|
PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
|
2019-09-24 12:56:10 +00:00
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2014-11-08 04:26:15 +00:00
|
|
|
}
|
|
|
|
|
2016-10-09 03:38:26 +00:00
|
|
|
#define VLAN_TAG_LEN 4 /* 802.3ac tag (not DMA'd) */
|
|
|
|
static int
|
|
|
|
virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
|
|
|
{
|
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2019-05-21 16:13:05 +00:00
|
|
|
uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
|
2016-10-09 03:38:26 +00:00
|
|
|
hw->vtnet_hdr_size;
|
|
|
|
uint32_t frame_size = mtu + ether_hdr_len;
|
2017-03-12 16:34:04 +00:00
|
|
|
uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;
|
2016-10-09 03:38:26 +00:00
|
|
|
|
2017-03-12 16:34:04 +00:00
|
|
|
max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);
|
|
|
|
|
2019-05-21 16:13:05 +00:00
|
|
|
if (mtu < RTE_ETHER_MIN_MTU || frame_size > max_frame_size) {
|
2017-01-27 15:16:32 +00:00
|
|
|
PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
|
2019-05-21 16:13:05 +00:00
|
|
|
RTE_ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
|
2016-10-09 03:38:26 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-01-17 07:10:25 +00:00
|
|
|
static int
|
|
|
|
virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
|
|
|
|
{
|
2019-03-19 06:43:05 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2017-01-17 07:10:25 +00:00
|
|
|
struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
|
|
|
|
struct virtqueue *vq = rxvq->vq;
|
|
|
|
|
|
|
|
virtqueue_enable_intr(vq);
|
2019-03-19 06:43:05 +00:00
|
|
|
virtio_mb(hw->weak_barriers);
|
2017-01-17 07:10:25 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
|
|
|
|
{
|
|
|
|
struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
|
|
|
|
struct virtqueue *vq = rxvq->vq;
|
|
|
|
|
|
|
|
virtqueue_disable_intr(vq);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-09-18 10:00:00 +00:00
|
|
|
/*
|
|
|
|
* dev_ops for virtio, bare necessities for basic operation
|
|
|
|
*/
|
2015-04-07 21:21:03 +00:00
|
|
|
static const struct eth_dev_ops virtio_eth_dev_ops = {
	/* device lifecycle */
	.dev_configure = virtio_dev_configure,
	.dev_start = virtio_dev_start,
	.dev_stop = virtio_dev_stop,
	.dev_close = virtio_dev_close,
	/* rx-mode control (via the control virtqueue) */
	.promiscuous_enable = virtio_dev_promiscuous_enable,
	.promiscuous_disable = virtio_dev_promiscuous_disable,
	.allmulticast_enable = virtio_dev_allmulticast_enable,
	.allmulticast_disable = virtio_dev_allmulticast_disable,
	.mtu_set = virtio_mtu_set,
	/* info and statistics */
	.dev_infos_get = virtio_dev_info_get,
	.stats_get = virtio_dev_stats_get,
	.xstats_get = virtio_dev_xstats_get,
	.xstats_get_names = virtio_dev_xstats_get_names,
	.stats_reset = virtio_dev_stats_reset,
	.xstats_reset = virtio_dev_stats_reset,
	.link_update = virtio_dev_link_update,
	.vlan_offload_set = virtio_dev_vlan_offload_set,
	/* queue management; release is a no-op since queues live for the
	 * whole device lifetime (freed at dev_close) */
	.rx_queue_setup = virtio_dev_rx_queue_setup,
	.rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
	.rx_queue_release = virtio_dev_queue_release,
	.rx_descriptor_done = virtio_dev_rx_queue_done,
	.tx_queue_setup = virtio_dev_tx_queue_setup,
	.tx_queue_release = virtio_dev_queue_release,
	/* collect stats per queue */
	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
	/* VLAN filtering and MAC management */
	.vlan_filter_set = virtio_vlan_filter_set,
	.mac_addr_add = virtio_mac_addr_add,
	.mac_addr_remove = virtio_mac_addr_remove,
	.mac_addr_set = virtio_mac_addr_set,
};
|
|
|
|
|
2019-03-25 04:12:15 +00:00
|
|
|
/*
|
|
|
|
* dev_ops for virtio-user in secondary processes, as we just have
|
|
|
|
* some limited supports currently.
|
|
|
|
*/
|
|
|
|
const struct eth_dev_ops virtio_user_secondary_eth_dev_ops = {
	/* read-only info/statistics callbacks only: secondary processes
	 * must not reconfigure or tear down the device */
	.dev_infos_get = virtio_dev_info_get,
	.stats_get = virtio_dev_stats_get,
	.xstats_get = virtio_dev_xstats_get,
	.xstats_get_names = virtio_dev_xstats_get_names,
	.stats_reset = virtio_dev_stats_reset,
	.xstats_reset = virtio_dev_stats_reset,
	/* collect stats per queue */
	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
};
|
|
|
|
|
2013-09-18 10:00:00 +00:00
|
|
|
static void
|
2015-11-02 10:19:00 +00:00
|
|
|
virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
|
2013-09-18 10:00:00 +00:00
|
|
|
{
|
2014-06-14 01:06:18 +00:00
|
|
|
unsigned i;
|
|
|
|
|
|
|
|
for (i = 0; i < dev->data->nb_tx_queues; i++) {
|
2016-06-01 16:12:13 +00:00
|
|
|
const struct virtnet_tx *txvq = dev->data->tx_queues[i];
|
2014-06-14 01:06:18 +00:00
|
|
|
if (txvq == NULL)
|
|
|
|
continue;
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
stats->opackets += txvq->stats.packets;
|
|
|
|
stats->obytes += txvq->stats.bytes;
|
2014-06-14 01:06:18 +00:00
|
|
|
|
|
|
|
if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
|
2016-06-01 16:12:13 +00:00
|
|
|
stats->q_opackets[i] = txvq->stats.packets;
|
|
|
|
stats->q_obytes[i] = txvq->stats.bytes;
|
2014-06-14 01:06:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < dev->data->nb_rx_queues; i++) {
|
2016-06-01 16:12:13 +00:00
|
|
|
const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
|
2014-06-14 01:06:18 +00:00
|
|
|
if (rxvq == NULL)
|
|
|
|
continue;
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
stats->ipackets += rxvq->stats.packets;
|
|
|
|
stats->ibytes += rxvq->stats.bytes;
|
|
|
|
stats->ierrors += rxvq->stats.errors;
|
2014-06-14 01:06:18 +00:00
|
|
|
|
|
|
|
if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
|
2016-06-01 16:12:13 +00:00
|
|
|
stats->q_ipackets[i] = rxvq->stats.packets;
|
|
|
|
stats->q_ibytes[i] = rxvq->stats.bytes;
|
2014-06-14 01:06:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
2016-06-15 15:25:32 +00:00
|
|
|
static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
|
|
|
|
struct rte_eth_xstat_name *xstats_names,
|
|
|
|
__rte_unused unsigned limit)
|
|
|
|
{
|
|
|
|
unsigned i;
|
|
|
|
unsigned count = 0;
|
|
|
|
unsigned t;
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
|
|
|
|
dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
|
2016-06-15 15:25:32 +00:00
|
|
|
|
2016-06-20 10:43:32 +00:00
|
|
|
if (xstats_names != NULL) {
|
2016-06-15 15:25:32 +00:00
|
|
|
/* Note: limit checked in rte_eth_xstats_names() */
|
|
|
|
|
|
|
|
for (i = 0; i < dev->data->nb_rx_queues; i++) {
|
2017-12-14 14:33:43 +00:00
|
|
|
struct virtnet_rx *rxvq = dev->data->rx_queues[i];
|
2016-06-15 15:25:32 +00:00
|
|
|
if (rxvq == NULL)
|
|
|
|
continue;
|
2016-06-01 16:12:13 +00:00
|
|
|
for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
|
2016-06-15 15:25:32 +00:00
|
|
|
snprintf(xstats_names[count].name,
|
|
|
|
sizeof(xstats_names[count].name),
|
|
|
|
"rx_q%u_%s", i,
|
2016-06-01 16:12:13 +00:00
|
|
|
rte_virtio_rxq_stat_strings[t].name);
|
2016-06-15 15:25:32 +00:00
|
|
|
count++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < dev->data->nb_tx_queues; i++) {
|
2017-12-14 14:33:43 +00:00
|
|
|
struct virtnet_tx *txvq = dev->data->tx_queues[i];
|
2016-06-15 15:25:32 +00:00
|
|
|
if (txvq == NULL)
|
|
|
|
continue;
|
2016-06-01 16:12:13 +00:00
|
|
|
for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
|
2016-06-15 15:25:32 +00:00
|
|
|
snprintf(xstats_names[count].name,
|
|
|
|
sizeof(xstats_names[count].name),
|
|
|
|
"tx_q%u_%s", i,
|
2016-06-01 16:12:13 +00:00
|
|
|
rte_virtio_txq_stat_strings[t].name);
|
2016-06-15 15:25:32 +00:00
|
|
|
count++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
return nstats;
|
|
|
|
}
|
|
|
|
|
2015-11-02 10:19:00 +00:00
|
|
|
/*
 * Retrieve the per-queue extended statistics.
 *
 * If the caller-provided array is too small (@n < total stat count) the
 * required size is returned without writing anything; otherwise the array
 * is filled and the number of written entries is returned.  Entry order
 * matches virtio_dev_xstats_get_names(): all Rx queues first, then all
 * Tx queues.
 */
static int
virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		      unsigned n)
{
	unsigned i;
	unsigned count = 0;

	/* Total number of exported stats: fixed set per Rx and Tx queue. */
	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
		dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;

	if (n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct virtnet_rx *rxvq = dev->data->rx_queues[i];

		if (rxvq == NULL)
			continue;

		unsigned t;

		for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
			/* Each counter is read at a byte offset into the
			 * queue structure, as listed in the strings table.
			 */
			xstats[count].value = *(uint64_t *)(((char *)rxvq) +
				rte_virtio_rxq_stat_strings[t].offset);
			/* ids are positional; names table uses same order */
			xstats[count].id = count;
			count++;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct virtnet_tx *txvq = dev->data->tx_queues[i];

		if (txvq == NULL)
			continue;

		unsigned t;

		for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
			xstats[count].value = *(uint64_t *)(((char *)txvq) +
				rte_virtio_txq_stat_strings[t].offset);
			xstats[count].id = count;
			count++;
		}
	}

	return count;
}
|
|
|
|
|
2017-10-10 20:20:18 +00:00
|
|
|
/*
 * Basic statistics callback for the ethdev API.
 *
 * Aggregation of the per-queue software counters into @stats is done by
 * virtio_update_stats(); this wrapper only adapts the return convention.
 */
static int
virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	virtio_update_stats(dev, stats);
	return 0;
}
|
|
|
|
|
2019-09-06 14:34:54 +00:00
|
|
|
static int
|
2013-09-18 10:00:00 +00:00
|
|
|
virtio_dev_stats_reset(struct rte_eth_dev *dev)
|
|
|
|
{
|
2014-06-14 01:06:18 +00:00
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < dev->data->nb_tx_queues; i++) {
|
2016-06-01 16:12:13 +00:00
|
|
|
struct virtnet_tx *txvq = dev->data->tx_queues[i];
|
2014-06-14 01:06:18 +00:00
|
|
|
if (txvq == NULL)
|
|
|
|
continue;
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
txvq->stats.packets = 0;
|
|
|
|
txvq->stats.bytes = 0;
|
|
|
|
txvq->stats.multicast = 0;
|
|
|
|
txvq->stats.broadcast = 0;
|
|
|
|
memset(txvq->stats.size_bins, 0,
|
|
|
|
sizeof(txvq->stats.size_bins[0]) * 8);
|
2014-06-14 01:06:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < dev->data->nb_rx_queues; i++) {
|
2016-06-01 16:12:13 +00:00
|
|
|
struct virtnet_rx *rxvq = dev->data->rx_queues[i];
|
2014-06-14 01:06:18 +00:00
|
|
|
if (rxvq == NULL)
|
|
|
|
continue;
|
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
rxvq->stats.packets = 0;
|
|
|
|
rxvq->stats.bytes = 0;
|
|
|
|
rxvq->stats.errors = 0;
|
|
|
|
rxvq->stats.multicast = 0;
|
|
|
|
rxvq->stats.broadcast = 0;
|
|
|
|
memset(rxvq->stats.size_bins, 0,
|
|
|
|
sizeof(rxvq->stats.size_bins[0]) * 8);
|
2014-06-14 01:06:18 +00:00
|
|
|
}
|
2019-09-06 14:34:54 +00:00
|
|
|
|
|
|
|
return 0;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Program the MAC address cached in hw->mac_addr into the device's
 * config space (the "mac" field of struct virtio_net_config).
 */
static void
virtio_set_hwaddr(struct virtio_hw *hw)
{
	vtpci_write_dev_config(hw,
		offsetof(struct virtio_net_config, mac),
		&hw->mac_addr, RTE_ETHER_ADDR_LEN);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
virtio_get_hwaddr(struct virtio_hw *hw)
|
|
|
|
{
|
|
|
|
if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
|
|
|
|
vtpci_read_dev_config(hw,
|
|
|
|
offsetof(struct virtio_net_config, mac),
|
2019-05-21 16:13:05 +00:00
|
|
|
&hw->mac_addr, RTE_ETHER_ADDR_LEN);
|
2013-09-18 10:00:00 +00:00
|
|
|
} else {
|
2019-05-21 16:13:04 +00:00
|
|
|
rte_eth_random_addr(&hw->mac_addr[0]);
|
2013-09-18 10:00:00 +00:00
|
|
|
virtio_set_hwaddr(hw);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-05 00:40:00 +00:00
|
|
|
/*
 * Push the full unicast (@uc) and multicast (@mc) MAC filter tables to
 * the device over the control virtqueue (VIRTIO_NET_CTRL_MAC_TABLE_SET).
 *
 * Returns 0 on success, -1 if the host lacks VIRTIO_NET_F_CTRL_MAC_ADDR,
 * or the (non-zero) error from virtio_send_command().
 */
static int
virtio_mac_table_set(struct virtio_hw *hw,
		     const struct virtio_net_ctrl_mac *uc,
		     const struct virtio_net_ctrl_mac *mc)
{
	struct virtio_pmd_ctrl ctrl;
	int err, len[2];

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		PMD_DRV_LOG(INFO, "host does not support mac table");
		return -1;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;

	/* The command payload packs both tables back-to-back: entry count
	 * followed by that many 6-byte addresses, unicast table first.
	 */
	len[0] = uc->entries * RTE_ETHER_ADDR_LEN + sizeof(uc->entries);
	memcpy(ctrl.data, uc, len[0]);

	len[1] = mc->entries * RTE_ETHER_ADDR_LEN + sizeof(mc->entries);
	memcpy(ctrl.data + len[0], mc, len[1]);

	err = virtio_send_command(hw->cvq, &ctrl, len, 2);
	if (err != 0)
		PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
	return err;
}
|
|
|
|
|
2017-05-05 00:40:00 +00:00
|
|
|
/*
 * ethdev callback: add/replace the MAC address at @index and re-send the
 * whole filter table.  Every configured address (with @mac_addr replacing
 * slot @index) is sorted into a unicast or multicast table, then both are
 * pushed to the device via virtio_mac_table_set().
 */
static int
virtio_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		    uint32_t index, uint32_t vmdq __rte_unused)
{
	struct virtio_hw *hw = dev->data->dev_private;
	const struct rte_ether_addr *addrs = dev->data->mac_addrs;
	unsigned int i;
	struct virtio_net_ctrl_mac *uc, *mc;

	if (index >= VIRTIO_MAX_MAC_ADDRS) {
		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
		return -EINVAL;
	}

	/* Stack allocation is bounded: at most VIRTIO_MAX_MAC_ADDRS
	 * 6-byte entries plus the entry counter, for each table.
	 * NOTE(review): alloca() is used rather than fixed-size locals;
	 * safe here because the size is a compile-time-bounded constant.
	 */
	uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
		sizeof(uc->entries));
	uc->entries = 0;
	mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
		sizeof(mc->entries));
	mc->entries = 0;

	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
		/* Substitute the new address at the requested slot. */
		const struct rte_ether_addr *addr
			= (i == index) ? mac_addr : addrs + i;
		struct virtio_net_ctrl_mac *tbl
			= rte_is_multicast_ether_addr(addr) ? mc : uc;

		memcpy(&tbl->macs[tbl->entries++], addr, RTE_ETHER_ADDR_LEN);
	}

	return virtio_mac_table_set(hw, uc, mc);
}
|
|
|
|
|
|
|
|
/*
 * ethdev callback: remove the MAC address at @index and re-send the
 * remaining filter table.  All other non-zero addresses are re-sorted
 * into unicast/multicast tables and pushed to the device; errors from
 * virtio_mac_table_set() are logged there but not propagated (the
 * callback returns void).
 */
static void
virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct rte_ether_addr *addrs = dev->data->mac_addrs;
	struct virtio_net_ctrl_mac *uc, *mc;
	unsigned int i;

	if (index >= VIRTIO_MAX_MAC_ADDRS) {
		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
		return;
	}

	/* Bounded stack allocation; see virtio_mac_addr_add(). */
	uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
		sizeof(uc->entries));
	uc->entries = 0;
	mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
		sizeof(mc->entries));
	mc->entries = 0;

	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
		struct virtio_net_ctrl_mac *tbl;

		/* Skip the removed slot and empty (all-zero) slots. */
		if (i == index || rte_is_zero_ether_addr(addrs + i))
			continue;

		tbl = rte_is_multicast_ether_addr(addrs + i) ? mc : uc;
		memcpy(&tbl->macs[tbl->entries++], addrs + i,
		       RTE_ETHER_ADDR_LEN);
	}

	virtio_mac_table_set(hw, uc, mc);
}
|
|
|
|
|
2018-04-11 16:32:51 +00:00
|
|
|
/*
 * ethdev callback: set the device's primary MAC address.
 *
 * The address is always cached in hw->mac_addr.  If the device supports
 * VIRTIO_NET_F_CTRL_MAC_ADDR the update is done atomically through the
 * control queue; otherwise it falls back to a direct config-space write,
 * which requires VIRTIO_NET_F_MAC (-ENOTSUP if absent).
 */
static int
virtio_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct virtio_hw *hw = dev->data->dev_private;

	memcpy(hw->mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);

	/* Use atomic update if available */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		struct virtio_pmd_ctrl ctrl;
		int len = RTE_ETHER_ADDR_LEN;

		ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
		ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;

		memcpy(ctrl.data, mac_addr, RTE_ETHER_ADDR_LEN);
		return virtio_send_command(hw->cvq, &ctrl, &len, 1);
	}

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
		return -ENOTSUP;

	/* Non-atomic fallback: write the cached address to config space. */
	virtio_set_hwaddr(hw);
	return 0;
}
|
|
|
|
|
2015-02-09 01:14:02 +00:00
|
|
|
static int
|
|
|
|
virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
|
|
|
|
{
|
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
|
|
|
struct virtio_pmd_ctrl ctrl;
|
|
|
|
int len;
|
|
|
|
|
|
|
|
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
|
|
|
|
return -ENOTSUP;
|
|
|
|
|
|
|
|
ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
|
|
|
|
ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
|
|
|
|
memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
|
|
|
|
len = sizeof(vlan_id);
|
|
|
|
|
|
|
|
return virtio_send_command(hw->cvq, &ctrl, &len, 1);
|
|
|
|
}
|
2013-09-18 10:00:00 +00:00
|
|
|
|
2019-07-23 08:04:19 +00:00
|
|
|
static int
|
|
|
|
virtio_intr_unmask(struct rte_eth_dev *dev)
|
|
|
|
{
|
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
|
|
|
|
|
|
|
if (rte_intr_ack(dev->intr_handle) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (!hw->virtio_user_dev)
|
|
|
|
hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-11-09 09:21:24 +00:00
|
|
|
static int
|
|
|
|
virtio_intr_enable(struct rte_eth_dev *dev)
|
|
|
|
{
|
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
|
|
|
|
|
|
|
if (rte_intr_enable(dev->intr_handle) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (!hw->virtio_user_dev)
|
|
|
|
hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
virtio_intr_disable(struct rte_eth_dev *dev)
|
|
|
|
{
|
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
|
|
|
|
|
|
|
if (rte_intr_disable(dev->intr_handle) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (!hw->virtio_user_dev)
|
|
|
|
hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
virtio: support specification 1.0
Modern (v1.0) virtio pci device defines several pci capabilities.
Each cap has a configure structure corresponding to it, and the
cap.bar and cap.offset fields tell us where to find it.
Firstly, we map the pci resources by rte_eal_pci_map_device().
We then could easily locate a cfg structure by:
cfg_addr = dev->mem_resources[cap.bar].addr + cap.offset;
Therefore, the entrance of enabling modern (v1.0) pci device support
is to iterate the pci capability lists, and to locate some configs
we care; and they are:
- common cfg
For generic virtio and virtqueue configuration, such as setting/getting
features, enabling a specific queue, and so on.
- nofity cfg
Combining with `queue_notify_off' from common cfg, we could use it to
notify a specific virt queue.
- device cfg
Where virtio_net_config structure is located.
- isr cfg
Where to read isr (interrupt status).
If any of above cap is not found, we fallback to the legacy virtio
handling.
If succeed, hw->vtpci_ops is assigned to modern_ops, where all
operations are implemented by reading/writing a (or few) specific
configuration space from above 4 cfg structures. And that's basically
how this patch works.
Besides those changes, virtio 1.0 introduces a new status field:
FEATURES_OK, which is set after features negotiation is done.
Last, set the VIRTIO_F_VERSION_1 feature flag.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Tested-by: Qian Xu <qian.q.xu@intel.com>
Reviewed-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Tested-by: Tetsuya Mukawa <mukawa@igel.co.jp>
Acked-by: Huawei Xie <huawei.xie@intel.com>
2016-02-02 13:48:19 +00:00
|
|
|
/*
 * Negotiate the feature set with the device.
 *
 * Reads the host feature bits, optionally drops VIRTIO_NET_F_MTU when the
 * device-advertised MTU is invalid, then writes back the intersection of
 * host and requested features.  For modern (virtio 1.0) devices the
 * FEATURES_OK status handshake is performed and VIRTIO_F_VERSION_1 must
 * have been accepted.  Returns 0 on success, -1 on negotiation failure.
 */
static int
virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
{
	uint64_t host_features;

	/* Prepare guest_features: feature that driver wants to support */
	PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
		req_features);

	/* Read device(host) feature bits */
	host_features = VTPCI_OPS(hw)->get_features(hw);
	PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
		host_features);

	/* If supported, ensure MTU value is valid before acknowledging it. */
	if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
		struct virtio_net_config config;

		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mtu),
			&config.mtu, sizeof(config.mtu));

		if (config.mtu < RTE_ETHER_MIN_MTU)
			req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
	}

	/*
	 * Negotiate features: Subset of device feature bits are written back
	 * guest feature bits.
	 */
	hw->guest_features = req_features;
	hw->guest_features = vtpci_negotiate_features(hw, host_features);
	PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
		hw->guest_features);

	if (hw->modern) {
		/* virtio 1.0 devices must accept VIRTIO_F_VERSION_1 and
		 * confirm the negotiated set via the FEATURES_OK handshake.
		 */
		if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
			PMD_INIT_LOG(ERR,
				"VIRTIO_F_VERSION_1 features is not enabled.");
			return -1;
		}
		vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
		if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
			PMD_INIT_LOG(ERR,
				"failed to set FEATURES_OK status!");
			return -1;
		}
	}

	/* Remember what was asked for (after the MTU sanity fixup). */
	hw->req_guest_features = req_features;

	return 0;
}
|
|
|
|
|
2018-01-10 01:23:53 +00:00
|
|
|
/*
 * Pause the datapath so packets can be injected safely.
 *
 * On success (return 0) hw->state_lock is LEFT HELD and hw->started is
 * cleared; the caller must later call virtio_dev_resume() to release the
 * lock.  Returns -1 (with the lock released) if the device is already
 * stopped.
 */
int
virtio_dev_pause(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	rte_spinlock_lock(&hw->state_lock);

	if (hw->started == 0) {
		/* Device is just stopped. */
		rte_spinlock_unlock(&hw->state_lock);
		return -1;
	}
	hw->started = 0;
	/*
	 * Prevent the worker threads from touching queues to avoid contention,
	 * 1 ms should be enough for the ongoing Tx function to finish.
	 */
	rte_delay_ms(1);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Recover hw state to let the worker threads continue.
 *
 * Counterpart of virtio_dev_pause(): re-sets hw->started and releases
 * hw->state_lock that pause left held.  Must only be called after a
 * successful virtio_dev_pause().
 */
void
virtio_dev_resume(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	hw->started = 1;
	rte_spinlock_unlock(&hw->state_lock);
}
|
|
|
|
|
|
|
|
/*
 * Should be called only after device is paused.
 *
 * Transmits @nb_pkts driver-generated packets (e.g. RARP announcements)
 * on Tx queue 0.  hw->inject_pkts is set around the burst call so the
 * Tx path can recognize the injected buffers.  Returns the number of
 * packets actually transmitted.
 */
int
virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
		int nb_pkts)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtnet_tx *txvq = dev->data->tx_queues[0];
	int ret;

	hw->inject_pkts = tx_pkts;
	ret = dev->tx_pkt_burst(txvq, tx_pkts, nb_pkts);
	hw->inject_pkts = NULL;

	return ret;
}
|
|
|
|
|
2018-01-18 02:20:38 +00:00
|
|
|
/*
 * Broadcast a gratuitous RARP packet (built from hw->mac_addr) so peers
 * and switches learn the port's location, e.g. after live migration.
 * Silently returns if no Rx queue is available to supply the mempool,
 * or if the port is stopping.  The datapath is paused around the
 * injection and resumed afterwards.
 */
static void
virtio_notify_peers(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtnet_rx *rxvq;
	struct rte_mbuf *rarp_mbuf;

	if (!dev->data->rx_queues)
		return;

	/* Rx queue 0's mempool is borrowed to allocate the RARP mbuf. */
	rxvq = dev->data->rx_queues[0];
	if (!rxvq)
		return;

	rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
			(struct rte_ether_addr *)hw->mac_addr);
	if (rarp_mbuf == NULL) {
		PMD_DRV_LOG(ERR, "failed to make RARP packet.");
		return;
	}

	/* If virtio port just stopped, no need to send RARP */
	if (virtio_dev_pause(dev) < 0) {
		rte_pktmbuf_free(rarp_mbuf);
		return;
	}

	virtio_inject_pkts(dev, &rarp_mbuf, 1);
	virtio_dev_resume(dev);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
virtio_ack_link_announce(struct rte_eth_dev *dev)
|
|
|
|
{
|
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
|
|
|
struct virtio_pmd_ctrl ctrl;
|
|
|
|
|
|
|
|
ctrl.hdr.class = VIRTIO_NET_CTRL_ANNOUNCE;
|
|
|
|
ctrl.hdr.cmd = VIRTIO_NET_CTRL_ANNOUNCE_ACK;
|
|
|
|
|
|
|
|
virtio_send_command(hw->cvq, &ctrl, NULL, 0);
|
|
|
|
}
|
|
|
|
|
2015-02-09 01:13:53 +00:00
|
|
|
/*
 * Process virtio config changed interrupt. Call the callback
 * if link state changed, generate gratuitous RARP packet if
 * the status indicates an ANNOUNCE.
 */
void
virtio_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct virtio_hw *hw = dev->data->dev_private;
	uint8_t isr;
	uint16_t status;

	/* Read interrupt status which clears interrupt */
	isr = vtpci_isr(hw);
	PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);

	/* Re-arm the interrupt before processing, so no event is lost. */
	if (virtio_intr_unmask(dev) < 0)
		PMD_DRV_LOG(ERR, "interrupt enable failed");

	if (isr & VIRTIO_PCI_ISR_CONFIG) {
		/* Link state change: notify registered LSC callbacks. */
		if (virtio_dev_link_update(dev, 0) == 0)
			_rte_eth_dev_callback_process(dev,
						      RTE_ETH_EVENT_INTR_LSC,
						      NULL);

		if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&status, sizeof(status));
			/* Device asked us to announce ourselves (e.g. after
			 * migration): send RARP, then ack over the cvq.
			 */
			if (status & VIRTIO_NET_S_ANNOUNCE) {
				virtio_notify_peers(dev);
				if (hw->cvq)
					virtio_ack_link_announce(dev);
			}
		}
	}
}
|
|
|
|
|
2017-09-07 12:13:44 +00:00
|
|
|
/* set rx and tx handlers according to what is supported */
static void
set_rxtx_funcs(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;

	eth_dev->tx_pkt_prepare = virtio_xmit_pkts_prepare;

	/* Tx burst selection: packed ring vs split ring, in-order vs not. */
	if (vtpci_packed_queue(hw)) {
		PMD_INIT_LOG(INFO,
			"virtio: using packed ring %s Tx path on port %u",
			hw->use_inorder_tx ? "inorder" : "standard",
			eth_dev->data->port_id);
		eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
	} else {
		if (hw->use_inorder_tx) {
			PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
				eth_dev->data->port_id);
			eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
		} else {
			PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
				eth_dev->data->port_id);
			eth_dev->tx_pkt_burst = virtio_xmit_pkts;
		}
	}

	/* Rx burst selection: packed/split ring, mergeable buffers,
	 * vectorized ("simple") and in-order variants.
	 */
	if (vtpci_packed_queue(hw)) {
		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
			PMD_INIT_LOG(INFO,
				"virtio: using packed ring mergeable buffer Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst =
				&virtio_recv_mergeable_pkts_packed;
		} else {
			PMD_INIT_LOG(INFO,
				"virtio: using packed ring standard Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
		}
	} else {
		if (hw->use_simple_rx) {
			PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
		} else if (hw->use_inorder_rx) {
			PMD_INIT_LOG(INFO,
				"virtio: using inorder Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = &virtio_recv_pkts_inorder;
		} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
			PMD_INIT_LOG(INFO,
				"virtio: using mergeable buffer Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
		} else {
			PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = &virtio_recv_pkts;
		}
	}

}
|
|
|
|
|
2017-01-17 08:00:03 +00:00
|
|
|
/* Only support 1:1 queue/interrupt mapping so far.
 * TODO: support n:1 queue/interrupt mapping when there are limited number of
 * interrupt vectors (<N+1).
 */
static int
virtio_queues_bind_intr(struct rte_eth_dev *dev)
{
	uint32_t i;
	struct virtio_hw *hw = dev->data->dev_private;

	PMD_INIT_LOG(INFO, "queue/interrupt binding");
	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
		/* Vector 0 is reserved for config changes; Rx queue i
		 * gets vector i + 1.
		 */
		dev->intr_handle->intr_vec[i] = i + 1;
		/* NOTE(review): hw->vqs[i * 2] indexes Rx queue i —
		 * presumably vqs interleaves rx/tx per pair; the stride
		 * here (2) should stay consistent with the unbind path,
		 * which uses VTNET_CQ — confirm they are the same value.
		 */
		if (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
				VIRTIO_MSI_NO_VECTOR) {
			PMD_DRV_LOG(ERR, "failed to set queue vector");
			return -EBUSY;
		}
	}

	return 0;
}
|
|
|
|
|
2017-01-17 07:10:28 +00:00
|
|
|
static void
|
|
|
|
virtio_queues_unbind_intr(struct rte_eth_dev *dev)
|
|
|
|
{
|
|
|
|
uint32_t i;
|
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
|
|
|
|
2017-01-27 15:16:32 +00:00
|
|
|
PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
|
2017-01-17 07:10:28 +00:00
|
|
|
for (i = 0; i < dev->data->nb_rx_queues; ++i)
|
|
|
|
VTPCI_OPS(hw)->set_queue_irq(hw,
|
|
|
|
hw->vqs[i * VTNET_CQ],
|
|
|
|
VIRTIO_MSI_NO_VECTOR);
|
|
|
|
}
|
|
|
|
|
2017-01-17 08:00:03 +00:00
|
|
|
/* Set up per-Rx-queue (MSI-X) interrupts for the device.
 *
 * Steps, in a deliberate order: check multi-vector capability, create one
 * eventfd per Rx queue, allocate the queue->vector map, re-register the
 * interrupt callback, enable interrupts (which enables MSI-X), and finally
 * bind each queue to its vector.
 *
 * Returns 0 on success; -ENOTSUP, -ENOMEM or -1 on failure.
 *
 * NOTE(review): eventfds enabled and intr_vec allocated here are not
 * released on the later failure paths — presumably the caller's teardown
 * (device close/uninit) reclaims them; confirm against the callers.
 */
static int
virtio_configure_intr(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	/* Per-queue interrupts require multiple vectors (1:1 mapping only,
	 * see virtio_queues_bind_intr()). */
	if (!rte_intr_cap_multiple(dev->intr_handle)) {
		PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
		return -ENOTSUP;
	}

	/* One eventfd per Rx queue. */
	if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) {
		PMD_INIT_LOG(ERR, "Fail to create eventfd");
		return -1;
	}

	/* Allocate the queue->vector map once; sized by max_queue_pairs
	 * (the device maximum), not the currently configured queue count. */
	if (!dev->intr_handle->intr_vec) {
		dev->intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    hw->max_queue_pairs * sizeof(int), 0);
		if (!dev->intr_handle->intr_vec) {
			PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
				     hw->max_queue_pairs);
			return -ENOMEM;
		}
	}

	/* Re-register callback to update max_intr */
	rte_intr_callback_unregister(dev->intr_handle,
				     virtio_interrupt_handler,
				     dev);
	rte_intr_callback_register(dev->intr_handle,
				   virtio_interrupt_handler,
				   dev);

	/* DO NOT try to remove this! This function will enable msix, or QEMU
	 * will encounter SIGSEGV when DRIVER_OK is sent.
	 * And for legacy devices, this should be done before queue/vec binding
	 * to change the config size from 20 to 24, or VIRTIO_MSI_QUEUE_VECTOR
	 * (22) will be ignored.
	 */
	if (virtio_intr_enable(dev) < 0) {
		PMD_DRV_LOG(ERR, "interrupt enable failed");
		return -1;
	}

	/* Must come after virtio_intr_enable(); see the comment above. */
	if (virtio_queues_bind_intr(dev) < 0) {
		PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
		return -1;
	}

	return 0;
}
|
|
|
|
|
2016-10-13 14:16:02 +00:00
|
|
|
/* reset device and renegotiate features if needed */
static int
virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;
	struct virtio_net_config *config;
	struct virtio_net_config local_config;
	struct rte_pci_device *pci_dev = NULL;
	int ret;

	/* Reset the device although not necessary at startup */
	vtpci_reset(hw);

	/* On re-init, drop queues from the previous incarnation before
	 * renegotiating; they are reallocated further down. */
	if (hw->vqs) {
		virtio_dev_free_mbufs(eth_dev);
		virtio_free_queues(hw);
	}

	/* Tell the host we've noticed this device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we've known how to drive the device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
	if (virtio_negotiate_features(hw, req_features) < 0)
		return -1;

	/* Weak (compiler-only) barriers suffice unless the device demanded
	 * real platform memory ordering via VIRTIO_F_ORDER_PLATFORM. */
	hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);

	/* virtio-user (vdev) devices have no PCI handle. */
	if (!hw->virtio_user_dev)
		pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* If host does not support both status and MSI-X then disable LSC */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) &&
	    hw->use_msix != VIRTIO_MSIX_NONE)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	else
		eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;

	/* Setting up rx_header size for the device: mergeable-Rx, virtio 1.0
	 * and packed-ring devices all use the larger mrg_rxbuf header. */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
	    vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	/* Copy the permanent MAC address to: virtio_hw */
	virtio_get_hwaddr(hw);
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);
	PMD_INIT_LOG(DEBUG,
		     "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	/* With a control queue the device exposes a config space worth
	 * reading; otherwise fall back to single-queue defaults below. */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
		config = &local_config;

		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&config->mac, sizeof(config->mac));

		if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&config->status, sizeof(config->status));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "VIRTIO_NET_F_STATUS is not supported");
			config->status = 0;
		}

		if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, max_virtqueue_pairs),
				&config->max_virtqueue_pairs,
				sizeof(config->max_virtqueue_pairs));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "VIRTIO_NET_F_MQ is not supported");
			config->max_virtqueue_pairs = 1;
		}

		hw->max_queue_pairs = config->max_virtqueue_pairs;

		if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, mtu),
				&config->mtu,
				sizeof(config->mtu));

			/*
			 * MTU value has already been checked at negotiation
			 * time, but check again in case it has changed since
			 * then, which should not happen.
			 */
			if (config->mtu < RTE_ETHER_MIN_MTU) {
				PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
						config->mtu);
				return -1;
			}

			hw->max_mtu = config->mtu;
			/* Set initial MTU to maximum one supported by vhost */
			eth_dev->data->mtu = config->mtu;

		} else {
			/* No device MTU: derive the max from the largest Rx
			 * packet minus L2 framing and the virtio header. */
			hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
				VLAN_TAG_LEN - hw->vtnet_hdr_size;
		}

		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
				config->max_virtqueue_pairs);
		PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
		PMD_INIT_LOG(DEBUG,
				"PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
				config->mac[0], config->mac[1],
				config->mac[2], config->mac[3],
				config->mac[4], config->mac[5]);
	} else {
		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
		hw->max_queue_pairs = 1;
		hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
			VLAN_TAG_LEN - hw->vtnet_hdr_size;
	}

	/* Queues are allocated once here (for max_queue_pairs), not in
	 * queue_setup, so the vring info stays stable across reconfigures. */
	ret = virtio_alloc_queues(eth_dev);
	if (ret < 0)
		return ret;

	if (eth_dev->data->dev_conf.intr_conf.rxq) {
		if (virtio_configure_intr(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "failed to configure interrupt");
			virtio_free_queues(hw);
			return -1;
		}
	}

	vtpci_reinit_complete(hw);

	if (pci_dev)
		PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	return 0;
}
|
|
|
|
|
net/virtio: fix multiple process support
The introduce of virtio 1.0 support brings yet another set of ops, badly,
it's not handled correctly, that it breaks the multiple process support.
The issue is the data/function pointer may vary from different processes,
and the old used to do one time set (for primary process only). That
said, the function pointer the secondary process saw is actually from the
primary process space. Accessing it could likely result to a crash.
Kudos to the last patches, we now be able to maintain those info that may
vary among different process locally, meaning every process could have its
own copy for each of them, with the correct value set. And this is what
this patch does:
- remap the PCI (IO port for legacy device and memory map for modern
device)
- set vtpci_ops correctly
After that, multiple process would work like a charm. (At least, it
passed my fuzzy test)
Fixes: b8f04520ad71 ("virtio: use PCI ioport API")
Fixes: d5bbeefca826 ("virtio: introduce PCI implementation structure")
Fixes: 6ba1f63b5ab0 ("virtio: support specification 1.0")
Cc: stable@dpdk.org
Reported-by: Juho Snellman <jsnell@iki.fi>
Reported-by: Yaron Illouz <yaroni@radcom.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2017-01-06 10:16:19 +00:00
|
|
|
/*
|
|
|
|
* Remap the PCI device again (IO port map for legacy device and
|
|
|
|
* memory map for modern device), so that the secondary process
|
|
|
|
* could have the PCI initiated correctly.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
|
|
|
|
{
|
|
|
|
if (hw->modern) {
|
|
|
|
/*
|
|
|
|
* We don't have to re-parse the PCI config space, since
|
2017-05-04 14:48:59 +00:00
|
|
|
* rte_pci_map_device() makes sure the mapped address
|
net/virtio: fix multiple process support
The introduce of virtio 1.0 support brings yet another set of ops, badly,
it's not handled correctly, that it breaks the multiple process support.
The issue is the data/function pointer may vary from different processes,
and the old used to do one time set (for primary process only). That
said, the function pointer the secondary process saw is actually from the
primary process space. Accessing it could likely result to a crash.
Kudos to the last patches, we now be able to maintain those info that may
vary among different process locally, meaning every process could have its
own copy for each of them, with the correct value set. And this is what
this patch does:
- remap the PCI (IO port for legacy device and memory map for modern
device)
- set vtpci_ops correctly
After that, multiple process would work like a charm. (At least, it
passed my fuzzy test)
Fixes: b8f04520ad71 ("virtio: use PCI ioport API")
Fixes: d5bbeefca826 ("virtio: introduce PCI implementation structure")
Fixes: 6ba1f63b5ab0 ("virtio: support specification 1.0")
Cc: stable@dpdk.org
Reported-by: Juho Snellman <jsnell@iki.fi>
Reported-by: Yaron Illouz <yaroni@radcom.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2017-01-06 10:16:19 +00:00
|
|
|
* in secondary process would equal to the one mapped in
|
|
|
|
* the primary process: error will be returned if that
|
|
|
|
* requirement is not met.
|
|
|
|
*
|
|
|
|
* That said, we could simply reuse all cap pointers
|
|
|
|
* (such as dev_cfg, common_cfg, etc.) parsed from the
|
|
|
|
* primary process, which is stored in shared memory.
|
|
|
|
*/
|
2017-05-04 14:48:59 +00:00
|
|
|
if (rte_pci_map_device(pci_dev)) {
|
net/virtio: fix multiple process support
The introduce of virtio 1.0 support brings yet another set of ops, badly,
it's not handled correctly, that it breaks the multiple process support.
The issue is the data/function pointer may vary from different processes,
and the old used to do one time set (for primary process only). That
said, the function pointer the secondary process saw is actually from the
primary process space. Accessing it could likely result to a crash.
Kudos to the last patches, we now be able to maintain those info that may
vary among different process locally, meaning every process could have its
own copy for each of them, with the correct value set. And this is what
this patch does:
- remap the PCI (IO port for legacy device and memory map for modern
device)
- set vtpci_ops correctly
After that, multiple process would work like a charm. (At least, it
passed my fuzzy test)
Fixes: b8f04520ad71 ("virtio: use PCI ioport API")
Fixes: d5bbeefca826 ("virtio: introduce PCI implementation structure")
Fixes: 6ba1f63b5ab0 ("virtio: support specification 1.0")
Cc: stable@dpdk.org
Reported-by: Juho Snellman <jsnell@iki.fi>
Reported-by: Yaron Illouz <yaroni@radcom.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2017-01-06 10:16:19 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "failed to map pci device!");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
} else {
|
2017-05-04 14:48:59 +00:00
|
|
|
if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
|
net/virtio: fix multiple process support
The introduce of virtio 1.0 support brings yet another set of ops, badly,
it's not handled correctly, that it breaks the multiple process support.
The issue is the data/function pointer may vary from different processes,
and the old used to do one time set (for primary process only). That
said, the function pointer the secondary process saw is actually from the
primary process space. Accessing it could likely result to a crash.
Kudos to the last patches, we now be able to maintain those info that may
vary among different process locally, meaning every process could have its
own copy for each of them, with the correct value set. And this is what
this patch does:
- remap the PCI (IO port for legacy device and memory map for modern
device)
- set vtpci_ops correctly
After that, multiple process would work like a charm. (At least, it
passed my fuzzy test)
Fixes: b8f04520ad71 ("virtio: use PCI ioport API")
Fixes: d5bbeefca826 ("virtio: introduce PCI implementation structure")
Fixes: 6ba1f63b5ab0 ("virtio: support specification 1.0")
Cc: stable@dpdk.org
Reported-by: Juho Snellman <jsnell@iki.fi>
Reported-by: Yaron Illouz <yaroni@radcom.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2017-01-06 10:16:19 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
virtio_set_vtpci_ops(struct virtio_hw *hw)
|
|
|
|
{
|
2017-01-17 22:13:00 +00:00
|
|
|
#ifdef RTE_VIRTIO_USER
|
net/virtio: fix multiple process support
The introduce of virtio 1.0 support brings yet another set of ops, badly,
it's not handled correctly, that it breaks the multiple process support.
The issue is the data/function pointer may vary from different processes,
and the old used to do one time set (for primary process only). That
said, the function pointer the secondary process saw is actually from the
primary process space. Accessing it could likely result to a crash.
Kudos to the last patches, we now be able to maintain those info that may
vary among different process locally, meaning every process could have its
own copy for each of them, with the correct value set. And this is what
this patch does:
- remap the PCI (IO port for legacy device and memory map for modern
device)
- set vtpci_ops correctly
After that, multiple process would work like a charm. (At least, it
passed my fuzzy test)
Fixes: b8f04520ad71 ("virtio: use PCI ioport API")
Fixes: d5bbeefca826 ("virtio: introduce PCI implementation structure")
Fixes: 6ba1f63b5ab0 ("virtio: support specification 1.0")
Cc: stable@dpdk.org
Reported-by: Juho Snellman <jsnell@iki.fi>
Reported-by: Yaron Illouz <yaroni@radcom.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2017-01-06 10:16:19 +00:00
|
|
|
if (hw->virtio_user_dev)
|
|
|
|
VTPCI_OPS(hw) = &virtio_user_ops;
|
2017-01-17 22:13:00 +00:00
|
|
|
else
|
|
|
|
#endif
|
|
|
|
if (hw->modern)
|
net/virtio: fix multiple process support
The introduce of virtio 1.0 support brings yet another set of ops, badly,
it's not handled correctly, that it breaks the multiple process support.
The issue is the data/function pointer may vary from different processes,
and the old used to do one time set (for primary process only). That
said, the function pointer the secondary process saw is actually from the
primary process space. Accessing it could likely result to a crash.
Kudos to the last patches, we now be able to maintain those info that may
vary among different process locally, meaning every process could have its
own copy for each of them, with the correct value set. And this is what
this patch does:
- remap the PCI (IO port for legacy device and memory map for modern
device)
- set vtpci_ops correctly
After that, multiple process would work like a charm. (At least, it
passed my fuzzy test)
Fixes: b8f04520ad71 ("virtio: use PCI ioport API")
Fixes: d5bbeefca826 ("virtio: introduce PCI implementation structure")
Fixes: 6ba1f63b5ab0 ("virtio: support specification 1.0")
Cc: stable@dpdk.org
Reported-by: Juho Snellman <jsnell@iki.fi>
Reported-by: Yaron Illouz <yaroni@radcom.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2017-01-06 10:16:19 +00:00
|
|
|
VTPCI_OPS(hw) = &modern_ops;
|
|
|
|
else
|
|
|
|
VTPCI_OPS(hw) = &legacy_ops;
|
|
|
|
}
|
|
|
|
|
2016-10-13 14:16:00 +00:00
|
|
|
/*
|
|
|
|
* This function is based on probe() function in virtio_pci.c
|
|
|
|
* It returns 0 on success.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
|
|
|
|
{
|
|
|
|
struct virtio_hw *hw = eth_dev->data->dev_private;
|
|
|
|
int ret;
|
|
|
|
|
2019-07-25 11:06:45 +00:00
|
|
|
if (sizeof(struct virtio_net_hdr_mrg_rxbuf) > RTE_PKTMBUF_HEADROOM) {
|
|
|
|
PMD_INIT_LOG(ERR,
|
|
|
|
"Not sufficient headroom required = %d, avail = %d",
|
|
|
|
(int)sizeof(struct virtio_net_hdr_mrg_rxbuf),
|
|
|
|
RTE_PKTMBUF_HEADROOM);
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
2016-10-13 14:16:00 +00:00
|
|
|
|
|
|
|
eth_dev->dev_ops = &virtio_eth_dev_ops;
|
|
|
|
|
|
|
|
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
|
net/virtio: fix multiple process support
The introduce of virtio 1.0 support brings yet another set of ops, badly,
it's not handled correctly, that it breaks the multiple process support.
The issue is the data/function pointer may vary from different processes,
and the old used to do one time set (for primary process only). That
said, the function pointer the secondary process saw is actually from the
primary process space. Accessing it could likely result to a crash.
Kudos to the last patches, we now be able to maintain those info that may
vary among different process locally, meaning every process could have its
own copy for each of them, with the correct value set. And this is what
this patch does:
- remap the PCI (IO port for legacy device and memory map for modern
device)
- set vtpci_ops correctly
After that, multiple process would work like a charm. (At least, it
passed my fuzzy test)
Fixes: b8f04520ad71 ("virtio: use PCI ioport API")
Fixes: d5bbeefca826 ("virtio: introduce PCI implementation structure")
Fixes: 6ba1f63b5ab0 ("virtio: support specification 1.0")
Cc: stable@dpdk.org
Reported-by: Juho Snellman <jsnell@iki.fi>
Reported-by: Yaron Illouz <yaroni@radcom.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2017-01-06 10:16:19 +00:00
|
|
|
if (!hw->virtio_user_dev) {
|
2017-05-15 10:24:03 +00:00
|
|
|
ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
|
net/virtio: fix multiple process support
The introduce of virtio 1.0 support brings yet another set of ops, badly,
it's not handled correctly, that it breaks the multiple process support.
The issue is the data/function pointer may vary from different processes,
and the old used to do one time set (for primary process only). That
said, the function pointer the secondary process saw is actually from the
primary process space. Accessing it could likely result to a crash.
Kudos to the last patches, we now be able to maintain those info that may
vary among different process locally, meaning every process could have its
own copy for each of them, with the correct value set. And this is what
this patch does:
- remap the PCI (IO port for legacy device and memory map for modern
device)
- set vtpci_ops correctly
After that, multiple process would work like a charm. (At least, it
passed my fuzzy test)
Fixes: b8f04520ad71 ("virtio: use PCI ioport API")
Fixes: d5bbeefca826 ("virtio: introduce PCI implementation structure")
Fixes: 6ba1f63b5ab0 ("virtio: support specification 1.0")
Cc: stable@dpdk.org
Reported-by: Juho Snellman <jsnell@iki.fi>
Reported-by: Yaron Illouz <yaroni@radcom.com>
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2017-01-06 10:16:19 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
virtio_set_vtpci_ops(hw);
|
2017-09-07 12:13:44 +00:00
|
|
|
set_rxtx_funcs(eth_dev);
|
|
|
|
|
2016-10-13 14:16:00 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-06-05 09:43:41 +00:00
|
|
|
/*
|
|
|
|
* Pass the information to the rte_eth_dev_close() that it should also
|
|
|
|
* release the private port resources.
|
|
|
|
*/
|
|
|
|
eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
|
|
|
|
|
2016-10-13 14:16:00 +00:00
|
|
|
/* Allocate memory for storing MAC addresses */
|
2019-05-21 16:13:05 +00:00
|
|
|
eth_dev->data->mac_addrs = rte_zmalloc("virtio",
|
|
|
|
VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);
|
2016-10-13 14:16:00 +00:00
|
|
|
if (eth_dev->data->mac_addrs == NULL) {
|
|
|
|
PMD_INIT_LOG(ERR,
|
|
|
|
"Failed to allocate %d bytes needed to store MAC addresses",
|
2019-05-21 16:13:05 +00:00
|
|
|
VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN);
|
2016-10-13 14:16:00 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2017-01-22 08:47:00 +00:00
|
|
|
hw->port_id = eth_dev->data->port_id;
|
2016-12-23 15:58:02 +00:00
|
|
|
/* For virtio_user case the hw->virtio_user_dev is populated by
|
|
|
|
* virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
|
|
|
|
*/
|
|
|
|
if (!hw->virtio_user_dev) {
|
2017-05-15 10:24:03 +00:00
|
|
|
ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
|
2016-10-13 14:16:00 +00:00
|
|
|
if (ret)
|
2019-06-05 09:43:40 +00:00
|
|
|
goto err_vtpci_init;
|
2016-10-13 14:16:00 +00:00
|
|
|
}
|
|
|
|
|
2020-01-15 06:13:58 +00:00
|
|
|
rte_spinlock_init(&hw->state_lock);
|
|
|
|
|
2016-10-13 14:16:02 +00:00
|
|
|
/* reset device and negotiate default features */
|
2016-10-13 14:16:08 +00:00
|
|
|
ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
|
2016-10-13 14:16:00 +00:00
|
|
|
if (ret < 0)
|
2019-06-05 09:43:40 +00:00
|
|
|
goto err_virtio_init;
|
2016-10-13 14:16:00 +00:00
|
|
|
|
2019-06-05 09:43:41 +00:00
|
|
|
hw->opened = true;
|
|
|
|
|
2014-05-29 07:18:19 +00:00
|
|
|
return 0;
|
2017-10-27 03:54:09 +00:00
|
|
|
|
2019-06-05 09:43:40 +00:00
|
|
|
err_virtio_init:
|
|
|
|
if (!hw->virtio_user_dev) {
|
|
|
|
rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
|
|
|
|
if (!hw->modern)
|
|
|
|
rte_pci_ioport_unmap(VTPCI_IO(hw));
|
|
|
|
}
|
|
|
|
err_vtpci_init:
|
2017-10-27 03:54:09 +00:00
|
|
|
rte_free(eth_dev->data->mac_addrs);
|
2019-04-15 14:48:18 +00:00
|
|
|
eth_dev->data->mac_addrs = NULL;
|
2017-10-27 03:54:09 +00:00
|
|
|
return ret;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
2015-07-15 13:51:00 +00:00
|
|
|
static int
|
|
|
|
eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
|
|
|
|
{
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
|
|
|
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
|
2018-10-16 00:16:31 +00:00
|
|
|
return 0;
|
2015-07-15 13:51:00 +00:00
|
|
|
|
2016-11-05 09:41:03 +00:00
|
|
|
virtio_dev_stop(eth_dev);
|
|
|
|
virtio_dev_close(eth_dev);
|
2015-07-15 13:51:00 +00:00
|
|
|
|
|
|
|
eth_dev->dev_ops = NULL;
|
|
|
|
eth_dev->tx_pkt_burst = NULL;
|
|
|
|
eth_dev->rx_pkt_burst = NULL;
|
|
|
|
|
|
|
|
PMD_INIT_LOG(DEBUG, "dev_uninit completed");
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-04-17 07:06:22 +00:00
|
|
|
static int vdpa_check_handler(__rte_unused const char *key,
|
|
|
|
const char *value, __rte_unused void *opaque)
|
|
|
|
{
|
|
|
|
if (strcmp(value, "1"))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
vdpa_mode_selected(struct rte_devargs *devargs)
|
|
|
|
{
|
|
|
|
struct rte_kvargs *kvlist;
|
|
|
|
const char *key = "vdpa";
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (devargs == NULL)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
kvlist = rte_kvargs_parse(devargs->args, NULL);
|
|
|
|
if (kvlist == NULL)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!rte_kvargs_count(kvlist, key))
|
|
|
|
goto exit;
|
|
|
|
|
|
|
|
/* vdpa mode selected when there's a key-value pair: vdpa=1 */
|
|
|
|
if (rte_kvargs_process(kvlist, key,
|
|
|
|
vdpa_check_handler, NULL) < 0) {
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
ret = 1;
|
|
|
|
|
|
|
|
exit:
|
|
|
|
rte_kvargs_free(kvlist);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-04-11 15:44:24 +00:00
|
|
|
static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
|
|
|
|
struct rte_pci_device *pci_dev)
|
|
|
|
{
|
2018-04-17 07:06:22 +00:00
|
|
|
/* virtio pmd skips probe if device needs to work in vdpa mode */
|
|
|
|
if (vdpa_mode_selected(pci_dev->device.devargs))
|
|
|
|
return 1;
|
|
|
|
|
2017-04-11 15:44:24 +00:00
|
|
|
return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
|
|
|
|
eth_virtio_dev_init);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
|
|
|
|
{
|
2019-06-05 09:43:41 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
|
|
|
|
/* Port has already been released by close. */
|
|
|
|
if (ret == -ENODEV)
|
|
|
|
ret = 0;
|
|
|
|
return ret;
|
2017-04-11 15:44:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct rte_pci_driver rte_virtio_pmd = {
|
|
|
|
.driver = {
|
|
|
|
.name = "net_virtio",
|
2013-09-18 10:00:00 +00:00
|
|
|
},
|
2017-04-11 15:44:24 +00:00
|
|
|
.id_table = pci_id_virtio_map,
|
|
|
|
.drv_flags = 0,
|
|
|
|
.probe = eth_virtio_pci_probe,
|
|
|
|
.remove = eth_virtio_pci_remove,
|
2013-09-18 10:00:00 +00:00
|
|
|
};
|
|
|
|
|
2018-06-18 12:32:21 +00:00
|
|
|
RTE_INIT(rte_virtio_pmd_init)
|
2013-09-18 10:00:00 +00:00
|
|
|
{
|
2018-11-23 15:39:20 +00:00
|
|
|
rte_eal_iopl_init();
|
2017-05-04 14:48:59 +00:00
|
|
|
rte_pci_register(&rte_virtio_pmd);
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
2018-07-02 15:25:47 +00:00
|
|
|
static bool
|
|
|
|
rx_offload_enabled(struct virtio_hw *hw)
|
|
|
|
{
|
|
|
|
return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
|
|
|
|
vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
|
|
|
|
vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
tx_offload_enabled(struct virtio_hw *hw)
|
|
|
|
{
|
|
|
|
return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
|
|
|
|
vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
|
|
|
|
vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
|
|
|
|
}
|
|
|
|
|
2013-09-18 10:00:00 +00:00
|
|
|
/*
|
|
|
|
* Configure virtio device
|
|
|
|
* It returns 0 on success.
|
|
|
|
*/
|
|
|
|
static int
|
2014-06-14 01:06:22 +00:00
|
|
|
virtio_dev_configure(struct rte_eth_dev *dev)
|
2013-09-18 10:00:00 +00:00
|
|
|
{
|
2014-06-14 01:06:22 +00:00
|
|
|
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
|
2018-07-02 15:25:45 +00:00
|
|
|
const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2019-05-21 16:13:05 +00:00
|
|
|
uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
|
2019-02-05 11:17:02 +00:00
|
|
|
hw->vtnet_hdr_size;
|
2018-03-09 00:32:16 +00:00
|
|
|
uint64_t rx_offloads = rxmode->offloads;
|
2018-07-02 15:25:45 +00:00
|
|
|
uint64_t tx_offloads = txmode->offloads;
|
2017-09-07 12:13:38 +00:00
|
|
|
uint64_t req_features;
|
2017-07-31 07:56:44 +00:00
|
|
|
int ret;
|
2014-06-14 01:06:22 +00:00
|
|
|
|
|
|
|
PMD_INIT_LOG(DEBUG, "configure");
|
2017-09-07 12:13:38 +00:00
|
|
|
req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
|
2017-07-07 19:52:49 +00:00
|
|
|
|
2019-10-09 12:32:07 +00:00
|
|
|
if (rxmode->mq_mode != ETH_MQ_RX_NONE) {
|
|
|
|
PMD_DRV_LOG(ERR,
|
|
|
|
"Unsupported Rx multi queue mode %d",
|
|
|
|
rxmode->mq_mode);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2019-10-09 12:32:08 +00:00
|
|
|
if (txmode->mq_mode != ETH_MQ_TX_NONE) {
|
|
|
|
PMD_DRV_LOG(ERR,
|
|
|
|
"Unsupported Tx multi queue mode %d",
|
|
|
|
txmode->mq_mode);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2017-07-31 07:56:44 +00:00
|
|
|
if (dev->data->dev_conf.intr_conf.rxq) {
|
|
|
|
ret = virtio_init_device(dev, hw->req_guest_features);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-02-05 11:17:02 +00:00
|
|
|
if (rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len)
|
|
|
|
req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
|
|
|
|
|
2018-03-09 00:32:16 +00:00
|
|
|
if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
|
|
|
|
DEV_RX_OFFLOAD_TCP_CKSUM))
|
2017-09-07 12:13:39 +00:00
|
|
|
req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
|
|
|
|
|
2018-03-09 00:32:16 +00:00
|
|
|
if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
|
2017-09-07 12:13:38 +00:00
|
|
|
req_features |=
|
|
|
|
(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
|
|
|
|
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
|
2016-10-13 14:16:02 +00:00
|
|
|
|
2018-07-02 15:25:45 +00:00
|
|
|
if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM |
|
|
|
|
DEV_TX_OFFLOAD_TCP_CKSUM))
|
|
|
|
req_features |= (1ULL << VIRTIO_NET_F_CSUM);
|
|
|
|
|
|
|
|
if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
|
|
|
|
req_features |=
|
|
|
|
(1ULL << VIRTIO_NET_F_HOST_TSO4) |
|
|
|
|
(1ULL << VIRTIO_NET_F_HOST_TSO6);
|
|
|
|
|
2017-09-07 12:13:38 +00:00
|
|
|
/* if request features changed, reinit the device */
|
|
|
|
if (req_features != hw->req_guest_features) {
|
|
|
|
ret = virtio_init_device(dev, req_features);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-03-09 00:32:16 +00:00
|
|
|
if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
|
|
|
|
DEV_RX_OFFLOAD_TCP_CKSUM)) &&
|
2017-09-07 12:13:39 +00:00
|
|
|
!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
|
2017-09-07 12:13:41 +00:00
|
|
|
PMD_DRV_LOG(ERR,
|
2017-09-07 12:13:39 +00:00
|
|
|
"rx checksum not available on this host");
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
2018-03-09 00:32:16 +00:00
|
|
|
if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
|
2017-09-07 12:13:38 +00:00
|
|
|
(!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
|
2017-12-11 05:13:30 +00:00
|
|
|
!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
|
2017-09-07 12:13:41 +00:00
|
|
|
PMD_DRV_LOG(ERR,
|
2017-09-07 12:13:38 +00:00
|
|
|
"Large Receive Offload not available on this host");
|
2016-10-13 14:16:10 +00:00
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
net/virtio: allocate queue at init stage
Queue allocation should be done once, since the queue related info (such
as vring addreess) will only be informed to the vhost-user backend once
without virtio device reset.
That means, if you allocate queues again after the vhost-user negotiation,
the vhost-user backend will not be informed any more. Leading to a state
that the vring info mismatches between virtio PMD driver and vhost-backend:
the driver switches to the new address has just been allocated, while the
vhost-backend still sticks to the old address has been assigned in the init
stage.
Unfortunately, that is exactly how the virtio driver is coded so far: queue
allocation is done at queue_setup stage (when rte_eth_tx/rx_queue_setup is
invoked). This is wrong, because queue_setup can be invoked several times.
For example,
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 1 # just trigger the queue_setup callback again
> port config all rxq 1
> port start 0
The right way to do is allocate the queues in the init stage, so that the
vring info could be persistent with the vhost-user backend.
Besides that, we should allocate max_queue pairs the device supports, but
not nr queue pairs firstly configured, to make following case work.
$ start_testpmd.sh ... --txq=1 --rxq=1 ...
> port stop 0
> port config all txq 2
> port config all rxq 2
> port start 0
Since the allocation is switched to init stage, the free should also
moved from the rx/tx_queue_release to dev close stage. That leading we
could do nothing an empty rx/tx_queue_release() implementation.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
2016-11-05 09:40:59 +00:00
|
|
|
/* start control queue */
|
|
|
|
if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
|
2016-10-13 14:16:01 +00:00
|
|
|
virtio_dev_cq_start(dev);
|
|
|
|
|
2018-03-09 00:32:16 +00:00
|
|
|
if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
|
|
|
|
hw->vlan_strip = 1;
|
2015-02-09 01:13:55 +00:00
|
|
|
|
2018-03-09 00:32:16 +00:00
|
|
|
if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
|
2015-02-09 01:14:02 +00:00
|
|
|
&& !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
|
2017-09-07 12:13:41 +00:00
|
|
|
PMD_DRV_LOG(ERR,
|
2015-02-09 01:14:02 +00:00
|
|
|
"vlan filtering not available on this host");
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
2018-07-02 15:25:47 +00:00
|
|
|
hw->has_tx_offload = tx_offload_enabled(hw);
|
|
|
|
hw->has_rx_offload = rx_offload_enabled(hw);
|
|
|
|
|
2016-05-09 16:35:57 +00:00
|
|
|
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
|
2017-01-17 07:10:22 +00:00
|
|
|
/* Enable vector (0) for Link State Intrerrupt */
|
|
|
|
if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
|
|
|
|
VIRTIO_MSI_NO_VECTOR) {
|
2015-02-09 01:14:06 +00:00
|
|
|
PMD_DRV_LOG(ERR, "failed to set config vector");
|
|
|
|
return -EBUSY;
|
|
|
|
}
|
2015-02-09 01:13:53 +00:00
|
|
|
|
2017-09-07 12:13:46 +00:00
|
|
|
hw->use_simple_rx = 1;
|
2017-09-07 12:13:44 +00:00
|
|
|
|
2018-07-02 13:56:41 +00:00
|
|
|
if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
|
|
|
|
hw->use_inorder_tx = 1;
|
2018-12-20 17:27:17 +00:00
|
|
|
hw->use_inorder_rx = 1;
|
|
|
|
hw->use_simple_rx = 0;
|
2018-07-02 13:56:41 +00:00
|
|
|
}
|
|
|
|
|
2018-12-17 21:31:35 +00:00
|
|
|
if (vtpci_packed_queue(hw)) {
|
|
|
|
hw->use_simple_rx = 0;
|
|
|
|
hw->use_inorder_rx = 0;
|
|
|
|
}
|
|
|
|
|
2017-12-14 14:32:13 +00:00
|
|
|
#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
|
2017-09-07 12:13:46 +00:00
|
|
|
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
|
|
|
|
hw->use_simple_rx = 0;
|
|
|
|
}
|
2017-09-07 12:13:44 +00:00
|
|
|
#endif
|
2017-09-07 12:13:46 +00:00
|
|
|
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
|
2018-07-02 13:56:41 +00:00
|
|
|
hw->use_simple_rx = 0;
|
2017-09-07 12:13:46 +00:00
|
|
|
}
|
2017-09-07 12:13:44 +00:00
|
|
|
|
2018-03-09 00:32:16 +00:00
|
|
|
if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
|
2018-07-02 15:25:46 +00:00
|
|
|
DEV_RX_OFFLOAD_TCP_CKSUM |
|
|
|
|
DEV_RX_OFFLOAD_TCP_LRO |
|
|
|
|
DEV_RX_OFFLOAD_VLAN_STRIP))
|
2017-09-07 12:13:47 +00:00
|
|
|
hw->use_simple_rx = 0;
|
|
|
|
|
2015-02-09 01:13:58 +00:00
|
|
|
return 0;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
virtio_dev_start(struct rte_eth_dev *dev)
|
|
|
|
{
|
2014-05-29 07:18:20 +00:00
|
|
|
uint16_t nb_queues, i;
|
2016-06-01 16:12:13 +00:00
|
|
|
struct virtnet_rx *rxvq;
|
|
|
|
struct virtnet_tx *txvq __rte_unused;
|
2016-11-07 09:25:06 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2017-09-07 12:13:43 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* Finish the initialization of the queues */
|
|
|
|
for (i = 0; i < dev->data->nb_rx_queues; i++) {
|
|
|
|
ret = virtio_dev_rx_queue_setup_finish(dev, i);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
for (i = 0; i < dev->data->nb_tx_queues; i++) {
|
|
|
|
ret = virtio_dev_tx_queue_setup_finish(dev, i);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
}
|
2013-09-18 10:00:00 +00:00
|
|
|
|
2015-02-09 01:13:53 +00:00
|
|
|
/* check if lsc interrupt feature is enabled */
|
2015-08-28 16:23:37 +00:00
|
|
|
if (dev->data->dev_conf.intr_conf.lsc) {
|
2016-05-09 16:35:57 +00:00
|
|
|
if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
|
2015-02-09 01:13:53 +00:00
|
|
|
PMD_DRV_LOG(ERR, "link status not supported by host");
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
2017-01-17 07:10:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Enable uio/vfio intr/eventfd mapping: althrough we already did that
|
|
|
|
* in device configure, but it could be unmapped when device is
|
|
|
|
* stopped.
|
|
|
|
*/
|
|
|
|
if (dev->data->dev_conf.intr_conf.lsc ||
|
|
|
|
dev->data->dev_conf.intr_conf.rxq) {
|
2017-11-09 09:21:24 +00:00
|
|
|
virtio_intr_disable(dev);
|
2015-02-09 01:13:53 +00:00
|
|
|
|
2018-10-31 18:39:43 +00:00
|
|
|
/* Setup interrupt callback */
|
|
|
|
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
|
|
|
|
rte_intr_callback_register(dev->intr_handle,
|
|
|
|
virtio_interrupt_handler,
|
|
|
|
dev);
|
|
|
|
|
2017-11-09 09:21:24 +00:00
|
|
|
if (virtio_intr_enable(dev) < 0) {
|
2015-02-09 01:13:53 +00:00
|
|
|
PMD_DRV_LOG(ERR, "interrupt enable failed");
|
|
|
|
return -EIO;
|
|
|
|
}
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
2015-02-09 01:13:53 +00:00
|
|
|
|
2013-09-18 10:00:00 +00:00
|
|
|
/*Notify the backend
|
|
|
|
*Otherwise the tap backend might already stop its queue due to fullness.
|
|
|
|
*vhost backend will have no chance to be waked up
|
|
|
|
*/
|
2016-11-05 09:41:04 +00:00
|
|
|
nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
|
2016-11-07 09:25:06 +00:00
|
|
|
if (hw->max_queue_pairs > 1) {
|
2014-05-29 07:18:20 +00:00
|
|
|
if (virtio_set_multiple_queues(dev, nb_queues) != 0)
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2014-06-14 01:06:19 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
|
2014-05-29 07:18:20 +00:00
|
|
|
|
2016-11-05 09:41:04 +00:00
|
|
|
for (i = 0; i < dev->data->nb_rx_queues; i++) {
|
2016-06-01 16:12:13 +00:00
|
|
|
rxvq = dev->data->rx_queues[i];
|
2017-10-20 02:09:28 +00:00
|
|
|
/* Flush the old packets */
|
2017-12-11 05:13:29 +00:00
|
|
|
virtqueue_rxvq_flush(rxvq->vq);
|
2016-06-01 16:12:13 +00:00
|
|
|
virtqueue_notify(rxvq->vq);
|
|
|
|
}
|
2014-05-29 07:18:20 +00:00
|
|
|
|
2017-08-01 16:17:36 +00:00
|
|
|
for (i = 0; i < dev->data->nb_tx_queues; i++) {
|
|
|
|
txvq = dev->data->tx_queues[i];
|
|
|
|
virtqueue_notify(txvq->vq);
|
|
|
|
}
|
|
|
|
|
2014-06-14 01:06:19 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
|
2014-05-29 07:18:20 +00:00
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
for (i = 0; i < dev->data->nb_rx_queues; i++) {
|
|
|
|
rxvq = dev->data->rx_queues[i];
|
|
|
|
VIRTQUEUE_DUMP(rxvq->vq);
|
|
|
|
}
|
2014-05-29 07:18:20 +00:00
|
|
|
|
2016-06-01 16:12:13 +00:00
|
|
|
for (i = 0; i < dev->data->nb_tx_queues; i++) {
|
|
|
|
txvq = dev->data->tx_queues[i];
|
|
|
|
VIRTQUEUE_DUMP(txvq->vq);
|
|
|
|
}
|
2017-04-27 07:35:39 +00:00
|
|
|
|
2017-09-07 12:13:44 +00:00
|
|
|
set_rxtx_funcs(dev);
|
2017-07-17 23:05:22 +00:00
|
|
|
hw->started = true;
|
2014-05-29 07:18:20 +00:00
|
|
|
|
2017-04-27 07:35:39 +00:00
|
|
|
/* Initialize Link state */
|
|
|
|
virtio_dev_link_update(dev, 0);
|
|
|
|
|
2014-05-29 07:18:20 +00:00
|
|
|
return 0;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
|
|
|
|
{
|
2018-01-23 15:54:42 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
|
|
|
uint16_t nr_vq = virtio_get_nr_vq(hw);
|
|
|
|
const char *type __rte_unused;
|
|
|
|
unsigned int i, mbuf_num = 0;
|
|
|
|
struct virtqueue *vq;
|
2014-05-29 07:18:20 +00:00
|
|
|
struct rte_mbuf *buf;
|
2018-01-23 15:54:42 +00:00
|
|
|
int queue_type;
|
2016-06-01 16:12:13 +00:00
|
|
|
|
2018-02-03 14:55:23 +00:00
|
|
|
if (hw->vqs == NULL)
|
|
|
|
return;
|
|
|
|
|
2018-01-23 15:54:42 +00:00
|
|
|
for (i = 0; i < nr_vq; i++) {
|
|
|
|
vq = hw->vqs[i];
|
|
|
|
if (!vq)
|
2018-01-23 15:54:41 +00:00
|
|
|
continue;
|
|
|
|
|
2018-01-23 15:54:42 +00:00
|
|
|
queue_type = virtio_get_queue_type(hw, i);
|
|
|
|
if (queue_type == VTNET_RQ)
|
|
|
|
type = "rxq";
|
|
|
|
else if (queue_type == VTNET_TQ)
|
|
|
|
type = "txq";
|
|
|
|
else
|
2018-01-23 15:54:41 +00:00
|
|
|
continue;
|
|
|
|
|
2014-06-13 01:32:40 +00:00
|
|
|
PMD_INIT_LOG(DEBUG,
|
2018-01-23 15:54:42 +00:00
|
|
|
"Before freeing %s[%d] used and unused buf",
|
|
|
|
type, i);
|
|
|
|
VIRTQUEUE_DUMP(vq);
|
2014-05-29 07:18:20 +00:00
|
|
|
|
2018-01-23 15:54:43 +00:00
|
|
|
while ((buf = virtqueue_detach_unused(vq)) != NULL) {
|
2014-08-14 08:54:35 +00:00
|
|
|
rte_pktmbuf_free(buf);
|
2014-05-29 07:18:20 +00:00
|
|
|
mbuf_num++;
|
|
|
|
}
|
|
|
|
|
2014-06-14 01:06:19 +00:00
|
|
|
PMD_INIT_LOG(DEBUG,
|
2018-01-23 15:54:42 +00:00
|
|
|
"After freeing %s[%d] used and unused buf",
|
|
|
|
type, i);
|
|
|
|
VIRTQUEUE_DUMP(vq);
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
2018-01-23 15:54:42 +00:00
|
|
|
|
|
|
|
PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2015-02-09 01:13:58 +00:00
|
|
|
* Stop device: disable interrupt and mark link down
|
2013-09-18 10:00:00 +00:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
virtio_dev_stop(struct rte_eth_dev *dev)
|
|
|
|
{
|
2017-04-14 06:36:45 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2015-02-09 01:13:58 +00:00
|
|
|
struct rte_eth_link link;
|
2017-01-17 07:10:27 +00:00
|
|
|
struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
|
2013-09-18 10:00:00 +00:00
|
|
|
|
2015-02-09 01:13:58 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "stop");
|
|
|
|
|
2018-01-10 01:23:53 +00:00
|
|
|
rte_spinlock_lock(&hw->state_lock);
|
2017-07-17 23:05:22 +00:00
|
|
|
if (!hw->started)
|
|
|
|
goto out_unlock;
|
|
|
|
hw->started = false;
|
|
|
|
|
2018-10-31 18:39:43 +00:00
|
|
|
if (intr_conf->lsc || intr_conf->rxq) {
|
2017-11-09 09:21:24 +00:00
|
|
|
virtio_intr_disable(dev);
|
2015-02-09 01:13:58 +00:00
|
|
|
|
2018-10-31 18:39:43 +00:00
|
|
|
/* Reset interrupt callback */
|
|
|
|
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
|
|
|
|
rte_intr_callback_unregister(dev->intr_handle,
|
|
|
|
virtio_interrupt_handler,
|
|
|
|
dev);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-09 01:13:58 +00:00
|
|
|
memset(&link, 0, sizeof(link));
|
2018-01-26 02:01:39 +00:00
|
|
|
rte_eth_linkstatus_set(dev, &link);
|
2017-07-17 23:05:22 +00:00
|
|
|
out_unlock:
|
2018-01-10 01:23:53 +00:00
|
|
|
rte_spinlock_unlock(&hw->state_lock);
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
|
|
|
|
{
|
2018-01-26 02:01:39 +00:00
|
|
|
struct rte_eth_link link;
|
2013-09-18 10:00:00 +00:00
|
|
|
uint16_t status;
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2018-01-26 02:01:39 +00:00
|
|
|
|
2013-09-18 10:00:00 +00:00
|
|
|
memset(&link, 0, sizeof(link));
|
2016-03-31 22:12:25 +00:00
|
|
|
link.link_duplex = ETH_LINK_FULL_DUPLEX;
|
2017-12-26 09:25:00 +00:00
|
|
|
link.link_speed = ETH_SPEED_NUM_10G;
|
2018-01-26 02:01:39 +00:00
|
|
|
link.link_autoneg = ETH_LINK_FIXED;
|
2015-02-09 01:13:53 +00:00
|
|
|
|
2017-07-17 23:05:22 +00:00
|
|
|
if (!hw->started) {
|
2017-04-14 06:36:45 +00:00
|
|
|
link.link_status = ETH_LINK_DOWN;
|
|
|
|
} else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
|
2014-06-14 01:06:19 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "Get link status from hw");
|
2013-09-18 10:00:00 +00:00
|
|
|
vtpci_read_dev_config(hw,
|
|
|
|
offsetof(struct virtio_net_config, status),
|
|
|
|
&status, sizeof(status));
|
2014-06-13 01:32:40 +00:00
|
|
|
if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
|
2016-03-31 22:12:24 +00:00
|
|
|
link.link_status = ETH_LINK_DOWN;
|
2014-06-14 01:06:19 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "Port %d is down",
|
2014-06-13 01:32:40 +00:00
|
|
|
dev->data->port_id);
|
2013-09-18 10:00:00 +00:00
|
|
|
} else {
|
2016-03-31 22:12:24 +00:00
|
|
|
link.link_status = ETH_LINK_UP;
|
2014-06-14 01:06:19 +00:00
|
|
|
PMD_INIT_LOG(DEBUG, "Port %d is up",
|
2014-06-13 01:32:40 +00:00
|
|
|
dev->data->port_id);
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
} else {
|
2016-03-31 22:12:24 +00:00
|
|
|
link.link_status = ETH_LINK_UP;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
2015-02-09 01:13:53 +00:00
|
|
|
|
2018-01-26 02:01:39 +00:00
|
|
|
return rte_eth_linkstatus_set(dev, &link);
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
|
|
|
|
2017-09-01 02:36:28 +00:00
|
|
|
static int
|
|
|
|
virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
|
|
|
|
{
|
|
|
|
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
|
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2018-03-09 00:32:16 +00:00
|
|
|
uint64_t offloads = rxmode->offloads;
|
2017-09-01 02:36:28 +00:00
|
|
|
|
|
|
|
if (mask & ETH_VLAN_FILTER_MASK) {
|
2018-03-09 00:32:16 +00:00
|
|
|
if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
|
2017-09-01 02:36:28 +00:00
|
|
|
!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
|
|
|
|
|
|
|
|
PMD_DRV_LOG(NOTICE,
|
|
|
|
"vlan filtering not available on this host");
|
|
|
|
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mask & ETH_VLAN_STRIP_MASK)
|
2018-03-09 00:32:16 +00:00
|
|
|
hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
|
2017-09-01 02:36:28 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-09-12 16:42:28 +00:00
|
|
|
static int
|
2013-09-18 10:00:00 +00:00
|
|
|
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
|
|
|
|
{
|
2017-01-17 10:35:53 +00:00
|
|
|
uint64_t tso_mask, host_features;
|
2015-02-09 01:13:56 +00:00
|
|
|
struct virtio_hw *hw = dev->data->dev_private;
|
2014-06-13 01:32:40 +00:00
|
|
|
|
2017-02-02 12:05:07 +00:00
|
|
|
dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
|
|
|
|
|
2016-10-13 14:16:01 +00:00
|
|
|
dev_info->max_rx_queues =
|
|
|
|
RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
|
|
|
|
dev_info->max_tx_queues =
|
|
|
|
RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_TX_QUEUES);
|
2013-09-18 10:00:00 +00:00
|
|
|
dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
|
|
|
|
dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
|
|
|
|
dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
|
2016-10-13 14:16:09 +00:00
|
|
|
|
2017-01-17 10:35:53 +00:00
|
|
|
host_features = VTPCI_OPS(hw)->get_features(hw);
|
2018-09-04 10:12:56 +00:00
|
|
|
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
|
2019-02-05 11:17:02 +00:00
|
|
|
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
|
2017-01-17 10:35:53 +00:00
|
|
|
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
|
|
|
|
dev_info->rx_offload_capa |=
|
|
|
|
DEV_RX_OFFLOAD_TCP_CKSUM |
|
|
|
|
DEV_RX_OFFLOAD_UDP_CKSUM;
|
|
|
|
}
|
2018-03-09 00:32:16 +00:00
|
|
|
if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
|
|
|
|
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
|
2017-01-17 10:35:53 +00:00
|
|
|
tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
|
|
|
|
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
|
2017-09-07 12:13:38 +00:00
|
|
|
if ((host_features & tso_mask) == tso_mask)
|
|
|
|
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
|
2017-01-17 10:35:53 +00:00
|
|
|
|
2018-03-09 00:32:16 +00:00
|
|
|
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
|
|
|
|
DEV_TX_OFFLOAD_VLAN_INSERT;
|
2018-07-02 15:25:45 +00:00
|
|
|
if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
|
2016-10-13 14:16:09 +00:00
|
|
|
dev_info->tx_offload_capa |=
|
|
|
|
DEV_TX_OFFLOAD_UDP_CKSUM |
|
|
|
|
DEV_TX_OFFLOAD_TCP_CKSUM;
|
|
|
|
}
|
2016-10-13 14:16:11 +00:00
|
|
|
tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
|
|
|
|
(1ULL << VIRTIO_NET_F_HOST_TSO6);
|
2018-07-02 15:25:45 +00:00
|
|
|
if ((host_features & tso_mask) == tso_mask)
|
2016-10-13 14:16:11 +00:00
|
|
|
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
|
2019-09-12 16:42:28 +00:00
|
|
|
|
|
|
|
return 0;
|
2013-09-18 10:00:00 +00:00
|
|
|
}
|
2014-04-21 14:59:37 +00:00
|
|
|
|
2014-05-29 07:18:20 +00:00
|
|
|
/*
|
|
|
|
* It enables testpmd to collect per queue stats.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
|
|
|
|
__rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
|
|
|
|
__rte_unused uint8_t is_rx)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-10-10 05:43:15 +00:00
|
|
|
RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
|
|
|
|
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
|
2017-05-20 13:12:37 +00:00
|
|
|
RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
|
2017-12-19 06:38:35 +00:00
|
|
|
|
2018-06-18 12:32:21 +00:00
|
|
|
RTE_INIT(virtio_init_log)
|
2017-12-19 06:38:35 +00:00
|
|
|
{
|
2018-01-25 09:01:09 +00:00
|
|
|
virtio_logtype_init = rte_log_register("pmd.net.virtio.init");
|
2017-12-19 06:38:35 +00:00
|
|
|
if (virtio_logtype_init >= 0)
|
|
|
|
rte_log_set_level(virtio_logtype_init, RTE_LOG_NOTICE);
|
2018-01-25 09:01:09 +00:00
|
|
|
virtio_logtype_driver = rte_log_register("pmd.net.virtio.driver");
|
2017-12-19 06:38:35 +00:00
|
|
|
if (virtio_logtype_driver >= 0)
|
|
|
|
rte_log_set_level(virtio_logtype_driver, RTE_LOG_NOTICE);
|
|
|
|
}
|