numam-dpdk/drivers/net/netvsc/hn_vf.c
Stephen Hemminger 7415ad0cdc net/netvsc: fix VF link update
The netvsc device was calling the VF (if present) link update
handler with the wrong device. This leads to errors in the mlx5
driver when it cannot find the ifindex.

Fixes: dc7680e859 ("net/netvsc: support integrated VF")

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
2018-11-05 20:10:54 +01:00

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2018 Microsoft Corp.
* All rights reserved.
*/
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
#include <sys/types.h>
#include <sys/fcntl.h>
#include <sys/uio.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_log.h>
#include <rte_string_fns.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"
/*
 * Search for a VF device with the same MAC address as the synthetic
 * device; the host gives both halves of an accelerated networking
 * pair the same MAC address. Return the port id, or -ENOENT.
 */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
	const struct ether_addr *mac = dev->data->mac_addrs;
	int i;

	RTE_ETH_FOREACH_DEV(i) {
		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
		const struct ether_addr *vf_mac = vf_dev->data->mac_addrs;

		if (vf_dev == dev)
			continue;

		if (is_same_ether_addr(mac, vf_mac))
			return i;
	}

	return -ENOENT;
}

/*
 * Attach the new PCI VF device, take ownership of it,
 * and return it via *vf_dev.
 */
static int hn_vf_attach(struct hn_data *hv, uint16_t port_id,
			struct rte_eth_dev **vf_dev)
{
	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
	int ret;

	ret = rte_eth_dev_owner_get(port_id, &owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
		return ret;
	}

	if (owner.id != RTE_ETH_DEV_NO_OWNER) {
		PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
			    port_id, owner.name);
		return -EBUSY;
	}

	ret = rte_eth_dev_owner_set(port_id, &hv->owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not set owner for port %d", port_id);
		return ret;
	}

	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);

	/* Make sure the setup above is visible before publishing vf_dev */
	rte_smp_wmb();
	*vf_dev = &rte_eth_devices[port_id];
	return 0;
}

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
	int port, err;

	port = hn_vf_match(dev);
	if (port < 0) {
		PMD_DRV_LOG(NOTICE, "No matching MAC found");
		return port;
	}

	rte_spinlock_lock(&hv->vf_lock);
	if (hv->vf_dev) {
		PMD_DRV_LOG(ERR, "VF already attached");
		err = -EBUSY;
	} else {
		err = hn_vf_attach(hv, port, &hv->vf_dev);
	}

	if (err == 0) {
		dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
		hv->vf_intr = (struct rte_intr_handle) {
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
		dev->intr_handle = &hv->vf_intr;

		/* Tell the host to steer data traffic to the VF */
		hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
	}
	rte_spinlock_unlock(&hv->vf_lock);

	return err;
}

/* Remove VF device from the synthetic device */
static void hn_vf_remove(struct hn_data *hv)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (!vf_dev) {
		PMD_DRV_LOG(ERR, "VF path not active");
		rte_spinlock_unlock(&hv->vf_lock);
		return;
	}

	/* Stop incoming packets from arriving on VF */
	hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);
	hv->vf_dev = NULL;

	/* Give back ownership */
	rte_eth_dev_owner_unset(vf_dev->data->port_id, hv->owner.id);
	rte_spinlock_unlock(&hv->vf_lock);
}

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
		      const struct vmbus_chanpkt_hdr *hdr,
		      const void *data)
{
	struct hn_data *hv = dev->data->dev_private;
	const struct hn_nvs_vf_association *vf_assoc = data;

	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
		PMD_DRV_LOG(ERR, "invalid vf association NVS");
		return;
	}

	PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
		    vf_assoc->serial,
		    vf_assoc->allocated ? "add to" : "remove from",
		    dev->data->port_id);

	hv->vf_present = vf_assoc->allocated;

	if (dev->state != RTE_ETH_DEV_ATTACHED)
		return;

	if (vf_assoc->allocated)
		hn_vf_add(dev, hv);
	else
		hn_vf_remove(hv);
}

/*
 * Merge the info from the VF and synthetic paths:
 * use the default port configuration of the VF, and take
 * the more restrictive of each limit (queue counts, offload
 * capabilities and buffer sizes).
 */
static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
			     struct rte_eth_dev_info *info)
{
	struct rte_eth_dev_info vf_info;

	rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);

	info->speed_capa = vf_info.speed_capa;
	info->default_rxportconf = vf_info.default_rxportconf;
	info->default_txportconf = vf_info.default_txportconf;

	info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
				      info->max_rx_queues);
	info->rx_offload_capa &= vf_info.rx_offload_capa;
	info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
	info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

	info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
				      info->max_tx_queues);
	info->tx_offload_capa &= vf_info.tx_offload_capa;
	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;

	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
	info->max_rx_pktlen = RTE_MAX(vf_info.max_rx_pktlen,
				      info->max_rx_pktlen);
}
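
/* Report device info, merging in the VF limits when a VF is attached */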
void hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		hn_vf_info_merge(vf_dev, info);
	rte_spinlock_unlock(&hv->vf_lock);
}
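
/*
 * Pass the link update request through to the VF device, so the
 * reported link state reflects the accelerated path. The VF's
 * link_update op must be given the VF device itself, not the
 * synthetic one; passing the wrong device confuses drivers such
 * as mlx5 that look up their ifindex from the device.
 */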
int hn_vf_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev && vf_dev->dev_ops->link_update)
		ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}

/* Called when the VF device has link state interrupts enabled */
static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
			   enum rte_eth_event_type event,
			   void *cb_arg, void *out __rte_unused)
{
	struct rte_eth_dev *dev = cb_arg;

	if (event != RTE_ETH_EVENT_INTR_LSC)
		return 0;

	/* If the link state changed, notify the registered callbacks */
	if (hn_dev_link_update(dev, 0) == 0)
		return 0; /* no change */

	return _rte_eth_dev_callback_process(dev,
					     RTE_ETH_EVENT_INTR_LSC,
					     NULL);
}
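
/*
 * Configure the VF to mirror the synthetic device's configuration,
 * enabling LSC interrupts on the VF only when both sides support them.
 */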
static int _hn_vf_configure(struct rte_eth_dev *dev,
			    struct rte_eth_dev *vf_dev,
			    const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_conf vf_conf = *dev_conf;
	uint16_t vf_port = vf_dev->data->port_id;
	int ret;

	if (dev_conf->intr_conf.lsc &&
	    (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
		PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 1;
	} else {
		PMD_DRV_LOG(DEBUG, "disabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 0;
	}

	ret = rte_eth_dev_configure(vf_port,
				    dev->data->nb_rx_queues,
				    dev->data->nb_tx_queues,
				    &vf_conf);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "VF configuration failed: %d", ret);
	} else if (vf_conf.intr_conf.lsc) {
		ret = rte_eth_dev_callback_register(vf_port,
						    RTE_ETH_EVENT_INTR_LSC,
						    hn_vf_lsc_event, dev);
		if (ret)
			PMD_DRV_LOG(ERR,
				    "Failed to register LSC callback for VF %u",
				    vf_port);
	}

	return ret;
}

/*
 * Configure VF if present.
 * Force VF to have the same number of queues as the synthetic device.
 */
int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		ret = _hn_vf_configure(dev, vf_dev, dev_conf);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}
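
/* Report the packet types supported by the VF, since received
 * packets normally arrive over the accelerated path.
 */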
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	const uint32_t *ptypes = NULL;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
	rte_spinlock_unlock(&hv->vf_lock);

	return ptypes;
}
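
/* Start the VF device (if attached) along with the synthetic device */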
int hn_vf_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		ret = rte_eth_dev_start(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}
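
/* Stop the VF device (if attached) when the synthetic device stops */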
void hn_vf_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		rte_eth_dev_stop(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);
}

/* If VF is present, then cascade the requested operation down to it */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_spinlock_lock(&hv->vf_lock);		\
		vf_dev = hv->vf_dev;				\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_spinlock_unlock(&hv->vf_lock);		\
	}
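
/*
 * For example, hn_vf_reset() below expands (roughly) to:
 *
 *	struct hn_data *hv = dev->data->dev_private;
 *	struct rte_eth_dev *vf_dev;
 *	rte_spinlock_lock(&hv->vf_lock);
 *	vf_dev = hv->vf_dev;
 *	if (vf_dev)
 *		rte_eth_dev_reset(vf_dev->data->port_id);
 *	rte_spinlock_unlock(&hv->vf_lock);
 */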

void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

void hn_vf_close(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_close);
}

void hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_stats_reset);
}

void hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_enable);
}

void hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_disable);
}

void hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_enable);
}

void hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_disable);
}
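
/* Forward the multicast address filter list to the VF */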
int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
		       struct ether_addr *mc_addr_set,
		       uint32_t nb_mc_addr)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}
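
/* Set up the matching transmit queue on the VF, mirroring the
 * synthetic device's queue numbering.
 */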
int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, tx_conf);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}
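
/* Release the corresponding VF transmit queue */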
void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
		void *subq = vf_dev->data->tx_queues[queue_id];

		(*vf_dev->dev_ops->tx_queue_release)(subq);
	}
	rte_spinlock_unlock(&hv->vf_lock);
}
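
/* Set up the matching receive queue on the VF */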
int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, rx_conf, mp);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}
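
/* Release the corresponding VF receive queue */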
void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
		void *subq = vf_dev->data->rx_queues[queue_id];

		(*vf_dev->dev_ops->rx_queue_release)(subq);
	}
	rte_spinlock_unlock(&hv->vf_lock);
}
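
/* Read the VF's basic statistics; the synthetic device's own
 * counters are expected to be merged in by the caller.
 */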
int hn_vf_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}
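
/* Retrieve extended stats names from the VF, prefixed with "vf_"
 * so they can be distinguished from the synthetic device's own xstats.
 */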
int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;
	char tmp[RTE_ETH_XSTATS_NAME_SIZE];

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev && vf_dev->dev_ops->xstats_get_names)
		count = vf_dev->dev_ops->xstats_get_names(vf_dev, names, n);
	rte_spinlock_unlock(&hv->vf_lock);

	/* add vf_ prefix to xstat names */
	if (names) {
		for (i = 0; i < count; i++) {
			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
			strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}
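
/* Retrieve extended stats values from the VF */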
int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int count = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev && vf_dev->dev_ops->xstats_get)
		count = vf_dev->dev_ops->xstats_get(vf_dev, xstats, n);
	rte_spinlock_unlock(&hv->vf_lock);

	return count;
}
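
/* Reset the VF's extended statistics */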
void hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev && vf_dev->dev_ops->xstats_reset)
		vf_dev->dev_ops->xstats_reset(vf_dev);
	rte_spinlock_unlock(&hv->vf_lock);
}