net/mlx5/linux: refactor VLAN

File mlx5_vlan.c contains Netlink APIs (Linux dependent) as part of the
VM workaround implementation. Move this implementation to file
linux/mlx5_vlan_os.c. To remove the Netlink dependency from the header
files, change the pointer type 'struct mlx5_nl_vlan_vmwa_context *' to
'void *'.

Signed-off-by: Ophir Munk <ophirmu@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
Ophir Munk 2020-08-25 09:31:16 +00:00 committed by Ferruh Yigit
parent 8bb2410ea3
commit 7af10d29a4
4 changed files with 174 additions and 137 deletions
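The key technique here is the opaque-handle pattern: the shared header
declares only 'void *', so the Netlink context type is visible to the
Linux-specific source alone. Below is a minimal, self-contained sketch of
that pattern; all names are hypothetical and it is not code from this patch.

#include <stdlib.h>

/* Shared header side: no OS-specific types are required here. */
void *example_ctx_create(unsigned int ifindex);
void example_ctx_destroy(void *handle);

/* OS-specific source side: the concrete type stays private to this file. */
struct example_ctx {
	int nl_socket;        /* OS-specific resource (Netlink socket on Linux) */
	unsigned int ifindex; /* VF interface index */
};

void *
example_ctx_create(unsigned int ifindex)
{
	struct example_ctx *ctx = calloc(1, sizeof(*ctx));

	if (ctx == NULL)
		return NULL;
	ctx->nl_socket = -1;
	ctx->ifindex = ifindex;
	return ctx; /* handed back to common code as void * */
}

void
example_ctx_destroy(void *handle)
{
	struct example_ctx *ctx = handle; /* cast back where the type is known */

	free(ctx); /* free(NULL) is a no-op */
}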

drivers/net/mlx5/linux/meson.build

@@ -8,5 +8,6 @@ sources += files(
'mlx5_ethdev_os.c',
'mlx5_verbs.c',
'mlx5_mp_os.c',
'mlx5_vlan_os.c',
)

drivers/net/mlx5/linux/mlx5_vlan_os.c (new file)

@@ -0,0 +1,168 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
* Copyright 2015 Mellanox Technologies, Ltd
*/
#include <stddef.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
/*
* Not needed by this file; included to work around the lack of off_t
* definition for mlx5dv.h with unpatched rdma-core versions.
*/
#include <sys/types.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_hypervisor.h>
#include <mlx5.h>
#include <mlx5_nl.h>
#include <mlx5_malloc.h>
/*
* Release VLAN network device, created for VM workaround.
*
* @param[in] dev
*   Ethernet device object, Netlink context provider.
* @param[in] vlan
*   Object representing the network device to release.
*/
void
mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
struct mlx5_vf_vlan *vlan)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];
MLX5_ASSERT(vlan->created);
MLX5_ASSERT(priv->vmwa_context);
if (!vlan->created || !vmwa)
return;
vlan->created = 0;
MLX5_ASSERT(vlan_dev[vlan->tag].refcnt);
if (--vlan_dev[vlan->tag].refcnt == 0 &&
vlan_dev[vlan->tag].ifindex) {
mlx5_nl_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex);
vlan_dev[vlan->tag].ifindex = 0;
}
}
/**
* Acquire VLAN interface with specified tag for VM workaround.
*
* @param[in] dev
*   Ethernet device object, Netlink context provider.
* @param[in] vlan
*   Object representing the network device to acquire.
*/
void
mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
struct mlx5_vf_vlan *vlan)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];
MLX5_ASSERT(!vlan->created);
MLX5_ASSERT(priv->vmwa_context);
if (vlan->created || !vmwa)
return;
if (vlan_dev[vlan->tag].refcnt == 0) {
MLX5_ASSERT(!vlan_dev[vlan->tag].ifindex);
vlan_dev[vlan->tag].ifindex =
mlx5_nl_vlan_vmwa_create(vmwa, vmwa->vf_ifindex,
vlan->tag);
}
if (vlan_dev[vlan->tag].ifindex) {
vlan_dev[vlan->tag].refcnt++;
vlan->created = 1;
}
}
/*
* Create per ethernet device VLAN VM workaround context
*
* @param dev
*   Pointer to Ethernet device structure.
* @param ifindex
*   Interface index.
*
* @return
*   Pointer to mlx5_nl_vlan_vmwa_context.
*/
void *
mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
struct mlx5_nl_vlan_vmwa_context *vmwa;
enum rte_hypervisor hv_type;
/* Do not engage workaround over PF. */
if (!config->vf)
return NULL;
/* Check whether there is desired virtual environment */
hv_type = rte_hypervisor_get();
switch (hv_type) {
case RTE_HYPERVISOR_UNKNOWN:
case RTE_HYPERVISOR_VMWARE:
/*
* The "white list" of configurations
* to engage the workaround.
*/
break;
default:
/*
* The configuration is not found in the "white list".
* We should not engage the VLAN workaround.
*/
return NULL;
}
vmwa = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*vmwa), sizeof(uint32_t),
SOCKET_ID_ANY);
if (!vmwa) {
DRV_LOG(WARNING,
"Can not allocate memory"
" for VLAN workaround context");
return NULL;
}
vmwa->nl_socket = mlx5_nl_init(NETLINK_ROUTE);
if (vmwa->nl_socket < 0) {
DRV_LOG(WARNING,
"Can not create Netlink socket"
" for VLAN workaround context");
mlx5_free(vmwa);
return NULL;
}
vmwa->vf_ifindex = ifindex;
/* Cleanup for existing VLAN devices. */
return vmwa;
}
/*
* Destroy per ethernet device VLAN VM workaround context
*
* @param vmctx
*   Pointer to VM workaround context.
*/
void
mlx5_vlan_vmwa_exit(void *vmctx)
{
unsigned int i;
struct mlx5_nl_vlan_vmwa_context *vmwa = vmctx;
/* Delete all remaining VLAN devices. */
for (i = 0; i < RTE_DIM(vmwa->vlan_dev); i++) {
if (vmwa->vlan_dev[i].ifindex)
mlx5_nl_vlan_vmwa_delete(vmwa,
vmwa->vlan_dev[i].ifindex);
}
if (vmwa->nl_socket >= 0)
close(vmwa->nl_socket);
mlx5_free(vmwa);
}
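Since every Netlink call now sits behind these four OS-agnostic entry points,
a port to another operating system could supply its own mlx5_vlan_os.c with
no-op stubs. The sketch below is purely hypothetical and not part of this
patch; it only illustrates what such a stub might look like.

#include <stdint.h>

#include <rte_common.h>
#include <rte_ethdev_driver.h>

#include <mlx5.h>

/* Hypothetical stub for an OS without Netlink: report the workaround as
 * unavailable and make every entry point a no-op. */
void *
mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(ifindex);
	return NULL; /* no VM workaround context on this OS */
}

void
mlx5_vlan_vmwa_exit(void *vmctx)
{
	RTE_SET_USED(vmctx);
}

void
mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev, struct mlx5_vf_vlan *vlan)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(vlan);
}

void
mlx5_vlan_vmwa_release(struct rte_eth_dev *dev, struct mlx5_vf_vlan *vlan)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(vlan);
}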

drivers/net/mlx5/mlx5.h

@@ -853,8 +853,6 @@ void mlx5_os_stats_init(struct rte_eth_dev *dev);
void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
uint32_t index, uint32_t vmdq);
struct mlx5_nl_vlan_vmwa_context *mlx5_vlan_vmwa_init
(struct rte_eth_dev *dev, uint32_t ifindex);
int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
int mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
struct rte_ether_addr *mc_addr_set,
@@ -897,11 +895,15 @@ int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on);
int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask);
void mlx5_vlan_vmwa_exit(struct mlx5_nl_vlan_vmwa_context *ctx);
/* mlx5_vlan_os.c */
void mlx5_vlan_vmwa_exit(void *ctx);
void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
struct mlx5_vf_vlan *vf_vlan);
void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
struct mlx5_vf_vlan *vf_vlan);
void *mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex);
/* mlx5_trigger.c */
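Because these prototypes expose the context only as 'void *', common code can
drive the whole lifecycle without ever seeing the Netlink type. A rough,
hypothetical caller-side sketch follows; the helper name, its parameters and
the tag value are illustrative, and it assumes the mlx5.h definitions
(struct mlx5_priv, struct mlx5_vf_vlan) are in scope.

/* Hypothetical illustration of the caller-side lifecycle. */
static void
vmwa_lifecycle_example(struct rte_eth_dev *dev, struct mlx5_priv *priv,
		       uint32_t vf_ifindex)
{
	struct mlx5_vf_vlan vf_vlan = { .tag = 100 };

	/* Device spawn: keep the context as an opaque pointer. */
	priv->vmwa_context = mlx5_vlan_vmwa_init(dev, vf_ifindex);

	/* A flow on the VF needs a VLAN tag: take a reference on that tag. */
	mlx5_vlan_vmwa_acquire(dev, &vf_vlan);

	/* Flow destroyed: drop the reference on the tag. */
	if (vf_vlan.created)
		mlx5_vlan_vmwa_release(dev, &vf_vlan);

	/* Device close: delete remaining VLAN devices and free the context. */
	if (priv->vmwa_context != NULL)
		mlx5_vlan_vmwa_exit(priv->vmwa_context);
}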

drivers/net/mlx5/mlx5_vlan.c

@@ -13,11 +13,6 @@
#include <rte_malloc.h>
#include <rte_hypervisor.h>
#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_nl.h>
#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "mlx5_rxtx.h"
@@ -162,132 +157,3 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
return 0;
}
/*
* Release VLAN network device, created for VM workaround.
*
* @param[in] dev
*   Ethernet device object, Netlink context provider.
* @param[in] vlan
*   Object representing the network device to release.
*/
void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
struct mlx5_vf_vlan *vlan)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];
MLX5_ASSERT(vlan->created);
MLX5_ASSERT(priv->vmwa_context);
if (!vlan->created || !vmwa)
return;
vlan->created = 0;
MLX5_ASSERT(vlan_dev[vlan->tag].refcnt);
if (--vlan_dev[vlan->tag].refcnt == 0 &&
vlan_dev[vlan->tag].ifindex) {
mlx5_nl_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex);
vlan_dev[vlan->tag].ifindex = 0;
}
}
/**
* Acquire VLAN interface with specified tag for VM workaround.
*
* @param[in] dev
*   Ethernet device object, Netlink context provider.
* @param[in] vlan
*   Object representing the network device to acquire.
*/
void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
struct mlx5_vf_vlan *vlan)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_nl_vlan_vmwa_context *vmwa = priv->vmwa_context;
struct mlx5_nl_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];
MLX5_ASSERT(!vlan->created);
MLX5_ASSERT(priv->vmwa_context);
if (vlan->created || !vmwa)
return;
if (vlan_dev[vlan->tag].refcnt == 0) {
MLX5_ASSERT(!vlan_dev[vlan->tag].ifindex);
vlan_dev[vlan->tag].ifindex =
mlx5_nl_vlan_vmwa_create(vmwa, vmwa->vf_ifindex,
vlan->tag);
}
if (vlan_dev[vlan->tag].ifindex) {
vlan_dev[vlan->tag].refcnt++;
vlan->created = 1;
}
}
/*
* Create per ethernet device VLAN VM workaround context
*/
struct mlx5_nl_vlan_vmwa_context *
mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
struct mlx5_nl_vlan_vmwa_context *vmwa;
enum rte_hypervisor hv_type;
/* Do not engage workaround over PF. */
if (!config->vf)
return NULL;
/* Check whether there is desired virtual environment */
hv_type = rte_hypervisor_get();
switch (hv_type) {
case RTE_HYPERVISOR_UNKNOWN:
case RTE_HYPERVISOR_VMWARE:
/*
* The "white list" of configurations
* to engage the workaround.
*/
break;
default:
/*
* The configuration is not found in the "white list".
* We should not engage the VLAN workaround.
*/
return NULL;
}
vmwa = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*vmwa), sizeof(uint32_t),
SOCKET_ID_ANY);
if (!vmwa) {
DRV_LOG(WARNING,
"Can not allocate memory"
" for VLAN workaround context");
return NULL;
}
vmwa->nl_socket = mlx5_nl_init(NETLINK_ROUTE);
if (vmwa->nl_socket < 0) {
DRV_LOG(WARNING,
"Can not create Netlink socket"
" for VLAN workaround context");
mlx5_free(vmwa);
return NULL;
}
vmwa->vf_ifindex = ifindex;
/* Cleanup for existing VLAN devices. */
return vmwa;
}
/*
* Destroy per ethernet device VLAN VM workaround context
*/
void mlx5_vlan_vmwa_exit(struct mlx5_nl_vlan_vmwa_context *vmwa)
{
unsigned int i;
/* Delete all remaining VLAN devices. */
for (i = 0; i < RTE_DIM(vmwa->vlan_dev); i++) {
if (vmwa->vlan_dev[i].ifindex)
mlx5_nl_vlan_vmwa_delete(vmwa,
vmwa->vlan_dev[i].ifindex);
}
if (vmwa->nl_socket >= 0)
close(vmwa->nl_socket);
mlx5_free(vmwa);
}