Add vendor specific capability interface support in mlx5core.

Add the ability to access the vendor specific space gateway in order
to support reading data from and writing data to the different
configuration domains.

Submitted by:	Matthew Finlay <matt@mellanox.com>
MFC after:	1 week
Sponsored by:	Mellanox Technologies
Committer:	Hans Petter Selasky
Date:		2018-03-08 11:59:47 +00:00
Parent:		fba294620c
Commit:		4b95c6659a
Notes:		svn2git (2020-12-20 02:59:44 +00:00): svn path=/head/; revision=330651

7 changed files with 254 additions and 1 deletion
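
For orientation, here is a minimal usage sketch (illustrative only, not part
of the diff below) of how a consumer is expected to drive the new gateway,
using the functions and the MLX5_VSC_DOMAIN_ICMD constant added by this
commit; the wrapper name and its error handling are hypothetical:

/*
 * Illustrative sketch only (not part of this commit): read one dword
 * from the ICMD configuration domain through the VSC gateway.
 */
static int
mlx5_vsc_read_icmd_dword(struct mlx5_core_dev *mdev, u32 addr, u32 *val)
{
	int err;

	/* Serialize access to the gateway via the VSC semaphore. */
	err = mlx5_vsc_lock(mdev);
	if (err)
		return err;

	/* Select which configuration domain the gateway addresses. */
	err = mlx5_vsc_set_space(mdev, MLX5_VSC_DOMAIN_ICMD);
	if (err)
		goto out;

	/* Read a single 32-bit value through the address/data registers. */
	err = mlx5_vsc_read(mdev, addr, val);
out:
	mlx5_vsc_unlock(mdev);
	return err;
}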

@@ -4757,6 +4757,8 @@ dev/mlx5/mlx5_core/mlx5_uar.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_vport.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_vsc.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_wq.c optional mlx5 pci \
compile-with "${OFED_C}"

@@ -138,6 +138,26 @@ __mlx5_mask(typ, fld))
MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
/* insert a value to a struct */
#define MLX5_VSC_SET(typ, p, fld, v) do { \
BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
*((__le32 *)(p) + __mlx5_dw_off(typ, fld)) = \
cpu_to_le32((le32_to_cpu(*((__le32 *)(p) + __mlx5_dw_off(typ, fld))) & \
(~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
<< __mlx5_dw_bit_off(typ, fld))); \
} while (0)
#define MLX5_VSC_GET(typ, p, fld) ((le32_to_cpu(*((__le32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))
#define MLX5_VSC_GET_PR(typ, p, fld) ({ \
u32 ___t = MLX5_VSC_GET(typ, p, fld); \
pr_debug(#fld " = 0x%x\n", ___t); \
___t; \
})
enum {
MLX5_MAX_COMMANDS = 32,
MLX5_CMD_DATA_BLOCK_SIZE = 512,
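
A note on the accessors above: MLX5_VSC_SET/MLX5_VSC_GET are little-endian
variants of MLX5_SET/MLX5_GET, since the assembled dword is moved through PCI
config space rather than a big-endian command mailbox. For the vsc_addr
layout introduced in the new mlx5_vsc.c below (flag in bit 31, a 30-bit
address in bits 0..29), they reduce on a little-endian host roughly to the
following (my illustration, not code from the commit):

	u32 in = 0;

	/* ~ MLX5_VSC_SET(vsc_addr, &in, address, addr); address[0x1e] -> bits 0..29 */
	in |= (addr & 0x3fffffffU);
	/* ~ MLX5_VSC_SET(vsc_addr, &in, flag, 1); flag[0x1] -> bit 31 */
	in |= (1U << 31);

	/* ~ MLX5_VSC_GET(vsc_addr, &in, flag) */
	u32 flag = (in >> 31) & 0x1U;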

@@ -221,6 +221,16 @@ enum mlx5_link_mode {
MLX5_LINK_MODES_NUMBER,
};
enum {
MLX5_VSC_SPACE_SUPPORTED = 0x1,
MLX5_VSC_SPACE_OFFSET = 0x4,
MLX5_VSC_COUNTER_OFFSET = 0x8,
MLX5_VSC_SEMA_OFFSET = 0xC,
MLX5_VSC_ADDR_OFFSET = 0x10,
MLX5_VSC_DATA_OFFSET = 0x14,
MLX5_VSC_MAX_RETRIES = 0x1000,
};
#define MLX5_PROT_MASK(link_mode) (1 << link_mode)
struct mlx5_uuar_info {
@@ -627,6 +637,7 @@ struct mlx5_core_dev {
struct mlx5_priv priv;
struct mlx5_profile *profile;
atomic_t num_qps;
u32 vsc_addr;
u32 issi;
struct mlx5_special_contexts special_contexts;
unsigned int module_status[MLX5_MAX_PORTS];
@@ -1001,6 +1012,12 @@ int mlx5_set_diagnostic_params(struct mlx5_core_dev *mdev, void *in,
int mlx5_query_diagnostic_counters(struct mlx5_core_dev *mdev,
u8 num_of_samples, u16 sample_index,
void *out, int out_size);
int mlx5_vsc_find_cap(struct mlx5_core_dev *mdev);
int mlx5_vsc_lock(struct mlx5_core_dev *mdev);
void mlx5_vsc_unlock(struct mlx5_core_dev *mdev);
int mlx5_vsc_set_space(struct mlx5_core_dev *mdev, u16 space);
int mlx5_vsc_write(struct mlx5_core_dev *mdev, u32 addr, u32 *data);
int mlx5_vsc_read(struct mlx5_core_dev *mdev, u32 addr, u32 *data);
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
return mkey >> 8;

@@ -846,6 +846,9 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto err_clr_master;
}
if (mlx5_vsc_find_cap(dev))
dev_err(&pdev->dev, "Unable to find vendor specific capabilities\n");
return 0;
err_clr_master:
@@ -1184,7 +1187,6 @@ static int init_one(struct pci_dev *pdev,
goto clean_health;
}
return 0;
clean_health:

@@ -0,0 +1,204 @@
/*-
* Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/

#include <dev/mlx5/driver.h>
#include <dev/mlx5/device.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>

struct mlx5_ifc_vsc_space_bits {
u8 status[0x3];
u8 reserved0[0xd];
u8 space[0x10];
};

struct mlx5_ifc_vsc_addr_bits {
u8 flag[0x1];
u8 reserved0[0x1];
u8 address[0x1e];
};

int mlx5_vsc_lock(struct mlx5_core_dev *mdev)
{
device_t dev = mdev->pdev->dev.bsddev;
int vsc_addr = mdev->vsc_addr;
int retries = 0;
u32 lock_val;
u32 counter;
if (!vsc_addr) {
mlx5_core_warn(mdev, "Unable to acquire vsc lock, vsc_addr not initialized\n");
return EINVAL;
}
while (true) {
if (retries > MLX5_VSC_MAX_RETRIES)
return EBUSY;
if (pci_read_config(dev, vsc_addr + MLX5_VSC_SEMA_OFFSET, 4)) {
retries++;
/*
* The PRM suggests random 0 - 10ms to prevent multiple
* waiters on the same interval in order to avoid starvation
*/
DELAY((random() % 11) * 1000);
continue;
}
counter = pci_read_config(dev, vsc_addr + MLX5_VSC_COUNTER_OFFSET, 4);
pci_write_config(dev, vsc_addr + MLX5_VSC_SEMA_OFFSET, counter, 4);
lock_val = pci_read_config(dev, vsc_addr + MLX5_VSC_SEMA_OFFSET, 4);
if (lock_val == counter)
break;
retries++;
}
return 0;
}

void mlx5_vsc_unlock(struct mlx5_core_dev *mdev)
{
device_t dev = mdev->pdev->dev.bsddev;
int vsc_addr = mdev->vsc_addr;
if (!vsc_addr) {
mlx5_core_warn(mdev, "Unable to release vsc lock, vsc_addr not initialized\n");
return;
}
pci_write_config(dev, vsc_addr + MLX5_VSC_SEMA_OFFSET, 0, 4);
}

static int mlx5_vsc_wait_on_flag(struct mlx5_core_dev *mdev, u32 expected)
{
device_t dev = mdev->pdev->dev.bsddev;
int vsc_addr = mdev->vsc_addr;
int retries = 0;
u32 flag;
while (true) {
if (retries > MLX5_VSC_MAX_RETRIES)
return EBUSY;
flag = pci_read_config(dev, vsc_addr + MLX5_VSC_ADDR_OFFSET, 4);
if (expected == MLX5_VSC_GET(vsc_addr, &flag, flag))
break;
retries++;
DELAY(10);
}
return 0;
}

int mlx5_vsc_set_space(struct mlx5_core_dev *mdev, u16 space)
{
device_t dev = mdev->pdev->dev.bsddev;
int vsc_addr = mdev->vsc_addr;
u32 vsc_space = 0;
if (!vsc_addr) {
mlx5_core_warn(mdev, "Unable to set vsc space, vsc_addr not initialized\n");
return EINVAL;
}
MLX5_VSC_SET(vsc_space, &vsc_space, space, space);
pci_write_config(dev, vsc_addr + MLX5_VSC_SPACE_OFFSET, vsc_space, 4);
vsc_space = pci_read_config(dev, vsc_addr + MLX5_VSC_SPACE_OFFSET, 4);
if (MLX5_VSC_GET(vsc_space, &vsc_space, status) != MLX5_VSC_SPACE_SUPPORTED) {
mlx5_core_warn(mdev, "Space 0x%x is not supported.\n", space);
return ENOTSUP;
}
return 0;
}

int mlx5_vsc_write(struct mlx5_core_dev *mdev, u32 addr, u32 *data)
{
device_t dev = mdev->pdev->dev.bsddev;
int vsc_addr = mdev->vsc_addr;
u32 in = 0;
int err;
if (!vsc_addr) {
mlx5_core_warn(mdev, "Unable to call vsc write, vsc_addr not initialized\n");
return EINVAL;
}
MLX5_VSC_SET(vsc_addr, &in, address, addr);
MLX5_VSC_SET(vsc_addr, &in, flag, 1);
pci_write_config(dev, vsc_addr + MLX5_VSC_DATA_OFFSET, *data, 4);
pci_write_config(dev, vsc_addr + MLX5_VSC_ADDR_OFFSET, in, 4);
err = mlx5_vsc_wait_on_flag(mdev, 0);
if (err)
mlx5_core_warn(mdev, "Failed waiting for write flag!\n");
return err;
}

int mlx5_vsc_read(struct mlx5_core_dev *mdev, u32 addr, u32 *data)
{
device_t dev = mdev->pdev->dev.bsddev;
int vsc_addr = mdev->vsc_addr;
int err;
u32 in;
if (!vsc_addr) {
mlx5_core_warn(mdev, "Unable to call vsc read, vsc_addr not initialized\n");
return EINVAL;
}
MLX5_VSC_SET(vsc_addr, &in, address, addr);
pci_write_config(dev, vsc_addr + MLX5_VSC_ADDR_OFFSET, in, 4);
err = mlx5_vsc_wait_on_flag(mdev, 1);
if (err) {
mlx5_core_warn(mdev, "Failed waiting for read complete flag!\n");
return err;
}
*data = pci_read_config(dev, vsc_addr + MLX5_VSC_DATA_OFFSET, 4);
return 0;
}

int mlx5_vsc_find_cap(struct mlx5_core_dev *mdev)
{
int *capreg = &mdev->vsc_addr;
int err;
err = pci_find_cap(mdev->pdev->dev.bsddev, PCIY_VENDOR, capreg);
if (err)
*capreg = 0;
return err;
}
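
The gateway registers are plain dword config-space registers at fixed offsets
from the vendor specific capability located by mlx5_vsc_find_cap(); for
illustration, a hypothetical debug helper (not part of this commit) could dump
them with ordinary config-space reads:

static void
mlx5_vsc_dump_regs(struct mlx5_core_dev *mdev)
{
	device_t dev = mdev->pdev->dev.bsddev;
	int vsc_addr = mdev->vsc_addr;

	if (!vsc_addr)
		return;

	/* Each gateway register is a 32-bit field in PCI config space. */
	device_printf(dev, "VSC space=0x%x counter=0x%x semaphore=0x%x\n",
	    pci_read_config(dev, vsc_addr + MLX5_VSC_SPACE_OFFSET, 4),
	    pci_read_config(dev, vsc_addr + MLX5_VSC_COUNTER_OFFSET, 4),
	    pci_read_config(dev, vsc_addr + MLX5_VSC_SEMA_OFFSET, 4));
}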

@@ -9328,6 +9328,13 @@ struct mlx5_ifc_mtt_bits {
u8 rd_en[0x1];
};
/* Vendor Specific Capabilities, VSC */
enum {
MLX5_VSC_DOMAIN_ICMD = 0x1,
MLX5_VSC_DOMAIN_PROTECTED_CRSPACE = 0x6,
MLX5_VSC_DOMAIN_SEMAPHORES = 0xA,
};
struct mlx5_ifc_vendor_specific_cap_bits {
u8 type[0x8];
u8 length[0x8];

@@ -24,6 +24,7 @@ mlx5_srq.c \
mlx5_transobj.c \
mlx5_uar.c \
mlx5_vport.c \
mlx5_vsc.c \
mlx5_wq.c \
device_if.h bus_if.h vnode_if.h pci_if.h \
opt_inet.h opt_inet6.h opt_rss.h