/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2012 6WIND S.A.
 * Copyright 2012 Mellanox Technologies, Ltd
 */

/**
 * @file
 * mlx4 driver initialization.
 */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_config.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_interrupts.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>

#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_flow.h"
#include "mlx4_mr.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

static const char *MZ_MLX4_PMD_SHARED_DATA = "mlx4_pmd_shared_data";

/* Shared memory between primary and secondary processes. */
struct mlx4_shared_data *mlx4_shared_data;

/* Spinlock for mlx4_shared_data allocation. */
static rte_spinlock_t mlx4_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Process local data for secondary processes. */
static struct mlx4_local_data mlx4_local_data;

/** Driver-specific log messages type. */
int mlx4_logtype;

/** Configuration structure for device arguments. */
struct mlx4_conf {
	struct {
		uint32_t present; /**< Bit-field for existing ports. */
		uint32_t enabled; /**< Bit-field for user-enabled ports. */
	} ports;
	int mr_ext_memseg_en;
	/** Whether memseg should be extended for MR creation. */
};

/* Available parameters list. */
const char *pmd_mlx4_init_params[] = {
	MLX4_PMD_PORT_KVARG,
	MLX4_MR_EXT_MEMSEG_EN_KVARG,
	NULL,
};
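
/*
 * Illustrative example only: both keys are consumed from the PCI device's
 * devargs string, e.g. a whitelist entry such as
 * "-w 02:00.0,port=0,mr_ext_memseg_en=0" restricts the PMD to physical
 * port 0 and disables memseg extension during MR creation (assuming
 * MLX4_PMD_PORT_KVARG and MLX4_MR_EXT_MEMSEG_EN_KVARG expand to "port"
 * and "mr_ext_memseg_en" respectively).
 */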

static void mlx4_dev_stop(struct rte_eth_dev *dev);

/**
 * Initialize shared data between primary and secondary process.
 *
 * A memzone is reserved by primary process and secondary processes attach to
 * the memzone.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_init_shared_data(void)
{
	const struct rte_memzone *mz;
	int ret = 0;

	rte_spinlock_lock(&mlx4_shared_data_lock);
	if (mlx4_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate shared memory. */
			mz = rte_memzone_reserve(MZ_MLX4_PMD_SHARED_DATA,
						 sizeof(*mlx4_shared_data),
						 SOCKET_ID_ANY, 0);
			if (mz == NULL) {
				ERROR("Cannot allocate mlx4 shared data\n");
				ret = -rte_errno;
				goto error;
			}
			mlx4_shared_data = mz->addr;
			memset(mlx4_shared_data, 0, sizeof(*mlx4_shared_data));
			rte_spinlock_init(&mlx4_shared_data->lock);
		} else {
			/* Lookup allocated shared memory. */
			mz = rte_memzone_lookup(MZ_MLX4_PMD_SHARED_DATA);
			if (mz == NULL) {
				ERROR("Cannot attach mlx4 shared data\n");
				ret = -rte_errno;
				goto error;
			}
			mlx4_shared_data = mz->addr;
			memset(&mlx4_local_data, 0, sizeof(mlx4_local_data));
		}
	}
error:
	rte_spinlock_unlock(&mlx4_shared_data_lock);
	return ret;
}
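
/*
 * Note: primary and secondary processes locate this area purely by the
 * memzone name above, so both sides must agree on the layout of struct
 * mlx4_shared_data; mixing PMD versions between processes is not expected
 * to work.
 */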

#ifdef HAVE_IBV_MLX4_BUF_ALLOCATORS
/**
 * Verbs callback to allocate memory. This function should allocate the space
 * according to the size provided residing inside a huge page.
 * Please note that all allocation must respect the alignment from libmlx4
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx4_alloc_verbs_buf(size_t size, void *data)
{
	struct mlx4_priv *priv = data;
	void *ret;
	size_t alignment = sysconf(_SC_PAGESIZE);
	unsigned int socket = SOCKET_ID_ANY;

	if (priv->verbs_alloc_ctx.type == MLX4_VERBS_ALLOC_TYPE_TX_QUEUE) {
		const struct txq *txq = priv->verbs_alloc_ctx.obj;

		socket = txq->socket;
	} else if (priv->verbs_alloc_ctx.type ==
		   MLX4_VERBS_ALLOC_TYPE_RX_QUEUE) {
		const struct rxq *rxq = priv->verbs_alloc_ctx.obj;

		socket = rxq->socket;
	}
	assert(data != NULL);
	ret = rte_malloc_socket(__func__, size, alignment, socket);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx4_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	assert(data != NULL);
	rte_free(ptr);
}
#endif

/**
 * Initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_proc_priv_init(struct rte_eth_dev *dev)
{
	struct mlx4_proc_priv *ppriv;
	size_t ppriv_size;

	/*
	 * UAR register table follows the process private structure. BlueFlame
	 * registers for Tx queues are stored in the table.
	 */
	ppriv_size = sizeof(struct mlx4_proc_priv) +
		     dev->data->nb_tx_queues * sizeof(void *);
	ppriv = rte_malloc_socket("mlx4_proc_priv", ppriv_size,
				  RTE_CACHE_LINE_SIZE, dev->device->numa_node);
	if (!ppriv) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	ppriv->uar_table_sz = ppriv_size;
	dev->process_private = ppriv;
	return 0;
}

/**
 * Un-initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_proc_priv_uninit(struct rte_eth_dev *dev)
{
	if (!dev->process_private)
		return;
	rte_free(dev->process_private);
	dev->process_private = NULL;
}
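
/*
 * Note: the UAR table allocated by mlx4_proc_priv_init() is what makes
 * dev->process_private per process: primary and secondary processes map the
 * same BlueFlame/doorbell registers at different virtual addresses, so each
 * process keeps its own pointer table indexed by Tx queue.
 */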

/**
 * DPDK callback for Ethernet device configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_configure(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct rte_flow_error error;
	int ret;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* Prepare internal flow rules. */
	ret = mlx4_flow_sync(priv, &error);
	if (ret) {
		ERROR("cannot set up internal flow rules (code %d, \"%s\"),"
		      " flow error type %d, cause %p, message: %s",
		      -ret, strerror(-ret), error.type, error.cause,
		      error.message ? error.message : "(unspecified)");
		goto exit;
	}
	ret = mlx4_intr_install(priv);
	if (ret) {
		ERROR("%p: interrupt handler installation failed",
		      (void *)dev);
		goto exit;
	}
	ret = mlx4_proc_priv_init(dev);
	if (ret) {
		ERROR("%p: process private data allocation failed",
		      (void *)dev);
		goto exit;
	}
exit:
	return ret;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by initializing common RSS resources and attaching
 * all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_start(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct rte_flow_error error;
	int ret;

	if (priv->started)
		return 0;
	DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
	priv->started = 1;
	ret = mlx4_rss_init(priv);
	if (ret) {
		ERROR("%p: cannot initialize RSS resources: %s",
		      (void *)dev, strerror(-ret));
		goto err;
	}
#ifndef NDEBUG
	mlx4_mr_dump_dev(dev);
#endif
	ret = mlx4_rxq_intr_enable(priv);
	if (ret) {
		ERROR("%p: interrupt handler installation failed",
		      (void *)dev);
		goto err;
	}
	ret = mlx4_flow_sync(priv, &error);
	if (ret) {
		ERROR("%p: cannot attach flow rules (code %d, \"%s\"),"
		      " flow error type %d, cause %p, message: %s",
		      (void *)dev,
		      -ret, strerror(-ret), error.type, error.cause,
		      error.message ? error.message : "(unspecified)");
		goto err;
	}
	rte_wmb();
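	/*
	 * The write barrier above makes all queue and flow state initialized
	 * so far globally visible before the real burst callbacks are
	 * published below.
	 */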
	dev->tx_pkt_burst = mlx4_tx_burst;
	dev->rx_pkt_burst = mlx4_rx_burst;
	/* Enable datapath on secondary process. */
	mlx4_mp_req_start_rxtx(dev);
	return 0;
err:
	mlx4_dev_stop(dev);
	return ret;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_dev_stop(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;

	if (!priv->started)
		return;
	DEBUG("%p: detaching flows from all RX queues", (void *)dev);
	priv->started = 0;
	dev->tx_pkt_burst = mlx4_tx_burst_removed;
	dev->rx_pkt_burst = mlx4_rx_burst_removed;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx4_mp_req_stop_rxtx(dev);
	mlx4_flow_sync(priv, NULL);
	mlx4_rxq_intr_disable(priv);
	mlx4_rss_deinit(priv);
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_dev_close(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	unsigned int i;

	DEBUG("%p: closing device \"%s\"",
	      (void *)dev,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	dev->rx_pkt_burst = mlx4_rx_burst_removed;
	dev->tx_pkt_burst = mlx4_tx_burst_removed;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx4_mp_req_stop_rxtx(dev);
	mlx4_flow_clean(priv);
	mlx4_rss_deinit(priv);
	for (i = 0; i != dev->data->nb_rx_queues; ++i)
		mlx4_rx_queue_release(dev->data->rx_queues[i]);
	for (i = 0; i != dev->data->nb_tx_queues; ++i)
		mlx4_tx_queue_release(dev->data->tx_queues[i]);
	mlx4_proc_priv_uninit(dev);
	mlx4_mr_release(dev);
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(mlx4_glue->dealloc_pd(priv->pd));
		claim_zero(mlx4_glue->close_device(priv->ctx));
	} else
		assert(priv->ctx == NULL);
	mlx4_intr_uninstall(priv);
	memset(priv, 0, sizeof(*priv));
}

static const struct eth_dev_ops mlx4_dev_ops = {
	.dev_configure = mlx4_dev_configure,
	.dev_start = mlx4_dev_start,
	.dev_stop = mlx4_dev_stop,
	.dev_set_link_down = mlx4_dev_set_link_down,
	.dev_set_link_up = mlx4_dev_set_link_up,
	.dev_close = mlx4_dev_close,
	.link_update = mlx4_link_update,
	.promiscuous_enable = mlx4_promiscuous_enable,
	.promiscuous_disable = mlx4_promiscuous_disable,
	.allmulticast_enable = mlx4_allmulticast_enable,
	.allmulticast_disable = mlx4_allmulticast_disable,
	.mac_addr_remove = mlx4_mac_addr_remove,
	.mac_addr_add = mlx4_mac_addr_add,
	.mac_addr_set = mlx4_mac_addr_set,
	.set_mc_addr_list = mlx4_set_mc_addr_list,
	.stats_get = mlx4_stats_get,
	.stats_reset = mlx4_stats_reset,
	.fw_version_get = mlx4_fw_version_get,
	.dev_infos_get = mlx4_dev_infos_get,
	.dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get,
	.vlan_filter_set = mlx4_vlan_filter_set,
	.rx_queue_setup = mlx4_rx_queue_setup,
	.tx_queue_setup = mlx4_tx_queue_setup,
	.rx_queue_release = mlx4_rx_queue_release,
	.tx_queue_release = mlx4_tx_queue_release,
	.flow_ctrl_get = mlx4_flow_ctrl_get,
	.flow_ctrl_set = mlx4_flow_ctrl_set,
	.mtu_set = mlx4_mtu_set,
	.filter_ctrl = mlx4_filter_ctrl,
	.rx_queue_intr_enable = mlx4_rx_intr_enable,
	.rx_queue_intr_disable = mlx4_rx_intr_disable,
	.is_removed = mlx4_is_removed,
};

/* Available operations from secondary process. */
static const struct eth_dev_ops mlx4_dev_sec_ops = {
	.stats_get = mlx4_stats_get,
	.stats_reset = mlx4_stats_reset,
	.fw_version_get = mlx4_fw_version_get,
	.dev_infos_get = mlx4_dev_infos_get,
};
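
/*
 * Note: secondary processes only expose the read-mostly control operations
 * above; configuration and queue setup remain the primary process's job,
 * while Rx/Tx in secondary processes go through the burst callbacks set up
 * during probe.
 */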

/**
 * Get PCI information from struct ibv_device.
 *
 * @param device
 *   Pointer to Verbs device structure.
 * @param[out] pci_addr
 *   PCI bus address output buffer.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
			    struct rte_pci_addr *pci_addr)
{
	FILE *file;
	char line[32];
	MKSTR(path, "%s/device/uevent", device->ibdev_path);

	file = fopen(path, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	while (fgets(line, sizeof(line), file) == line) {
		size_t len = strlen(line);
		int ret;

		/* Truncate long lines. */
		if (len == (sizeof(line) - 1))
			while (line[(len - 1)] != '\n') {
				ret = fgetc(file);
				if (ret == EOF)
					break;
				line[(len - 1)] = ret;
			}
		/* Extract information. */
		if (sscanf(line,
			   "PCI_SLOT_NAME="
			   "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
			   &pci_addr->domain,
			   &pci_addr->bus,
			   &pci_addr->devid,
			   &pci_addr->function) == 4) {
			ret = 0;
			break;
		}
	}
	fclose(file);
	return 0;
}
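
/*
 * Note: the uevent file parsed above holds one KEY=value pair per line; the
 * entry of interest looks like "PCI_SLOT_NAME=0000:83:00.0" (domain, bus,
 * device and function in hexadecimal).
 */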

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param[in, out] conf
 *   Shared configuration data.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_arg_parse(const char *key, const char *val, struct mlx4_conf *conf)
{
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		WARN("%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
		uint32_t ports = rte_log2_u32(conf->ports.present + 1);

		if (tmp >= ports) {
			ERROR("port index %lu outside range [0,%" PRIu32 ")",
			      tmp, ports);
			return -EINVAL;
		}
		if (!(conf->ports.present & (1 << tmp))) {
			rte_errno = EINVAL;
			ERROR("invalid port index %lu", tmp);
			return -rte_errno;
		}
		conf->ports.enabled |= 1 << tmp;
	} else if (strcmp(MLX4_MR_EXT_MEMSEG_EN_KVARG, key) == 0) {
		conf->mr_ext_memseg_en = !!tmp;
	} else {
		rte_errno = EINVAL;
		WARN("%s: unknown parameter", key);
		return -rte_errno;
	}
	return 0;
}
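
/*
 * Note on mlx4_arg_parse(): ports.present is filled during probe as a
 * contiguous bit-field with one bit per physical port, so present + 1 is a
 * power of two and rte_log2_u32(present + 1) yields the number of ports
 * against which the user-supplied index is validated.
 */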

/**
 * Parse device parameters.
 *
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_args(struct rte_devargs *devargs, struct mlx4_conf *conf)
{
	struct rte_kvargs *kvlist;
	unsigned int arg_count;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, pmd_mlx4_init_params);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		ERROR("failed to parse kvargs");
		return -rte_errno;
	}
	/* Process parameters. */
	for (i = 0; pmd_mlx4_init_params[i]; ++i) {
		arg_count = rte_kvargs_count(kvlist, pmd_mlx4_init_params[i]);
		while (arg_count-- > 0) {
			ret = rte_kvargs_process(kvlist,
						 pmd_mlx4_init_params[i],
						 (int (*)(const char *,
							  const char *,
							  void *))
						 mlx4_arg_parse,
						 conf);
			if (ret != 0)
				goto free_kvlist;
		}
	}
free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
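
/*
 * Note: mlx4_arg_parse() takes a struct mlx4_conf * as its opaque argument,
 * hence the function pointer cast above to the generic handler signature
 * (const char *, const char *, void *) expected by rte_kvargs_process().
 */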

/**
 * Interpret RSS capabilities reported by device.
 *
 * This function returns the set of usable Verbs RSS hash fields, kernel
 * quirks taken into account.
 *
 * @param ctx
 *   Verbs context.
 * @param pd
 *   Verbs protection domain.
 * @param device_attr_ex
 *   Extended device attributes to interpret.
 *
 * @return
 *   Usable RSS hash fields mask in Verbs format.
 */
static uint64_t
mlx4_hw_rss_sup(struct ibv_context *ctx, struct ibv_pd *pd,
		struct ibv_device_attr_ex *device_attr_ex)
{
	uint64_t hw_rss_sup = device_attr_ex->rss_caps.rx_hash_fields_mask;
	struct ibv_cq *cq = NULL;
	struct ibv_wq *wq = NULL;
	struct ibv_rwq_ind_table *ind = NULL;
	struct ibv_qp *qp = NULL;

	if (!hw_rss_sup) {
		WARN("no RSS capabilities reported; disabling support for UDP"
		     " RSS and inner VXLAN RSS");
		return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
			IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 |
			IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
	}
	if (!(hw_rss_sup & IBV_RX_HASH_INNER))
		return hw_rss_sup;
	/*
	 * Although reported as supported, missing code in some Linux
	 * versions (v4.15, v4.16) prevents the creation of hash QPs with
	 * inner capability.
	 *
	 * There is no choice but to attempt to instantiate a temporary RSS
	 * context in order to confirm its support.
	 */
	cq = mlx4_glue->create_cq(ctx, 1, NULL, NULL, 0);
	wq = cq ? mlx4_glue->create_wq
		(ctx,
		 &(struct ibv_wq_init_attr){
			.wq_type = IBV_WQT_RQ,
			.max_wr = 1,
			.max_sge = 1,
			.pd = pd,
			.cq = cq,
		 }) : NULL;
	ind = wq ? mlx4_glue->create_rwq_ind_table
		(ctx,
		 &(struct ibv_rwq_ind_table_init_attr){
			.log_ind_tbl_size = 0,
			.ind_tbl = &wq,
			.comp_mask = 0,
		 }) : NULL;
	qp = ind ? mlx4_glue->create_qp_ex
		(ctx,
		 &(struct ibv_qp_init_attr_ex){
			.comp_mask =
				(IBV_QP_INIT_ATTR_PD |
				 IBV_QP_INIT_ATTR_RX_HASH |
				 IBV_QP_INIT_ATTR_IND_TABLE),
			.qp_type = IBV_QPT_RAW_PACKET,
			.pd = pd,
			.rwq_ind_tbl = ind,
			.rx_hash_conf = {
				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
				.rx_hash_key = mlx4_rss_hash_key_default,
				.rx_hash_fields_mask = hw_rss_sup,
			},
		 }) : NULL;
	if (!qp) {
		WARN("disabling unusable inner RSS capability due to kernel"
		     " quirk");
		hw_rss_sup &= ~IBV_RX_HASH_INNER;
	} else {
		claim_zero(mlx4_glue->destroy_qp(qp));
	}
	if (ind)
		claim_zero(mlx4_glue->destroy_rwq_ind_table(ind));
	if (wq)
		claim_zero(mlx4_glue->destroy_wq(wq));
	if (cq)
		claim_zero(mlx4_glue->destroy_cq(cq));
	return hw_rss_sup;
}

static struct rte_pci_driver mlx4_driver;

/**
 * PMD global initialization.
 *
 * Independent from individual device, this function initializes global
 * per-PMD data structures distinguishing primary and secondary processes.
 * Hence, each initialization is called once per process.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_init_once(void)
{
	struct mlx4_shared_data *sd;
	struct mlx4_local_data *ld = &mlx4_local_data;
	int ret = 0;

	if (mlx4_init_shared_data())
		return -rte_errno;
	sd = mlx4_shared_data;
	assert(sd);
	rte_spinlock_lock(&sd->lock);
	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		if (sd->init_done)
			break;
		LIST_INIT(&sd->mem_event_cb_list);
		rte_rwlock_init(&sd->mem_event_rwlock);
		rte_mem_event_callback_register("MLX4_MEM_EVENT_CB",
						mlx4_mr_mem_event_cb, NULL);
		ret = mlx4_mp_init_primary();
		if (ret)
			goto out;
		sd->init_done = 1;
		break;
	case RTE_PROC_SECONDARY:
		if (ld->init_done)
			break;
		ret = mlx4_mp_init_secondary();
		if (ret)
			goto out;
		++sd->secondary_cnt;
		ld->init_done = 1;
		break;
	default:
		break;
	}
out:
	rte_spinlock_unlock(&sd->lock);
	return ret;
}

/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx4_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct ibv_device **list;
	struct ibv_device *ibv_dev;
	int err = 0;
	struct ibv_context *attr_ctx = NULL;
	struct ibv_device_attr device_attr;
	struct ibv_device_attr_ex device_attr_ex;
	struct mlx4_conf conf = {
		.ports.present = 0,
		.mr_ext_memseg_en = 1,
	};
	unsigned int vf;
	int i;
	char ifname[IF_NAMESIZE];

	(void)pci_drv;
	err = mlx4_init_once();
	if (err) {
		ERROR("unable to init PMD global data: %s",
		      strerror(rte_errno));
		return -rte_errno;
	}
	assert(pci_drv == &mlx4_driver);
	list = mlx4_glue->get_device_list(&i);
	if (list == NULL) {
		rte_errno = errno;
		assert(rte_errno);
		if (rte_errno == ENOSYS)
			ERROR("cannot list devices, is ib_uverbs loaded?");
		return -rte_errno;
	}
	assert(i >= 0);
	/*
	 * For each listed device, check related sysfs entry against
	 * the provided PCI ID.
	 */
	while (i != 0) {
		struct rte_pci_addr pci_addr;

		--i;
		DEBUG("checking device \"%s\"", list[i]->name);
		if (mlx4_ibv_device_to_pci_addr(list[i], &pci_addr))
			continue;
		if ((pci_dev->addr.domain != pci_addr.domain) ||
		    (pci_dev->addr.bus != pci_addr.bus) ||
		    (pci_dev->addr.devid != pci_addr.devid) ||
		    (pci_dev->addr.function != pci_addr.function))
			continue;
		vf = (pci_dev->id.device_id ==
		      PCI_DEVICE_ID_MELLANOX_CONNECTX3VF);
		INFO("PCI information matches, using device \"%s\" (VF: %s)",
		     list[i]->name, (vf ? "true" : "false"));
		attr_ctx = mlx4_glue->open_device(list[i]);
		err = errno;
		break;
	}
	if (attr_ctx == NULL) {
		mlx4_glue->free_device_list(list);
		switch (err) {
		case 0:
			rte_errno = ENODEV;
			ERROR("cannot access device, is mlx4_ib loaded?");
			return -rte_errno;
		case EINVAL:
			rte_errno = EINVAL;
			ERROR("cannot use device, are drivers up to date?");
			return -rte_errno;
		}
		assert(err > 0);
		rte_errno = err;
		return -rte_errno;
	}
	ibv_dev = list[i];
	DEBUG("device opened");
	if (mlx4_glue->query_device(attr_ctx, &device_attr)) {
		err = ENODEV;
		goto error;
	}
	INFO("%u port(s) detected", device_attr.phys_port_cnt);
	conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1;
	if (mlx4_args(pci_dev->device.devargs, &conf)) {
		ERROR("failed to process device arguments");
		err = EINVAL;
		goto error;
	}
	/* Use all ports when none are defined */
	if (!conf.ports.enabled)
		conf.ports.enabled = conf.ports.present;
	/* Retrieve extended device attributes. */
	if (mlx4_glue->query_device_ex(attr_ctx, NULL, &device_attr_ex)) {
		err = ENODEV;
		goto error;
	}
	assert(device_attr.max_sge >= MLX4_MAX_SGE);
	for (i = 0; i < device_attr.phys_port_cnt; i++) {
		uint32_t port = i + 1; /* ports are indexed from one */
		struct ibv_context *ctx = NULL;
		struct ibv_port_attr port_attr;
		struct ibv_pd *pd = NULL;
		struct mlx4_priv *priv = NULL;
		struct rte_eth_dev *eth_dev = NULL;
		struct rte_ether_addr mac;
		char name[RTE_ETH_NAME_MAX_LEN];

		/* If port is not enabled, skip. */
		if (!(conf.ports.enabled & (1 << i)))
			continue;
		DEBUG("using port %u", port);
		ctx = mlx4_glue->open_device(ibv_dev);
		if (ctx == NULL) {
			err = ENODEV;
			goto port_error;
		}
		snprintf(name, sizeof(name), "%s port %u",
			 mlx4_glue->get_device_name(ibv_dev), port);
		if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
			eth_dev = rte_eth_dev_attach_secondary(name);
			if (eth_dev == NULL) {
				ERROR("can not attach rte ethdev");
				rte_errno = ENOMEM;
				err = rte_errno;
				goto error;
			}
			priv = eth_dev->data->dev_private;
			if (!priv->verbs_alloc_ctx.enabled) {
				ERROR("secondary process is not supported"
				      " due to lack of external allocator"
				      " from Verbs");
				rte_errno = ENOTSUP;
				err = rte_errno;
				goto error;
			}
			eth_dev->device = &pci_dev->device;
			eth_dev->dev_ops = &mlx4_dev_sec_ops;
			err = mlx4_proc_priv_init(eth_dev);
			if (err)
				goto error;
			/* Receive command fd from primary process. */
			err = mlx4_mp_req_verbs_cmd_fd(eth_dev);
			if (err < 0) {
				err = rte_errno;
				goto error;
			}
			/* Remap UAR for Tx queues. */
			err = mlx4_tx_uar_init_secondary(eth_dev, err);
			if (err) {
				err = rte_errno;
				goto error;
			}
			/*
			 * Ethdev pointer is still required as input since
			 * the primary device is not accessible from the
			 * secondary process.
			 */
			eth_dev->tx_pkt_burst = mlx4_tx_burst;
			eth_dev->rx_pkt_burst = mlx4_rx_burst;
			claim_zero(mlx4_glue->close_device(ctx));
			rte_eth_copy_pci_info(eth_dev, pci_dev);
			rte_eth_dev_probing_finish(eth_dev);
			continue;
		}
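		/*
		 * Primary process continues below; the secondary branch above
		 * has already attached to the existing ethdev, fetched the
		 * Verbs command fd over the multi-process channel, remapped
		 * the Tx UARs and closed its own Verbs context.
		 */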
		/* Check port status. */
		err = mlx4_glue->query_port(ctx, port, &port_attr);
		if (err) {
			err = ENODEV;
			ERROR("port query failed: %s", strerror(err));
			goto port_error;
		}
		if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
			err = ENOTSUP;
			ERROR("port %d is not configured in Ethernet mode",
			      port);
			goto port_error;
		}
		if (port_attr.state != IBV_PORT_ACTIVE)
			DEBUG("port %d is not active: \"%s\" (%d)",
			      port, mlx4_glue->port_state_str(port_attr.state),
			      port_attr.state);
		/* Make asynchronous FD non-blocking to handle interrupts. */
		err = mlx4_fd_set_non_blocking(ctx->async_fd);
		if (err) {
			ERROR("cannot make asynchronous FD non-blocking: %s",
			      strerror(err));
			goto port_error;
		}
		/* Allocate protection domain. */
		pd = mlx4_glue->alloc_pd(ctx);
		if (pd == NULL) {
			err = ENOMEM;
			ERROR("PD allocation failure");
			goto port_error;
		}
		/* from rte_ethdev.c */
		priv = rte_zmalloc("ethdev private structure",
				   sizeof(*priv),
				   RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			err = ENOMEM;
			ERROR("priv allocation failure");
			goto port_error;
		}
		priv->ctx = ctx;
		priv->device_attr = device_attr;
		priv->port = port;
		priv->pd = pd;
		priv->mtu = RTE_ETHER_MTU;
		priv->vf = vf;
		priv->hw_csum = !!(device_attr.device_cap_flags &
				   IBV_DEVICE_RAW_IP_CSUM);
		DEBUG("checksum offloading is %ssupported",
		      (priv->hw_csum ? "" : "not "));
		/* Only ConnectX-3 Pro supports tunneling. */
		priv->hw_csum_l2tun =
			priv->hw_csum &&
			(device_attr.vendor_part_id ==
			 PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
		DEBUG("L2 tunnel checksum offloads are %ssupported",
		      priv->hw_csum_l2tun ? "" : "not ");
		priv->hw_rss_sup = mlx4_hw_rss_sup(priv->ctx, priv->pd,
						   &device_attr_ex);
		DEBUG("supported RSS hash fields mask: %016" PRIx64,
		      priv->hw_rss_sup);
		priv->hw_rss_max_qps =
			device_attr_ex.rss_caps.max_rwq_indirection_table_size;
		DEBUG("MAX RSS queues %d", priv->hw_rss_max_qps);
		priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
					IBV_RAW_PACKET_CAP_SCATTER_FCS);
		DEBUG("FCS stripping toggling is %ssupported",
		      priv->hw_fcs_strip ? "" : "not ");
		priv->tso =
			((device_attr_ex.tso_caps.max_tso > 0) &&
			 (device_attr_ex.tso_caps.supported_qpts &
			  (1 << IBV_QPT_RAW_PACKET)));
		if (priv->tso)
			priv->tso_max_payload_sz =
					device_attr_ex.tso_caps.max_tso;
		DEBUG("TSO is %ssupported",
		      priv->tso ? "" : "not ");
		priv->mr_ext_memseg_en = conf.mr_ext_memseg_en;
		/* Configure the first MAC address by default. */
		err = mlx4_get_mac(priv, &mac.addr_bytes);
		if (err) {
			ERROR("cannot get MAC address, is mlx4_en loaded?"
			      " (error: %s)", strerror(err));
			goto port_error;
		}
		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		     priv->port,
		     mac.addr_bytes[0], mac.addr_bytes[1],
		     mac.addr_bytes[2], mac.addr_bytes[3],
		     mac.addr_bytes[4], mac.addr_bytes[5]);
		/* Register MAC address. */
		priv->mac[0] = mac;

		if (mlx4_get_ifname(priv, &ifname) == 0) {
			DEBUG("port %u ifname is \"%s\"",
			      priv->port, ifname);
			priv->if_index = if_nametoindex(ifname);
		} else {
			DEBUG("port %u ifname is unknown", priv->port);
		}

		/* Get actual MTU if possible. */
		mlx4_mtu_get(priv, &priv->mtu);
		DEBUG("port %u MTU is %u", priv->port, priv->mtu);
		eth_dev = rte_eth_dev_allocate(name);
		if (eth_dev == NULL) {
			err = ENOMEM;
			ERROR("can not allocate rte ethdev");
			goto port_error;
		}
		eth_dev->data->dev_private = priv;
		eth_dev->data->mac_addrs = priv->mac;
		eth_dev->device = &pci_dev->device;
		rte_eth_copy_pci_info(eth_dev, pci_dev);
		/* Initialize local interrupt handle for current port. */
		priv->intr_handle = (struct rte_intr_handle){
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
		/*
		 * Override ethdev interrupt handle pointer with private
		 * handle instead of that of the parent PCI device used by
		 * default. This prevents it from being shared between all
		 * ports of the same PCI device since each of them is
		 * associated with its own Verbs context.
		 *
		 * Rx interrupts in particular require this as the PMD has
		 * no control over the registration of queue interrupts
		 * besides setting up eth_dev->intr_handle, the rest is
		 * handled by rte_intr_rx_ctl().
		 */
		eth_dev->intr_handle = &priv->intr_handle;
		priv->dev_data = eth_dev->data;
		eth_dev->dev_ops = &mlx4_dev_ops;
#ifdef HAVE_IBV_MLX4_BUF_ALLOCATORS
		/* Hint libmlx4 to use PMD allocator for data plane resources */
		struct mlx4dv_ctx_allocators alctr = {
			.alloc = &mlx4_alloc_verbs_buf,
			.free = &mlx4_free_verbs_buf,
			.data = priv,
		};
		err = mlx4_glue->dv_set_context_attr
			(ctx, MLX4DV_SET_CTX_ATTR_BUF_ALLOCATORS,
			 (void *)((uintptr_t)&alctr));
		if (err)
			WARN("Verbs external allocator is not supported");
		else
			priv->verbs_alloc_ctx.enabled = 1;
#endif
/* Bring Ethernet device up. */
|
|
|
|
DEBUG("forcing Ethernet interface up");
|
2019-04-01 21:15:51 +00:00
|
|
|
mlx4_dev_set_link_up(eth_dev);
|
2017-03-03 15:39:56 +00:00
|
|
|
/* Update link status once if waiting for LSC. */
|
|
|
|
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
|
|
|
|
mlx4_link_update(eth_dev, 0);
|
2018-05-27 07:04:55 +00:00
|
|
|
/*
|
|
|
|
* Once the device is added to the list of memory event
|
|
|
|
* callback, its global MR cache table cannot be expanded
|
|
|
|
* on the fly because of deadlock. If it overflows, lookup
|
|
|
|
* should be done by searching MR list linearly, which is slow.
|
|
|
|
*/
|
|
|
|
err = mlx4_mr_btree_init(&priv->mr.cache,
|
|
|
|
MLX4_MR_BTREE_CACHE_N * 2,
|
|
|
|
eth_dev->device->numa_node);
|
|
|
|
if (err) {
|
|
|
|
/* rte_errno is already set. */
|
|
|
|
goto port_error;
|
|
|
|
}
|
|
|
|
/* Add device to memory callback list. */
|
2019-04-01 21:15:53 +00:00
|
|
|
rte_rwlock_write_lock(&mlx4_shared_data->mem_event_rwlock);
|
|
|
|
LIST_INSERT_HEAD(&mlx4_shared_data->mem_event_cb_list,
|
|
|
|
priv, mem_event_cb);
|
|
|
|
rte_rwlock_write_unlock(&mlx4_shared_data->mem_event_rwlock);
|
2018-05-10 23:58:30 +00:00
|
|
|
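		/*
		 * Mark probing as complete so that applications listening
		 * for new-port ethdev events only ever see a fully
		 * initialized port.
		 */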
		rte_eth_dev_probing_finish(eth_dev);
		continue;
port_error:
		rte_free(priv);
		if (eth_dev != NULL)
			eth_dev->data->dev_private = NULL;
		if (pd)
			claim_zero(mlx4_glue->dealloc_pd(pd));
		if (ctx)
			claim_zero(mlx4_glue->close_device(ctx));
		if (eth_dev != NULL) {
			/* mac_addrs must not be freed because it is part of dev_private */
			eth_dev->data->mac_addrs = NULL;
			rte_eth_dev_release_port(eth_dev);
		}
		break;
	}
	/*
	 * XXX if something went wrong in the loop above, there is a resource
	 * leak (ctx, pd, priv, DPDK ethdev) but we can do nothing about it as
	 * long as DPDK does not provide a way to deallocate an ethdev and a
	 * way to enumerate the registered ethdevs to free the previous ones.
	 */
error:
	if (attr_ctx)
		claim_zero(mlx4_glue->close_device(attr_ctx));
	if (list)
		mlx4_glue->free_device_list(list);
	if (err)
		rte_errno = err;
	return -err;
}

static const struct rte_pci_id mlx4_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3VF)
	},
	{
		.vendor_id = 0
	}
};

static struct rte_pci_driver mlx4_driver = {
	.driver = {
		.name = MLX4_DRIVER_NAME
	},
	.id_table = mlx4_pci_id_map,
	.probe = mlx4_pci_probe,
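	/*
	 * Request link state change and device removal interrupts from the
	 * PCI bus driver; both are surfaced to applications as ethdev
	 * events.
	 */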
	.drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV,
};

#ifdef RTE_IBVERBS_LINK_DLOPEN

/**
 * Suffix RTE_EAL_PMD_PATH with "-glue".
 *
 * This function performs a sanity check on RTE_EAL_PMD_PATH before
 * suffixing its last component.
 *
 * @param buf[out]
 *   Output buffer; should be large enough, otherwise NULL is returned.
 * @param size
 *   Size of @p buf.
 *
 * @return
 *   Pointer to @p buf or NULL in case the suffix cannot be appended.
 */
static char *
mlx4_glue_path(char *buf, size_t size)
{
	static const char *const bad[] = { "/", ".", "..", NULL };
	const char *path = RTE_EAL_PMD_PATH;
	size_t len = strlen(path);
	size_t off;
	int i;

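	/*
	 * Strip trailing '/' characters, isolate the last path component
	 * and refuse to suffix it when it is empty, "." or "..", since
	 * "-glue" would then not name a proper sub-directory.
	 */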
	while (len && path[len - 1] == '/')
		--len;
	for (off = len; off && path[off - 1] != '/'; --off)
		;
	for (i = 0; bad[i]; ++i)
		if (!strncmp(path + off, bad[i], (int)(len - off)))
			goto error;
	i = snprintf(buf, size, "%.*s-glue", (int)len, path);
	if (i == -1 || (size_t)i >= size)
		goto error;
	return buf;
error:
	ERROR("unable to append \"-glue\" to last component of"
	      " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
	      " please re-configure DPDK");
	return NULL;
}

/**
 * Initialization routine for run-time dependency on rdma-core.
 */
static int
mlx4_glue_init(void)
{
	char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
	const char *path[] = {
		/*
		 * A basic security check is necessary before trusting
		 * MLX4_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
		 */
		(geteuid() == getuid() && getegid() == getgid() ?
		 getenv("MLX4_GLUE_PATH") : NULL),
		/*
		 * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
		 * variant, otherwise let dlopen() look up libraries on its
		 * own.
		 */
		(*RTE_EAL_PMD_PATH ?
		 mlx4_glue_path(glue_path, sizeof(glue_path)) : ""),
	};
	unsigned int i = 0;
	void *handle = NULL;
	void **sym;
	const char *dlmsg;

	while (!handle && i != RTE_DIM(path)) {
		const char *end;
		size_t len;
		int ret;

		if (!path[i]) {
			++i;
			continue;
		}
		end = strpbrk(path[i], ":;");
		if (!end)
			end = path[i] + strlen(path[i]);
		len = end - path[i];
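		/*
		 * Two-pass name construction: with ret == 0 the VLA below is
		 * a single byte, so the first snprintf() only measures the
		 * required length; the loop then runs again with a buffer of
		 * exactly that size before calling dlopen().
		 */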
		ret = 0;
		do {
			char name[ret + 1];

			ret = snprintf(name, sizeof(name), "%.*s%s" MLX4_GLUE,
				       (int)len, path[i],
				       (!len || *(end - 1) == '/') ? "" : "/");
			if (ret == -1)
				break;
			if (sizeof(name) != (size_t)ret + 1)
				continue;
			DEBUG("looking for rdma-core glue as \"%s\"", name);
			handle = dlopen(name, RTLD_LAZY);
			break;
		} while (1);
		path[i] = end + 1;
		if (!*end)
			++i;
	}
	if (!handle) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			WARN("cannot load glue library: %s", dlmsg);
		goto glue_error;
	}
	sym = dlsym(handle, "mlx4_glue");
	if (!sym || !*sym) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			ERROR("cannot resolve glue symbol: %s", dlmsg);
		goto glue_error;
	}
	mlx4_glue = *sym;
	return 0;
glue_error:
	if (handle)
		dlclose(handle);
	WARN("cannot initialize PMD due to missing run-time"
	     " dependency on rdma-core libraries (libibverbs,"
	     " libmlx4)");
	return -rte_errno;
}

#endif

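/*
 * RTE_INIT() runs the routine below as a constructor before main(),
 * i.e. before the EAL scans the PCI bus, so the driver structure is
 * already registered when probing starts.
 */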
/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx4_pmd_init)
{
	/* Initialize driver log type. */
	mlx4_logtype = rte_log_register("pmd.net.mlx4");
	if (mlx4_logtype >= 0)
		rte_log_set_level(mlx4_logtype, RTE_LOG_NOTICE);

	/*
	 * MLX4_DEVICE_FATAL_CLEANUP tells ibv_destroy functions that we
	 * want them to return a success errno value even when called
	 * after the device has been removed.
	 */
	setenv("MLX4_DEVICE_FATAL_CLEANUP", "1", 1);
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
#ifdef RTE_IBVERBS_LINK_DLOPEN
	if (mlx4_glue_init())
		return;
	assert(mlx4_glue);
#endif
#ifndef NDEBUG
	/* Glue structure must not contain any NULL pointers. */
	{
		unsigned int i;

		for (i = 0; i != sizeof(*mlx4_glue) / sizeof(void *); ++i)
			assert(((const void *const *)mlx4_glue)[i]);
	}
#endif
	if (strcmp(mlx4_glue->version, MLX4_GLUE_VERSION)) {
		ERROR("rdma-core glue \"%s\" mismatch: \"%s\" is required",
		      mlx4_glue->version, MLX4_GLUE_VERSION);
		return;
	}
	mlx4_glue->fork_init();
	rte_pci_register(&mlx4_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx4, mlx4_pci_id_map);
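/*
 * Kernel modules required at run time; the dependency string below is
 * embedded in the PMD object so external tooling can report it.
 */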
RTE_PMD_REGISTER_KMOD_DEP(net_mlx4,
			  "* ib_uverbs & mlx4_en & mlx4_core & mlx4_ib");