/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2012 6WIND S.A.
 * Copyright 2012 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX4_H_
#define RTE_PMD_MLX4_H_

#include <net/if.h>
#include <stdint.h>
#include <sys/queue.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_interrupts.h>
#include <rte_mempool.h>
#include <rte_rwlock.h>

#include "mlx4_mr.h"

#ifndef IBV_RX_HASH_INNER
/** This is not necessarily defined by supported RDMA core versions. */
#define IBV_RX_HASH_INNER (1ull << 31)
#endif /* IBV_RX_HASH_INNER */

/** Maximum number of simultaneous MAC addresses. This value is arbitrary. */
#define MLX4_MAX_MAC_ADDRESSES 128

/** Request send completion once in every 64 sends, might be less. */
#define MLX4_PMD_TX_PER_COMP_REQ 64

/** Maximum size for inline data. */
#define MLX4_PMD_MAX_INLINE 0

/** Fixed RSS hash key size in bytes. Cannot be modified. */
#define MLX4_RSS_HASH_KEY_SIZE 40

/** Interrupt alarm timeout value in microseconds. */
#define MLX4_INTR_ALARM_TIMEOUT 100000

/* Maximum packet headers size (L2+L3+L4) for TSO. */
#define MLX4_MAX_TSO_HEADER 192

/** Port parameter. */
#define MLX4_PMD_PORT_KVARG "port"

/** Enable extending memsegs when creating a MR. */
#define MLX4_MR_EXT_MEMSEG_EN_KVARG "mr_ext_memseg_en"

enum {
	PCI_VENDOR_ID_MELLANOX = 0x15b3,
};

enum {
	PCI_DEVICE_ID_MELLANOX_CONNECTX3 = 0x1003,
	PCI_DEVICE_ID_MELLANOX_CONNECTX3VF = 0x1004,
	PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO = 0x1007,
};

/* Request types for IPC. */
enum mlx4_mp_req_type {
	MLX4_MP_REQ_VERBS_CMD_FD = 1,
	MLX4_MP_REQ_CREATE_MR,
	MLX4_MP_REQ_START_RXTX,
	MLX4_MP_REQ_STOP_RXTX,
};

/* Parameters for IPC. */
struct mlx4_mp_param {
	enum mlx4_mp_req_type type;
	int port_id;
	int result;
	RTE_STD_C11
	union {
		uintptr_t addr; /* MLX4_MP_REQ_CREATE_MR */
	} args;
};

/** Request timeout for IPC. */
#define MLX4_MP_REQ_TIMEOUT_SEC 5

/** Key string for IPC. */
#define MLX4_MP_NAME "net_mlx4_mp"
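
/*
 * Illustrative example, not part of the driver: a secondary process asks the
 * primary to perform one of the requests above through the generic EAL
 * multi-process channel, carrying struct mlx4_mp_param in the rte_mp_msg
 * payload under the MLX4_MP_NAME action name. A minimal sketch, assuming
 * "port_id" and "addr" are in scope:
 *
 *	struct rte_mp_msg mp_req = { .len_param = sizeof(struct mlx4_mp_param) };
 *	struct mlx4_mp_param *req = (struct mlx4_mp_param *)mp_req.param;
 *	struct rte_mp_reply mp_rep;
 *	struct timespec ts = { .tv_sec = MLX4_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0 };
 *	int ret = -1;
 *
 *	strlcpy(mp_req.name, MLX4_MP_NAME, sizeof(mp_req.name));
 *	req->type = MLX4_MP_REQ_CREATE_MR;
 *	req->port_id = port_id;
 *	req->args.addr = addr;
 *	if (rte_mp_request_sync(&mp_req, &mp_rep, &ts) == 0) {
 *		ret = ((struct mlx4_mp_param *)mp_rep.msgs[0].param)->result;
 *		free(mp_rep.msgs);
 *	}
 *
 * The mlx4_mp_req_*() wrappers declared at the end of this header hide this
 * exchange from the rest of the driver.
 */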

/** Driver name reported to lower layers and used in log output. */
#define MLX4_DRIVER_NAME "net_mlx4"

struct mlx4_drop;

/*
 * RSS contexts (struct mlx4_rss) are shared by flow rules using the same
 * hashed fields, hash key and target queues; reference counters track their
 * users, and the QUEUE action is implemented on top of an
 * automatically-generated single-queue RSS context. The following hardware
 * limitations apply to RSS contexts:
 *
 * - The number of queues in a group must be a power of two.
 * - Queue indices must be consecutive, for instance the [0 1 2 3] set is
 *   allowed, however [3 2 1 0], [0 2 1 3] and [0 0 1 1 2 3 3 3] are not.
 * - The first queue of a group must be aligned to a multiple of the context
 *   size, e.g. if queues [0 1 2 3 4] are defined globally, allowed group
 *   combinations are [0 1] and [2 3]; groups [1 2] and [3 4] are not
 *   supported.
 * - The RSS hash key, while configurable per context, must be exactly 40
 *   bytes long (MLX4_RSS_HASH_KEY_SIZE).
 * - The only supported hash algorithm is Toeplitz.
 *
 * See the illustrative queue-group check after the forward declarations
 * below.
 */
struct mlx4_rss;
struct rxq;
struct txq;
struct rte_flow;
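
/*
 * Illustrative sketch only, not used by the driver: the RSS context
 * limitations listed above translate into a queue-group check along these
 * lines, where "queues" and "n" describe a candidate group:
 *
 *	static int
 *	rss_group_is_valid(const uint16_t *queues, unsigned int n)
 *	{
 *		unsigned int i;
 *
 *		if (n == 0 || (n & (n - 1)))
 *			return 0; // not a power of two
 *		if (queues[0] % n)
 *			return 0; // first queue not aligned to the group size
 *		for (i = 1; i != n; ++i)
 *			if (queues[i] != queues[i - 1] + 1)
 *				return 0; // indices not consecutive
 *		return 1;
 *	}
 */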

/**
 * Type of object being allocated.
 */
enum mlx4_verbs_alloc_type {
	MLX4_VERBS_ALLOC_TYPE_NONE,
	MLX4_VERBS_ALLOC_TYPE_TX_QUEUE,
	MLX4_VERBS_ALLOC_TYPE_RX_QUEUE,
};

/**
 * Verbs allocator needs a context to know in the callback which kind of
 * resources it is allocating.
 */
struct mlx4_verbs_alloc_ctx {
	int enabled;
	enum mlx4_verbs_alloc_type type; /* Kind of object being allocated. */
	const void *obj; /* Pointer to the DPDK object. */
};
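
/*
 * Illustrative usage pattern (a sketch, not a verbatim excerpt from the
 * driver): the context is filled right before creating a Verbs object so
 * that an external resource allocation callback can tell which DPDK object
 * the allocation belongs to, then reset once the call returns:
 *
 *	priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_TX_QUEUE;
 *	priv->verbs_alloc_ctx.obj = txq;
 *	qp = ibv_create_qp(priv->pd, &qp_init_attr);
 *	priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
 */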

LIST_HEAD(mlx4_dev_list, mlx4_priv);

/*
 * Memory region (MR) design: lookups are layered. L0 checks the per-queue
 * most-recently-used entry (mr_ctrl->mru) and L1 linearly scans a small
 * fixed-size array; both live in the inlined mlx4_mr_lookup_cache(). On a
 * miss, the bottom half (L2, mlx4_mr_addr2mr_bh()) searches the queue's
 * larger local cache, a binary tree. On an L2 miss, the global device cache
 * (priv->mr.cache, also a B-tree) is searched under locks (L3,
 * mlx4_mr_lookup_dev()); it caches the device MR list (priv->mr.mr_list)
 * and is all-inclusive unless it overflows, in which case the list itself
 * is scanned directly, which is slower. Its size cannot be extended on the
 * fly due to deadlock. If even L3 misses, mlx4_mr_create() registers a new
 * MR, covering as many virtually contiguous memsegs around the address as
 * possible while holding memory_hotplug_lock and priv->mr.rwlock (so no
 * memory allocation/free may happen inside).
 *
 * The memory hotplug free callback clears the freed range from the MR
 * bitmaps, which may fragment an MR into several search entries; the global
 * cache is then rebuilt and all per-queue caches are flushed, so frequent
 * run-time frees can cause dataplane jitter in the worst case. Pinning
 * memory with the EAL '--socket-mem' option (or '--legacy-mem') and
 * configuring per-lcore Mempool caches keep such misses rare. See the
 * illustrative lookup sketch below.
 */
LIST_HEAD(mlx4_mr_list, mlx4_mr);
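
/*
 * A minimal sketch of the lookup order described above; the helper names
 * come from the notes and their signatures are simplified here for
 * illustration:
 *
 *	// L0/L1: most-recently-used entry, then linear scan of the small
 *	// per-queue array (inlined fast path).
 *	lkey = mlx4_mr_lookup_cache(mr_ctrl, addr);
 *	if (lkey == UINT32_MAX)
 *		// L2: per-queue B-tree, then L3: global cache under locks,
 *		// finally mlx4_mr_create() registers a new MR if needed.
 *		lkey = mlx4_mr_addr2mr_bh(mr_ctrl, addr);
 */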

/* Shared data between primary and secondary processes. */
struct mlx4_shared_data {
	rte_spinlock_t lock;
	/* Global spinlock for primary and secondary processes. */
	int init_done; /* Whether primary has done initialization. */
	unsigned int secondary_cnt; /* Number of secondary processes init'd. */
	struct mlx4_dev_list mem_event_cb_list;
	rte_rwlock_t mem_event_rwlock;
};

/* Per-process data structure, not visible to other processes. */
struct mlx4_local_data {
	int init_done; /* Whether a secondary has done initialization. */
};

extern struct mlx4_shared_data *mlx4_shared_data;

/* Per-process private structure. */
struct mlx4_proc_priv {
	size_t uar_table_sz;
	/* Size of UAR register table. */
	void *uar_table[];
	/* Table of UAR registers for each process. */
};

#define MLX4_PROC_PRIV(port_id) \
	((struct mlx4_proc_priv *)rte_eth_devices[port_id].process_private)
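
/*
 * For example (a sketch, assuming "port_id" identifies an mlx4 port), the
 * per-process UAR register table is reached as:
 *
 *	struct mlx4_proc_priv *ppriv = MLX4_PROC_PRIV(port_id);
 *	void *uar = ppriv->uar_table[0]; // first entry of the UAR table
 */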

/** Private data structure. */
struct mlx4_priv {
	LIST_ENTRY(mlx4_priv) mem_event_cb;
	/**< Called by memory event callback. */
	struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
	struct ibv_context *ctx; /**< Verbs context. */
	struct ibv_device_attr device_attr; /**< Device properties. */
	struct ibv_pd *pd; /**< Protection Domain. */
	/* Device properties. */
	unsigned int if_index; /**< Associated network device index */
	uint16_t mtu; /**< Configured MTU. */
	uint8_t port; /**< Physical port number. */
	uint32_t started:1; /**< Device started, flows enabled. */
	uint32_t vf:1; /**< This is a VF device. */
	uint32_t intr_alarm:1; /**< An interrupt alarm is scheduled. */
	uint32_t isolated:1; /**< Toggle isolated mode. */
	uint32_t rss_init:1; /**< Common RSS context is initialized. */
	uint32_t hw_csum:1; /**< Checksum offload is supported. */
	uint32_t hw_csum_l2tun:1; /**< Checksum support for L2 tunnels. */
	uint32_t hw_fcs_strip:1; /**< FCS stripping toggling is supported. */
	uint32_t tso:1; /**< Transmit segmentation offload is supported. */
	uint32_t mr_ext_memseg_en:1;
	/** Whether memseg should be extended for MR creation. */
	uint32_t tso_max_payload_sz; /**< Max supported TSO payload size. */
	uint32_t hw_rss_max_qps; /**< Max Rx Queues supported by RSS. */
	uint64_t hw_rss_sup; /**< Supported RSS hash fields (Verbs format). */
	struct rte_intr_handle intr_handle; /**< Port interrupt handle. */
	struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */
	struct {
		uint32_t dev_gen; /* Generation number to flush local caches. */
		rte_rwlock_t rwlock; /* MR Lock. */
		struct mlx4_mr_btree cache; /* Global MR cache table. */
		struct mlx4_mr_list mr_list; /* Registered MR list. */
		struct mlx4_mr_list mr_free_list; /* Freed MR list. */
	} mr;
	LIST_HEAD(, mlx4_rss) rss; /**< Shared targets for Rx flow rules. */
	LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
	struct rte_ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
	/**< Configured MAC addresses. Unused entries are zeroed. */
	uint32_t mac_mc; /**< Number of trailing multicast entries in mac[]. */
	struct mlx4_verbs_alloc_ctx verbs_alloc_ctx;
	/**< Context for Verbs allocator. */
};

#define PORT_ID(priv) ((priv)->dev_data->port_id)
#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
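
/*
 * For instance (illustrative only), code holding only the private structure
 * can reach its ethdev counterpart and port id:
 *
 *	struct rte_eth_dev *dev = ETH_DEV(priv); // priv -> ethdev
 *	uint16_t port_id = PORT_ID(priv);        // priv -> port id
 */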

/* mlx4_ethdev.c */

int mlx4_get_ifname(const struct mlx4_priv *priv, char (*ifname)[IF_NAMESIZE]);
int mlx4_get_mac(struct mlx4_priv *priv, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]);
int mlx4_mtu_get(struct mlx4_priv *priv, uint16_t *mtu);
int mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
int mlx4_dev_set_link_down(struct rte_eth_dev *dev);
int mlx4_dev_set_link_up(struct rte_eth_dev *dev);
int mlx4_promiscuous_enable(struct rte_eth_dev *dev);
int mlx4_promiscuous_disable(struct rte_eth_dev *dev);
int mlx4_allmulticast_enable(struct rte_eth_dev *dev);
int mlx4_allmulticast_disable(struct rte_eth_dev *dev);
void mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
int mlx4_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		      uint32_t index, uint32_t vmdq);
int mlx4_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
int mlx4_set_mc_addr_list(struct rte_eth_dev *dev, struct rte_ether_addr *list,
			  uint32_t num);
int mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
int mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
int mlx4_stats_reset(struct rte_eth_dev *dev);
int mlx4_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
int mlx4_dev_infos_get(struct rte_eth_dev *dev,
		       struct rte_eth_dev_info *info);
int mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete);
int mlx4_flow_ctrl_get(struct rte_eth_dev *dev,
		       struct rte_eth_fc_conf *fc_conf);
int mlx4_flow_ctrl_set(struct rte_eth_dev *dev,
		       struct rte_eth_fc_conf *fc_conf);
const uint32_t *mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev);
int mlx4_is_removed(struct rte_eth_dev *dev);

/* mlx4_intr.c */

int mlx4_intr_uninstall(struct mlx4_priv *priv);
int mlx4_intr_install(struct mlx4_priv *priv);
int mlx4_rxq_intr_enable(struct mlx4_priv *priv);
void mlx4_rxq_intr_disable(struct mlx4_priv *priv);
int mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
int mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);

/* mlx4_mp.c */
void mlx4_mp_req_start_rxtx(struct rte_eth_dev *dev);
void mlx4_mp_req_stop_rxtx(struct rte_eth_dev *dev);
int mlx4_mp_req_mr_create(struct rte_eth_dev *dev, uintptr_t addr);
int mlx4_mp_req_verbs_cmd_fd(struct rte_eth_dev *dev);
int mlx4_mp_init_primary(void);
void mlx4_mp_uninit_primary(void);
int mlx4_mp_init_secondary(void);
void mlx4_mp_uninit_secondary(void);

#endif /* RTE_PMD_MLX4_H_ */