/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2012 6WIND S.A.
 * Copyright 2012 Mellanox Technologies, Ltd
 */
|
|
|
|
|
|
|
|
#ifndef RTE_PMD_MLX4_H_
|
|
|
|
#define RTE_PMD_MLX4_H_
|
|
|
|
|
2017-09-01 10:06:58 +02:00
|
|
|
#include <net/if.h>
|
2015-02-25 14:52:05 +01:00
|
|
|
#include <stdint.h>
|
2017-10-12 14:19:18 +02:00
|
|
|
#include <sys/queue.h>
|
2015-02-25 14:52:05 +01:00
|
|
|
|
2017-09-01 10:07:04 +02:00
|
|
|
/* Verbs headers do not support -pedantic. */
|
2017-03-05 09:51:31 +02:00
|
|
|
#ifdef PEDANTIC
|
|
|
|
#pragma GCC diagnostic ignored "-Wpedantic"
|
|
|
|
#endif
|
|
|
|
#include <infiniband/verbs.h>
|
|
|
|
#ifdef PEDANTIC
|
|
|
|
#pragma GCC diagnostic error "-Wpedantic"
|
|
|
|
#endif
|
|
|
|
|
2018-01-22 00:16:22 +00:00
|
|
|
#include <rte_ethdev_driver.h>
|
2017-09-01 10:06:52 +02:00
|
|
|
#include <rte_ether.h>
|
|
|
|
#include <rte_interrupts.h>
|
2017-09-01 10:06:57 +02:00
|
|
|
#include <rte_mempool.h>
|
2017-11-02 19:14:22 +01:00
|
|
|
#include <rte_spinlock.h>
|
2017-09-01 10:06:52 +02:00
|
|
|
|
2017-11-23 18:38:04 +01:00
|
|
|
#ifndef IBV_RX_HASH_INNER
|
|
|
|
/** This is not necessarily defined by supported RDMA core versions. */
|
|
|
|
#define IBV_RX_HASH_INNER (1ull << 31)
|
|
|
|
#endif /* IBV_RX_HASH_INNER */
|
|
|
|
|
2017-10-12 14:19:31 +02:00
|
|
|
/** Maximum number of simultaneous MAC addresses. This value is arbitrary. */
|
|
|
|
#define MLX4_MAX_MAC_ADDRESSES 128
|
|
|
|
|
2017-10-12 14:19:18 +02:00
|
|
|
/** Request send completion once in every 64 sends, might be less. */
|
2015-06-30 11:28:00 +02:00
|
|
|
#define MLX4_PMD_TX_PER_COMP_REQ 64
|
|
|
|
|
2017-10-12 14:19:18 +02:00
|
|
|
/** Maximum size for inline data. */
|
2015-02-25 14:52:05 +01:00
|
|
|
#define MLX4_PMD_MAX_INLINE 0
|
|
|
|
|
2017-10-12 14:19:39 +02:00
|
|
|
/** Fixed RSS hash key size in bytes. Cannot be modified. */
|
|
|
|
#define MLX4_RSS_HASH_KEY_SIZE 40
|
|
|
|
|
2017-10-12 14:19:18 +02:00
|
|
|
/**
|
2015-02-25 14:52:05 +01:00
|
|
|
* Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
|
|
|
|
* from which buffers are to be transmitted will have to be mapped by this
|
|
|
|
* driver to their own Memory Region (MR). This is a slow operation.
|
|
|
|
*
|
|
|
|
* This value is always 1 for RX queues.
|
|
|
|
*/
|
|
|
|
#ifndef MLX4_PMD_TX_MP_CACHE
|
|
|
|
#define MLX4_PMD_TX_MP_CACHE 8
|
|
|
|
#endif
|
|
|
|
|
2017-10-12 14:19:18 +02:00
|
|
|
/** Interrupt alarm timeout value in microseconds. */
|
2017-09-01 10:06:51 +02:00
|
|
|
#define MLX4_INTR_ALARM_TIMEOUT 100000
|
2015-10-30 19:57:22 +01:00
|
|
|
|
2017-10-12 14:19:18 +02:00
|
|
|
/** Port parameter. */
|
2017-03-27 17:41:37 +02:00
|
|
|
#define MLX4_PMD_PORT_KVARG "port"
|
|
|
|
|
2015-02-25 14:52:05 +01:00
|
|
|
/** PCI vendor ID shared by all Mellanox devices matched by this PMD. */
enum {
	PCI_VENDOR_ID_MELLANOX = 0x15b3,
};
|
|
|
|
|
|
|
|
/** PCI device IDs of the ConnectX-3 family handled by this driver. */
enum {
	PCI_DEVICE_ID_MELLANOX_CONNECTX3 = 0x1003,
	PCI_DEVICE_ID_MELLANOX_CONNECTX3VF = 0x1004, /* Virtual function. */
	PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO = 0x1007,
};
|
|
|
|
|
2017-10-12 14:19:18 +02:00
|
|
|
/** Driver name reported to lower layers and used in log output. */
|
2016-10-07 15:04:13 +02:00
|
|
|
#define MLX4_DRIVER_NAME "net_mlx4"
|
2015-02-25 14:52:05 +01:00
|
|
|
|
2017-10-12 14:19:25 +02:00
|
|
|
struct mlx4_drop;
|
/*
 * RSS contexts (struct mlx4_rss) are shared among flow rules with identical
 * hashed fields, hash key and target queues; they are reference-counted.
 * Hardware constraints: queue groups must be a power of two of consecutive,
 * properly aligned indices; the hash key is exactly 40 bytes long and the
 * only supported hash algorithm is Toeplitz.
 */
|
|
|
struct mlx4_rss;
|
2017-09-01 10:06:56 +02:00
|
|
|
struct rxq;
|
|
|
|
struct txq;
|
2017-03-05 09:51:32 +02:00
|
|
|
struct rte_flow;
|
|
|
|
|
2017-11-02 19:14:22 +01:00
|
|
|
/**
 * Memory region descriptor.
 *
 * Maps one mempool to a Verbs memory region so its buffers can be used for
 * DMA. Entries are kept in the priv->mr list and shared through @p refcnt.
 */
struct mlx4_mr {
	LIST_ENTRY(mlx4_mr) next; /**< Next entry in list. */
	uintptr_t start; /**< Base address for memory region. */
	uintptr_t end; /**< End address for memory region. */
	uint32_t lkey; /**< L_Key extracted from @p mr. */
	uint32_t refcnt; /**< Reference count for this object. */
	struct priv *priv; /**< Back pointer to private data. */
	struct ibv_mr *mr; /**< Memory region associated with @p mp. */
	struct rte_mempool *mp; /**< Target memory pool (mempool). */
};
|
|
|
|
|
2017-10-12 14:19:18 +02:00
|
|
|
/**
 * Private data structure.
 *
 * Per-port driver state stored in rte_eth_dev->data->dev_private. Holds the
 * Verbs objects shared by all queues as well as device capability flags.
 */
struct priv {
	struct rte_eth_dev *dev; /**< Ethernet device. */
	struct ibv_context *ctx; /**< Verbs context. */
	struct ibv_device_attr device_attr; /**< Device properties. */
	struct ibv_pd *pd; /**< Protection Domain. */
	/* Device properties. */
	uint16_t mtu; /**< Configured MTU. */
	uint8_t port; /**< Physical port number. */
	uint32_t started:1; /**< Device started, flows enabled. */
	uint32_t vf:1; /**< This is a VF device. */
	uint32_t intr_alarm:1; /**< An interrupt alarm is scheduled. */
	uint32_t isolated:1; /**< Toggle isolated mode. */
	uint32_t hw_csum:1; /**< Checksum offload is supported. */
	uint32_t hw_csum_l2tun:1; /**< Checksum support for L2 tunnels. */
	uint32_t hw_fcs_strip:1; /**< FCS stripping toggling is supported. */
	uint64_t hw_rss_sup; /**< Supported RSS hash fields (Verbs format). */
	struct rte_intr_handle intr_handle; /**< Port interrupt handle. */
	struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */
	LIST_HEAD(, mlx4_rss) rss; /**< Shared targets for Rx flow rules. */
	LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
	LIST_HEAD(, mlx4_mr) mr; /**< Registered memory regions. */
	rte_spinlock_t mr_lock; /**< Lock for @p mr access. */
	struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
	/**< Configured MAC addresses. Unused entries are zeroed. */
};
|
|
|
|
|
2017-09-01 10:06:58 +02:00
|
|
|
/* mlx4_ethdev.c */
|
|
|
|
|
|
|
|
int mlx4_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]);
|
|
|
|
int mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]);
|
|
|
|
int mlx4_mtu_get(struct priv *priv, uint16_t *mtu);
|
|
|
|
int mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
|
|
|
|
int mlx4_dev_set_link_down(struct rte_eth_dev *dev);
|
|
|
|
int mlx4_dev_set_link_up(struct rte_eth_dev *dev);
|
2017-10-12 14:19:34 +02:00
|
|
|
void mlx4_promiscuous_enable(struct rte_eth_dev *dev);
|
|
|
|
void mlx4_promiscuous_disable(struct rte_eth_dev *dev);
|
|
|
|
void mlx4_allmulticast_enable(struct rte_eth_dev *dev);
|
|
|
|
void mlx4_allmulticast_disable(struct rte_eth_dev *dev);
|
2017-10-12 14:19:31 +02:00
|
|
|
void mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
|
|
|
|
int mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
|
|
|
|
uint32_t index, uint32_t vmdq);
|
2018-04-11 18:32:51 +02:00
|
|
|
int mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr);
|
2017-10-12 14:19:32 +02:00
|
|
|
int mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
|
2017-10-10 20:20:18 +00:00
|
|
|
int mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
|
2017-09-01 10:06:58 +02:00
|
|
|
void mlx4_stats_reset(struct rte_eth_dev *dev);
|
|
|
|
void mlx4_dev_infos_get(struct rte_eth_dev *dev,
|
|
|
|
struct rte_eth_dev_info *info);
|
2017-09-01 10:06:55 +02:00
|
|
|
int mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete);
|
2017-09-01 10:06:58 +02:00
|
|
|
int mlx4_flow_ctrl_get(struct rte_eth_dev *dev,
|
|
|
|
struct rte_eth_fc_conf *fc_conf);
|
|
|
|
int mlx4_flow_ctrl_set(struct rte_eth_dev *dev,
|
|
|
|
struct rte_eth_fc_conf *fc_conf);
|
2017-11-05 19:26:56 +02:00
|
|
|
const uint32_t *mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev);
|
2018-01-20 21:12:20 +00:00
|
|
|
int mlx4_is_removed(struct rte_eth_dev *dev);
|
2017-09-01 10:06:55 +02:00
|
|
|
|
|
|
|
/* mlx4_intr.c */
|
|
|
|
|
|
|
|
int mlx4_intr_uninstall(struct priv *priv);
|
|
|
|
int mlx4_intr_install(struct priv *priv);
|
2018-01-29 10:34:37 +02:00
|
|
|
int mlx4_rxq_intr_enable(struct priv *priv);
|
|
|
|
void mlx4_rxq_intr_disable(struct priv *priv);
|
2017-09-01 10:06:55 +02:00
|
|
|
int mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
|
|
|
|
int mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);
|
|
|
|
|
2017-09-01 10:07:03 +02:00
|
|
|
/* mlx4_mr.c */
|
|
|
|
|
2017-11-02 19:14:22 +01:00
|
|
|
struct mlx4_mr *mlx4_mr_get(struct priv *priv, struct rte_mempool *mp);
|
|
|
|
void mlx4_mr_put(struct mlx4_mr *mr);
|
2017-11-02 16:42:45 +00:00
|
|
|
uint32_t mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp,
|
|
|
|
uint32_t i);
|
2017-09-01 10:07:03 +02:00
|
|
|
|
2015-02-25 14:52:05 +01:00
|
|
|
#endif /* RTE_PMD_MLX4_H_ */
|