/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>

#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <rte_common.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_eal_paging.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_mp.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_autoconf.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "rte_pmd_mlx5.h"

#define MLX5_ETH_DRIVER_NAME mlx5_eth

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to configure log 2 of the stride size for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_SIZE "mprq_log_stride_size"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. Deprecated, ignored. */
#define MLX5_TXQ_INLINE "txq_inline"

/* Device parameter to limit packet size to inline with ordinary SEND. */
#define MLX5_TXQ_INLINE_MAX "txq_inline_max"

/* Device parameter to configure minimal data size to inline. */
#define MLX5_TXQ_INLINE_MIN "txq_inline_min"

/* Device parameter to limit packet size to inline with Enhanced MPW. */
#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines).
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/*
 * Device parameter to include 2 dsegs in the title WQEBB.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/*
 * Device parameter to limit the size of inlining packet.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/*
 * Device parameter to enable Tx scheduling on timestamps
 * and specify the packet pacing granularity in nanoseconds.
 */
#define MLX5_TX_PP "tx_pp"

/*
 * Device parameter to specify skew in nanoseconds on Tx datapath,
 * it represents the time between SQ start WQE processing and
 * appearing actual packet data on the wire.
 */
#define MLX5_TX_SKEW "tx_skew"

/*
 * Device parameter to enable hardware Tx vector.
 * Deprecated, ignored (no vectorized Tx routines anymore).
 */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV E-Switch flow steering. */
#define MLX5_DV_ESW_EN "dv_esw_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Enable extensive flow metadata support. */
#define MLX5_DV_XMETA_EN "dv_xmeta_en"

/* Device parameter to let the user manage the lacp traffic of bonded device */
#define MLX5_LACP_BY_USER "lacp_by_user"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"

/* Device parameter to configure the maximum number of dump files per queue. */
#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"

/* Configure timeout of LRO session (in microseconds). */
#define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"

/*
 * Device parameter to configure the total data buffer size for a single
 * hairpin queue (logarithm value).
 */
#define MLX5_HP_BUF_SIZE "hp_buf_log_sz"

/* Flow memory reclaim mode. */
#define MLX5_RECLAIM_MEM "reclaim_mem_mode"

/* Decap will be used or not. */
#define MLX5_DECAP_EN "decap_en"

/* Device parameter to configure allow or prevent duplicate rules pattern. */
#define MLX5_ALLOW_DUPLICATE_PATTERN "allow_duplicate_pattern"

/* Device parameter to configure the delay drop when creating Rxqs. */
#define MLX5_DELAY_DROP "delay_drop"

/* Device parameter to create the fdb default rule in PMD. */
#define MLX5_FDB_DEFAULT_RULE_EN "fdb_def_rule_en"

/* HW steering counter configuration. */
#define MLX5_HWS_CNT_SERVICE_CORE "service_core"

/* HW steering counter's query interval. */
#define MLX5_HWS_CNT_CYCLE_TIME "svc_cycle_time"

/* Device parameter to control representor matching in ingress/egress flows with HWS. */
#define MLX5_REPR_MATCHING_EN "repr_matching_en"
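
/*
 * Example: the parameters above are passed as comma-separated device
 * arguments on the EAL command line, e.g. (the PCI address below is only
 * a placeholder):
 *   dpdk-testpmd -a 0000:03:00.0,mprq_en=1,rxq_cqe_comp_en=0,dv_flow_en=1 -- -i
 */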

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/** Driver-specific log messages type. */
int mlx5_logtype;

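/*
 * List of shared device contexts; a context may be shared by several ports
 * of the same device. Access is serialized with mlx5_dev_ctx_list_mutex.
 */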
static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
				LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_dev_ctx_list_mutex;

static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	[MLX5_IPOOL_DECAP_ENCAP] = {
		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_encap_decap_ipool",
	},
	[MLX5_IPOOL_PUSH_VLAN] = {
		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_push_vlan_ipool",
	},
	[MLX5_IPOOL_TAG] = {
		.size = sizeof(struct mlx5_flow_dv_tag_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.per_core_cache = (1 << 16),
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_tag_ipool",
	},
	[MLX5_IPOOL_PORT_ID] = {
		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_port_id_ipool",
	},
	[MLX5_IPOOL_JUMP] = {
		.size = sizeof(struct mlx5_flow_tbl_data_entry),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_jump_ipool",
	},
	[MLX5_IPOOL_SAMPLE] = {
		.size = sizeof(struct mlx5_flow_dv_sample_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_sample_ipool",
	},
	[MLX5_IPOOL_DEST_ARRAY] = {
		.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_dest_array_ipool",
	},
	[MLX5_IPOOL_TUNNEL_ID] = {
		.size = sizeof(struct mlx5_flow_tunnel),
		.trunk_size = MLX5_MAX_TUNNELS,
		.need_lock = 1,
		.release_mem_en = 1,
		.type = "mlx5_tunnel_offload",
	},
	[MLX5_IPOOL_TNL_TBL_ID] = {
		.size = 0,
		.need_lock = 1,
		.type = "mlx5_flow_tnl_tbl_ipool",
	},
#endif
	[MLX5_IPOOL_MTR] = {
		/**
		 * The ipool index should grow continually from small to big;
		 * grow_trunk is not set for the meter index so that indexes
		 * do not jump discontinuously.
		 */
		.size = sizeof(struct mlx5_legacy_flow_meter),
		.trunk_size = 64,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_meter_ipool",
	},
	[MLX5_IPOOL_MCP] = {
		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_mcp_ipool",
	},
	[MLX5_IPOOL_HRXQ] = {
		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_hrxq_ipool",
	},
	[MLX5_IPOOL_MLX5_FLOW] = {
		/*
		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
		 * It is set at run time according to the PCI function
		 * configuration.
		 */
		.size = 0,
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.per_core_cache = 1 << 19,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_flow_handle_ipool",
	},
	[MLX5_IPOOL_RTE_FLOW] = {
		.size = sizeof(struct rte_flow),
		.trunk_size = 4096,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "rte_flow_ipool",
	},
	[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
		.size = 0,
		.need_lock = 1,
		.type = "mlx5_flow_rss_id_ipool",
	},
	[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
		.size = sizeof(struct mlx5_shared_action_rss),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_shared_action_rss",
	},
	[MLX5_IPOOL_MTR_POLICY] = {
		/**
		 * The ipool index should grow continually from small to big;
		 * grow_trunk is not set for the policy index so that indexes
		 * do not jump discontinuously.
		 */
		.size = sizeof(struct mlx5_flow_meter_sub_policy),
		.trunk_size = 64,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_meter_policy_ipool",
	},
};
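
/*
 * Each mlx5_ipool_cfg[] entry above configures one indexed memory pool:
 * the element size, the trunk growth parameters (trunk_size, grow_trunk,
 * grow_shift), whether the pool needs locking, and whether freed trunk
 * memory is released back to the system (release_mem_en) or entries are
 * cached per lcore (per_core_cache).
 */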

#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16

#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 1024

/**
 * Decide whether representor ID is a HPF (host PF) port on BF2.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Non-zero if HPF, otherwise 0.
 */
bool
mlx5_is_hpf(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t repr = MLX5_REPRESENTOR_REPR(priv->representor_id);
	int type = MLX5_REPRESENTOR_TYPE(priv->representor_id);

	return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_VF &&
	       MLX5_REPRESENTOR_REPR(-1) == repr;
}

/**
 * Decide whether representor ID is an SF port representor.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Non-zero if SF port representor, otherwise 0.
 */
bool
mlx5_is_sf_repr(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int type = MLX5_REPRESENTOR_TYPE(priv->representor_id);

	return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_SF;
}

/**
 * Initialize the ASO aging management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to initialize.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int err;

	if (sh->aso_age_mng)
		return 0;
	sh->aso_age_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->aso_age_mng),
				      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh->aso_age_mng) {
		DRV_LOG(ERR, "aso_age_mng allocation was failed.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_FLOW_HIT, 1);
	if (err) {
		mlx5_free(sh->aso_age_mng);
		return -1;
	}
	rte_rwlock_init(&sh->aso_age_mng->resize_rwl);
	rte_spinlock_init(&sh->aso_age_mng->free_sl);
	LIST_INIT(&sh->aso_age_mng->free);
	return 0;
}

/**
 * Close and release all the resources of the ASO aging management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	int i, j;

	mlx5_aso_flow_hit_queue_poll_stop(sh);
	mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_FLOW_HIT);
	if (sh->aso_age_mng->pools) {
		struct mlx5_aso_age_pool *pool;

		for (i = 0; i < sh->aso_age_mng->next; ++i) {
			pool = sh->aso_age_mng->pools[i];
			claim_zero(mlx5_devx_cmd_destroy
				   (pool->flow_hit_aso_obj));
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j)
				if (pool->actions[j].dr_action)
					claim_zero
					    (mlx5_flow_os_destroy_flow_action
					     (pool->actions[j].dr_action));
			mlx5_free(pool);
		}
		mlx5_free(sh->aso_age_mng->pools);
	}
	mlx5_free(sh->aso_age_mng);
}

/**
 * Initialize the shared aging list information per port.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
{
	uint32_t i;
	struct mlx5_age_info *age_info;

	/*
	 * In HW steering, aging information structure is initialized later
	 * during configure function.
	 */
	if (sh->config.dv_flow_en == 2)
		return;
	for (i = 0; i < sh->max_port; i++) {
		age_info = &sh->port[i].age_info;
		age_info->flags = 0;
		TAILQ_INIT(&age_info->aged_counters);
		LIST_INIT(&age_info->aged_aso);
		rte_spinlock_init(&age_info->aged_sl);
		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	}
}

/**
 * DV flow counter mode detect and config.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 */
void
mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
	bool fallback;

#ifndef HAVE_IBV_DEVX_ASYNC
	fallback = true;
#else
	fallback = false;
	if (!sh->cdev->config.devx || !sh->config.dv_flow_en ||
	    !hca_attr->flow_counters_dump ||
	    !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
	    (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
		fallback = true;
#endif
	if (fallback)
		DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
			"counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
			hca_attr->flow_counters_dump,
			hca_attr->flow_counter_bulk_alloc_bitmap);
	/* Initialize fallback mode only on the port that initializes sh. */
	if (sh->refcnt == 1)
		sh->sws_cmng.counter_fallback = fallback;
	else if (fallback != sh->sws_cmng.counter_fallback)
		DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
			"with others:%d.", PORT_ID(priv), fallback);
#endif
}

/**
 * Initialize the counters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to initialize.
 *
 * @return
 *   0 on success, otherwise negative errno value and rte_errno is set.
 */
static int
mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int i, j;

	if (sh->config.dv_flow_en < 2) {
		void *pools;

		pools = mlx5_malloc(MLX5_MEM_ZERO,
				    sizeof(struct mlx5_flow_counter_pool *) *
				    MLX5_COUNTER_POOLS_MAX_NUM,
				    0, SOCKET_ID_ANY);
		if (!pools) {
			DRV_LOG(ERR,
				"Counter management allocation was failed.");
			rte_errno = ENOMEM;
			return -rte_errno;
		}
		memset(&sh->sws_cmng, 0, sizeof(sh->sws_cmng));
		TAILQ_INIT(&sh->sws_cmng.flow_counters);
		sh->sws_cmng.min_id = MLX5_CNT_BATCH_OFFSET;
		sh->sws_cmng.max_id = -1;
		sh->sws_cmng.last_pool_idx = POOL_IDX_INVALID;
		sh->sws_cmng.pools = pools;
		rte_spinlock_init(&sh->sws_cmng.pool_update_sl);
		for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {
			TAILQ_INIT(&sh->sws_cmng.counters[i]);
			rte_spinlock_init(&sh->sws_cmng.csl[i]);
		}
	} else {
		struct mlx5_hca_attr *attr = &sh->cdev->config.hca_attr;
		uint32_t fw_max_nb_cnts = attr->max_flow_counter;
		uint8_t log_dcs = log2above(fw_max_nb_cnts) - 1;
		uint32_t max_nb_cnts = 0;

		for (i = 0, j = 0; j < MLX5_HWS_CNT_DCS_NUM; ++i) {
			int log_dcs_i = log_dcs - i;

			if (log_dcs_i < 0)
				break;
			if ((max_nb_cnts | RTE_BIT32(log_dcs_i)) >
			    fw_max_nb_cnts)
				continue;
			max_nb_cnts |= RTE_BIT32(log_dcs_i);
			j++;
		}
		sh->hws_max_log_bulk_sz = log_dcs;
		sh->hws_max_nb_counters = max_nb_cnts;
	}
	return 0;
}
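
/*
 * Note: in the HW steering branch above, hws_max_nb_counters is built by
 * OR-ing together up to MLX5_HWS_CNT_DCS_NUM of the largest power-of-two
 * bulk sizes that still fit within the firmware max_flow_counter limit,
 * and hws_max_log_bulk_sz records the largest such bulk size (log2).
 */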

/**
 * Destroy all the resources allocated for a counter memory management.
 *
 * @param[in] mng
 *   Pointer to the memory management structure.
 */
static void
mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
{
	uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;

	LIST_REMOVE(mng, next);
	mlx5_os_wrapped_mkey_destroy(&mng->wm);
	mlx5_free(mem);
}

/**
 * Close and release all the resources of the counters management.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_counter_stats_mem_mng *mng;
	int i, j;
	int retries = 1024;

	rte_errno = 0;
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}

	if (sh->sws_cmng.pools) {
		struct mlx5_flow_counter_pool *pool;
		uint16_t n_valid = sh->sws_cmng.n_valid;
		bool fallback = sh->sws_cmng.counter_fallback;

		for (i = 0; i < n_valid; ++i) {
			pool = sh->sws_cmng.pools[i];
			if (!fallback && pool->min_dcs)
				claim_zero(mlx5_devx_cmd_destroy
					   (pool->min_dcs));
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
				struct mlx5_flow_counter *cnt =
					MLX5_POOL_GET_CNT(pool, j);

				if (cnt->action)
					claim_zero
					 (mlx5_flow_os_destroy_flow_action
					  (cnt->action));
				if (fallback && cnt->dcs_when_free)
					claim_zero(mlx5_devx_cmd_destroy
						   (cnt->dcs_when_free));
			}
			mlx5_free(pool);
		}
		mlx5_free(sh->sws_cmng.pools);
	}
	mng = LIST_FIRST(&sh->sws_cmng.mem_mngs);
	while (mng) {
		mlx5_flow_destroy_counter_stat_mem_mng(mng);
		mng = LIST_FIRST(&sh->sws_cmng.mem_mngs);
	}
	memset(&sh->sws_cmng, 0, sizeof(sh->sws_cmng));
}

/**
 * Initialize the ASO flow meters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to initialize.
 */
int
mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	if (!sh->mtrmng) {
		sh->mtrmng = mlx5_malloc(MLX5_MEM_ZERO,
			sizeof(*sh->mtrmng),
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!sh->mtrmng) {
			DRV_LOG(ERR,
				"meter management allocation was failed.");
			rte_errno = ENOMEM;
			return -ENOMEM;
		}
		if (sh->meter_aso_en) {
			rte_spinlock_init(&sh->mtrmng->pools_mng.mtrsl);
			rte_rwlock_init(&sh->mtrmng->pools_mng.resize_mtrwl);
			LIST_INIT(&sh->mtrmng->pools_mng.meters);
		}
		sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID;
	}
	return 0;
}

/**
 * Close and release all the resources of
 * the ASO flow meter management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_aso_flow_mtrs_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_mtr_pool *mtr_pool;
	struct mlx5_flow_mtr_mng *mtrmng = sh->mtrmng;
	uint32_t idx;
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
	struct mlx5_aso_mtr *aso_mtr;
	int i;
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */

	if (sh->meter_aso_en) {
		mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_POLICER);
		idx = mtrmng->pools_mng.n_valid;
		while (idx--) {
			mtr_pool = mtrmng->pools_mng.pools[idx];
#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
			for (i = 0; i < MLX5_ASO_MTRS_PER_POOL; i++) {
				aso_mtr = &mtr_pool->mtrs[i];
				if (aso_mtr->fm.meter_action_g)
|
|
|
claim_zero
|
|
|
|
(mlx5_glue->destroy_flow_action
|
2022-05-13 07:33:06 +00:00
|
|
|
(aso_mtr->fm.meter_action_g));
|
|
|
|
if (aso_mtr->fm.meter_action_y)
|
|
|
|
claim_zero
|
|
|
|
(mlx5_glue->destroy_flow_action
|
|
|
|
(aso_mtr->fm.meter_action_y));
|
net/mlx5: support meter policy operations
MLX5 PMD checks the validation of actions in policy while add
a new meter policy, if pass the validation, allocates the new
policy object from the meter policy indexed memory pool.
It is common to use the same policy for multiple meters.
MLX5 PMD supports two types of policy: termination policy and
no-termination policy.
Implement the next policy operations:
validate:
The driver doesn't support to configure actions in the flow
after the meter action except one case when the meter policy
is configured to do nothing in GREEN\YELLOW and only DROP action
in RED, this special policy is called non-terminated policy
and is handed as a singleton object internally.
For all the terminated policies, the next actions are supported:
GREEN - QUEUE, RSS, PORT_ID, JUMP, DROP, MARK and SET_TAG.
YELLOW - not supported at all -> must be empty.
RED - must include DROP action.
Hence, in ingress case, for example,
QUEUE\RSS\JUMP must be configured as last action for GREEN color.
All the above limitations will be validated.
create:
Validate the policy configuration.
Prepare the related tables and actions.
destroy:
Release the created policy resources.
Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
2021-04-27 10:43:51 +00:00
|
|
|
}
|
2021-04-20 10:55:19 +00:00
|
|
|
#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
|
net/mlx5: support meter policy operations
MLX5 PMD checks the validation of actions in policy while add
a new meter policy, if pass the validation, allocates the new
policy object from the meter policy indexed memory pool.
It is common to use the same policy for multiple meters.
MLX5 PMD supports two types of policy: termination policy and
no-termination policy.
Implement the next policy operations:
validate:
The driver doesn't support to configure actions in the flow
after the meter action except one case when the meter policy
is configured to do nothing in GREEN\YELLOW and only DROP action
in RED, this special policy is called non-terminated policy
and is handed as a singleton object internally.
For all the terminated policies, the next actions are supported:
GREEN - QUEUE, RSS, PORT_ID, JUMP, DROP, MARK and SET_TAG.
YELLOW - not supported at all -> must be empty.
RED - must include DROP action.
Hence, in ingress case, for example,
QUEUE\RSS\JUMP must be configured as last action for GREEN color.
All the above limitations will be validated.
create:
Validate the policy configuration.
Prepare the related tables and actions.
destroy:
Release the created policy resources.
Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
2021-04-27 10:43:51 +00:00
|
|
|
claim_zero(mlx5_devx_cmd_destroy
|
2021-04-20 10:55:17 +00:00
|
|
|
(mtr_pool->devx_obj));
|
net/mlx5: support meter policy operations
MLX5 PMD checks the validation of actions in policy while add
a new meter policy, if pass the validation, allocates the new
policy object from the meter policy indexed memory pool.
It is common to use the same policy for multiple meters.
MLX5 PMD supports two types of policy: termination policy and
no-termination policy.
Implement the next policy operations:
validate:
The driver doesn't support to configure actions in the flow
after the meter action except one case when the meter policy
is configured to do nothing in GREEN\YELLOW and only DROP action
in RED, this special policy is called non-terminated policy
and is handed as a singleton object internally.
For all the terminated policies, the next actions are supported:
GREEN - QUEUE, RSS, PORT_ID, JUMP, DROP, MARK and SET_TAG.
YELLOW - not supported at all -> must be empty.
RED - must include DROP action.
Hence, in ingress case, for example,
QUEUE\RSS\JUMP must be configured as last action for GREEN color.
All the above limitations will be validated.
create:
Validate the policy configuration.
Prepare the related tables and actions.
destroy:
Release the created policy resources.
Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
2021-04-27 10:43:51 +00:00
|
|
|
mtrmng->pools_mng.n_valid--;
|
|
|
|
mlx5_free(mtr_pool);
|
|
|
|
}
|
|
|
|
mlx5_free(sh->mtrmng->pools_mng.pools);
|
2021-04-20 10:55:17 +00:00
|
|
|
}
|
|
|
|
mlx5_free(sh->mtrmng);
|
|
|
|
sh->mtrmng = NULL;
|
|
|
|
}

/* Send FLOW_AGED event if needed. */
void
mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_age_info *age_info;
	uint32_t i;

	for (i = 0; i < sh->max_port; i++) {
		age_info = &sh->port[i].age_info;
		if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
			continue;
		MLX5_AGE_UNSET(age_info, MLX5_AGE_EVENT_NEW);
		if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER)) {
			MLX5_AGE_UNSET(age_info, MLX5_AGE_TRIGGER);
			rte_eth_dev_callback_process
				(&rte_eth_devices[sh->port[i].devx_ih_port_id],
				RTE_ETH_EVENT_FLOW_AGED, NULL);
		}
	}
}
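
/*
 * Usage sketch (illustrative, not a requirement of the PMD): an application
 * usually registers a callback for RTE_ETH_EVENT_FLOW_AGED and, when it
 * fires, collects the aged flows, e.g. with rte_flow_get_aged_flows().
 * The MLX5_AGE_EVENT_NEW and MLX5_AGE_TRIGGER bits cleared above keep the
 * event from being re-raised until new aged flows are detected.
 */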

/*
 * Initialize the ASO connection tracking structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_aso_ct_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int err;

	if (sh->ct_mng)
		return 0;
	sh->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->ct_mng) +
				 sizeof(struct mlx5_aso_sq) * MLX5_ASO_CT_SQ_NUM,
				 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh->ct_mng) {
		DRV_LOG(ERR, "ASO CT management allocation failed.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_CONNECTION_TRACKING, MLX5_ASO_CT_SQ_NUM);
	if (err) {
		mlx5_free(sh->ct_mng);
		/* rte_errno should be extracted from the failure. */
		rte_errno = EINVAL;
		return -rte_errno;
	}
	rte_spinlock_init(&sh->ct_mng->ct_sl);
	rte_rwlock_init(&sh->ct_mng->resize_rwl);
	LIST_INIT(&sh->ct_mng->free_cts);
	return 0;
}
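
/*
 * Layout note for mlx5_flow_aso_ct_mng_init(): the management structure and
 * its MLX5_ASO_CT_SQ_NUM ASO send queues come from one cache-line aligned
 * allocation, so a single mlx5_free(sh->ct_mng) releases the structure
 * together with its queues on the error path.
 */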

/*
 * Close and release all the resources of the
 * ASO connection tracking management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_aso_ct_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
	struct mlx5_aso_ct_pool *ct_pool;
	struct mlx5_aso_ct_action *ct;
	uint32_t idx;
	uint32_t val;
	uint32_t cnt;
	int i;

	mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_CONNECTION_TRACKING);
	idx = mng->next;
	while (idx--) {
		cnt = 0;
		ct_pool = mng->pools[idx];
		for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
			ct = &ct_pool->actions[i];
			val = __atomic_fetch_sub(&ct->refcnt, 1,
						 __ATOMIC_RELAXED);
			MLX5_ASSERT(val == 1);
			if (val > 1)
				cnt++;
#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
			if (ct->dr_action_orig)
				claim_zero(mlx5_glue->destroy_flow_action
							(ct->dr_action_orig));
			if (ct->dr_action_rply)
				claim_zero(mlx5_glue->destroy_flow_action
							(ct->dr_action_rply));
#endif
		}
		claim_zero(mlx5_devx_cmd_destroy(ct_pool->devx_obj));
		if (cnt) {
			DRV_LOG(DEBUG, "%u ASO CT objects are being used in the pool %u",
				cnt, i);
		}
		mlx5_free(ct_pool);
		/* in case of failure. */
		mng->next--;
	}
	mlx5_free(mng->pools);
	mlx5_free(mng);
	/* Management structure must be cleared to 0s during allocation. */
	sh->ct_mng = NULL;
}

/**
 * Initialize the flow resources' indexed mempool.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh)
{
	uint8_t i;
	struct mlx5_indexed_pool_config cfg;

	for (i = 0; i < MLX5_IPOOL_MAX; ++i) {
		cfg = mlx5_ipool_cfg[i];
		switch (i) {
		default:
			break;
		/*
		 * Set MLX5_IPOOL_MLX5_FLOW ipool size
		 * according to PCI function flow configuration.
		 */
		case MLX5_IPOOL_MLX5_FLOW:
			cfg.size = sh->config.dv_flow_en ?
				sizeof(struct mlx5_flow_handle) :
				MLX5_FLOW_HANDLE_VERBS_SIZE;
			break;
		}
		if (sh->config.reclaim_mode) {
			cfg.release_mem_en = 1;
			cfg.per_core_cache = 0;
		} else {
			cfg.release_mem_en = 0;
		}
		sh->ipool[i] = mlx5_ipool_create(&cfg);
	}
}
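
/*
 * Example of the reclaim behaviour set up by mlx5_flow_ipool_create(): with
 * the "reclaim_mem_mode" devarg at 1 (light) or 2 (aggressive) every pool is
 * created with release_mem_en = 1 and per_core_cache = 0, so memory of
 * destroyed flows is handed back instead of being cached; with the default
 * mode 0 the per-core caches are kept to favour flow insertion rate.
 */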

/**
 * Release the flow resources' indexed mempool.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh)
{
	uint8_t i;

	for (i = 0; i < MLX5_IPOOL_MAX; ++i)
		mlx5_ipool_destroy(sh->ipool[i]);
	for (i = 0; i < MLX5_MAX_MODIFY_NUM; ++i)
		if (sh->mdh_ipools[i])
			mlx5_ipool_destroy(sh->mdh_ipools[i]);
}

/*
 * Check if dynamic flex parser for eCPRI already exists.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   true if it exists, false otherwise.
 */
bool
mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;

	return !!prf->obj;
}

/*
 * Allocation of a flex parser for eCPRI. Once created, the related parser
 * resources will be held until the device is closed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
	struct mlx5_devx_graph_node_attr node = {
		.modify_field_select = 0,
	};
	uint32_t ids[8];
	int ret;

	if (!priv->sh->cdev->config.hca_attr.parse_graph_flex_node) {
		DRV_LOG(ERR, "Dynamic flex parser is not supported "
			"for device %s.", priv->dev_data->name);
		return -ENOTSUP;
	}
	node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
	/* 8 bytes now: 4B common header + 4B message body header. */
	node.header_length_base_value = 0x8;
	/* After MAC layer: Ether / VLAN. */
	node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_MAC;
	/* Type of compared condition should be 0xAEFE in the L2 layer. */
	node.in[0].compare_condition_value = RTE_ETHER_TYPE_ECPRI;
	/* Sample #0: type in common header. */
	node.sample[0].flow_match_sample_en = 1;
	/* Fixed offset. */
	node.sample[0].flow_match_sample_offset_mode = 0x0;
	/* Only the 2nd byte will be used. */
	node.sample[0].flow_match_sample_field_base_offset = 0x0;
	/* Sample #1: message payload. */
	node.sample[1].flow_match_sample_en = 1;
	/* Fixed offset. */
	node.sample[1].flow_match_sample_offset_mode = 0x0;
	/*
	 * Only the first two bytes will be used right now, and their offset
	 * starts after the common header, which has the length of one DW (u32).
	 */
	node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t);
	prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->cdev->ctx, &node);
	if (!prf->obj) {
		DRV_LOG(ERR, "Failed to create flex parser node object.");
		return (rte_errno == 0) ? -ENODEV : -rte_errno;
	}
	prf->num = 2;
	ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num);
	if (ret) {
		DRV_LOG(ERR, "Failed to query sample IDs.");
		return (rte_errno == 0) ? -ENODEV : -rte_errno;
	}
	prf->offset[0] = 0x0;
	prf->offset[1] = sizeof(uint32_t);
	prf->ids[0] = ids[0];
	prf->ids[1] = ids[1];
	return 0;
}
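
/*
 * Resulting sample layout of mlx5_flex_parser_ecpri_alloc(): sample #0
 * covers the 4-byte eCPRI common header at offset 0 and sample #1 covers
 * the first DW of the message body at offset 4 (sizeof(uint32_t)); both
 * hardware sample IDs are cached in prf->ids[] for later matching.
 */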

/*
 * Destroy the flex parser node, including the parser itself, input / output
 * arcs and DW samples. Resources could be reused then.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;

	if (prf->obj)
		mlx5_devx_cmd_destroy(prf->obj);
	prf->obj = NULL;
}

uint32_t
mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr)
{
	uint32_t sw_parsing_offloads = 0;

	if (attr->swp) {
		sw_parsing_offloads |= MLX5_SW_PARSING_CAP;
		if (attr->swp_csum)
			sw_parsing_offloads |= MLX5_SW_PARSING_CSUM_CAP;

		if (attr->swp_lso)
			sw_parsing_offloads |= MLX5_SW_PARSING_TSO_CAP;
	}
	return sw_parsing_offloads;
}

uint32_t
mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr)
{
	uint32_t tn_offloads = 0;

	if (attr->tunnel_stateless_vxlan)
		tn_offloads |= MLX5_TUNNELED_OFFLOADS_VXLAN_CAP;
	if (attr->tunnel_stateless_gre)
		tn_offloads |= MLX5_TUNNELED_OFFLOADS_GRE_CAP;
	if (attr->tunnel_stateless_geneve_rx)
		tn_offloads |= MLX5_TUNNELED_OFFLOADS_GENEVE_CAP;
	return tn_offloads;
}

/* Fill all fields of UAR structure. */
static int
mlx5_rxtx_uars_prepare(struct mlx5_dev_ctx_shared *sh)
{
	int ret;

	ret = mlx5_devx_uar_prepare(sh->cdev, &sh->tx_uar);
	if (ret) {
		DRV_LOG(ERR, "Failed to prepare Tx DevX UAR.");
		return -rte_errno;
	}
	MLX5_ASSERT(sh->tx_uar.obj);
	MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar.obj));
	ret = mlx5_devx_uar_prepare(sh->cdev, &sh->rx_uar);
	if (ret) {
		DRV_LOG(ERR, "Failed to prepare Rx DevX UAR.");
		mlx5_devx_uar_release(&sh->tx_uar);
		return -rte_errno;
	}
	MLX5_ASSERT(sh->rx_uar.obj);
	MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->rx_uar.obj));
	return 0;
}
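
/*
 * mlx5_rxtx_uars_prepare() is all-or-nothing: if the Rx UAR cannot be
 * prepared, the already prepared Tx UAR is released before returning, so
 * callers never have to unwind a partial UAR setup.
 */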

static void
mlx5_rxtx_uars_release(struct mlx5_dev_ctx_shared *sh)
{
	mlx5_devx_uar_release(&sh->rx_uar);
	mlx5_devx_uar_release(&sh->tx_uar);
}

/**
 * rte_mempool_walk() callback to unregister Rx mempools.
 * It is used when implicit mempool registration is disabled.
 *
 * @param mp
 *   The mempool being walked.
 * @param arg
 *   Pointer to the device shared context.
 */
static void
mlx5_dev_ctx_shared_rx_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
{
	struct mlx5_dev_ctx_shared *sh = arg;

	mlx5_dev_mempool_unregister(sh->cdev, mp);
}

/**
 * Callback used when implicit mempool registration is disabled
 * in order to track Rx mempool destruction.
 *
 * @param event
 *   Mempool life cycle event.
 * @param mp
 *   An Rx mempool registered explicitly when the port is started.
 * @param arg
 *   Pointer to a device shared context.
 */
static void
mlx5_dev_ctx_shared_rx_mempool_event_cb(enum rte_mempool_event event,
					struct rte_mempool *mp, void *arg)
{
	struct mlx5_dev_ctx_shared *sh = arg;

	if (event == RTE_MEMPOOL_EVENT_DESTROY)
		mlx5_dev_mempool_unregister(sh->cdev, mp);
}

int
mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	int ret;

	/* Check if we only need to track Rx mempool destruction. */
	if (!sh->cdev->config.mr_mempool_reg_en) {
		ret = rte_mempool_event_callback_register
				(mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
		return ret == 0 || rte_errno == EEXIST ? 0 : ret;
	}
	return mlx5_dev_mempool_subscribe(sh->cdev);
}
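
/*
 * Two registration modes are handled by mlx5_dev_ctx_shared_mempool_subscribe():
 * with mr_mempool_reg_en disabled only a destroy-event callback is installed
 * to drop MRs of Rx mempools being destroyed, otherwise full mempool
 * registration is delegated to mlx5_dev_mempool_subscribe(). EEXIST from the
 * callback registration is treated as success, presumably because several
 * ports may share the same device context.
 */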

/**
 * Set up multiple TISs with different affinities according to
 * number of bonding ports.
 *
 * @param sh
 *   Pointer to the shared device context.
 *
 * @return
 *   Zero on success, -1 otherwise.
 */
static int
mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)
{
	int i;
	struct mlx5_devx_lag_context lag_ctx = { 0 };
	struct mlx5_devx_tis_attr tis_attr = { 0 };

	tis_attr.transport_domain = sh->td->id;
	if (sh->bond.n_port) {
		if (!mlx5_devx_cmd_query_lag(sh->cdev->ctx, &lag_ctx)) {
			sh->lag.tx_remap_affinity[0] =
				lag_ctx.tx_remap_affinity_1;
			sh->lag.tx_remap_affinity[1] =
				lag_ctx.tx_remap_affinity_2;
			sh->lag.affinity_mode = lag_ctx.port_select_mode;
		} else {
			DRV_LOG(ERR, "Failed to query lag affinity.");
			return -1;
		}
		if (sh->lag.affinity_mode == MLX5_LAG_MODE_TIS) {
			for (i = 0; i < sh->bond.n_port; i++) {
				tis_attr.lag_tx_port_affinity =
					MLX5_IFC_LAG_MAP_TIS_AFFINITY(i,
							sh->bond.n_port);
				sh->tis[i] = mlx5_devx_cmd_create_tis(sh->cdev->ctx,
						&tis_attr);
				if (!sh->tis[i]) {
					DRV_LOG(ERR, "Failed to create TIS %d/%d for bonding device"
						" %s.", i, sh->bond.n_port,
						sh->ibdev_name);
					return -1;
				}
			}
			DRV_LOG(DEBUG, "LAG number of ports : %d, affinity_1 & 2 : pf%d & %d.\n",
				sh->bond.n_port, lag_ctx.tx_remap_affinity_1,
				lag_ctx.tx_remap_affinity_2);
			return 0;
		}
		if (sh->lag.affinity_mode == MLX5_LAG_MODE_HASH)
			DRV_LOG(INFO, "Device %s enabled HW hash based LAG.",
					sh->ibdev_name);
	}
	tis_attr.lag_tx_port_affinity = 0;
	sh->tis[0] = mlx5_devx_cmd_create_tis(sh->cdev->ctx, &tis_attr);
	if (!sh->tis[0]) {
		DRV_LOG(ERR, "Failed to create TIS 0 for bonding device"
			" %s.", sh->ibdev_name);
		return -1;
	}
	return 0;
}
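
/*
 * Affinity example for mlx5_setup_tis() (illustrative): on a two-port
 * bonding device in MLX5_LAG_MODE_TIS, TIS[0] and TIS[1] get their
 * lag_tx_port_affinity from MLX5_IFC_LAG_MAP_TIS_AFFINITY(i, 2), which
 * typically maps them to physical ports 1 and 2. Without bonding a single
 * TIS[0] is created with affinity 0, i.e. no port constraint.
 */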

/**
 * Verify and store value for shared device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_dev_args_check_handler(const char *key, const char *val, void *opaque)
{
	struct mlx5_sh_config *config = opaque;
	signed long tmp;

	errno = 0;
	tmp = strtol(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) {
		/* Negative values are acceptable for some keys only. */
		rte_errno = EINVAL;
		DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX5_TX_PP, key) == 0) {
		unsigned long mod = tmp >= 0 ? tmp : -tmp;

		if (!mod) {
			DRV_LOG(ERR, "Zero Tx packet pacing parameter.");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->tx_pp = tmp;
	} else if (strcmp(MLX5_TX_SKEW, key) == 0) {
		config->tx_skew = tmp;
	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
		config->l3_vxlan_en = !!tmp;
	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
		config->vf_nl_en = !!tmp;
	} else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
		config->dv_esw_en = !!tmp;
	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
		if (tmp > 2) {
			DRV_LOG(ERR, "Invalid %s parameter.", key);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dv_flow_en = tmp;
	} else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
		if (tmp != MLX5_XMETA_MODE_LEGACY &&
		    tmp != MLX5_XMETA_MODE_META16 &&
		    tmp != MLX5_XMETA_MODE_META32 &&
		    tmp != MLX5_XMETA_MODE_MISS_INFO &&
		    tmp != MLX5_XMETA_MODE_META32_HWS) {
			DRV_LOG(ERR, "Invalid extensive metadata parameter.");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		if (tmp != MLX5_XMETA_MODE_MISS_INFO)
			config->dv_xmeta_en = tmp;
		else
			config->dv_miss_info = 1;
	} else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
		config->lacp_by_user = !!tmp;
	} else if (strcmp(MLX5_RECLAIM_MEM, key) == 0) {
		if (tmp != MLX5_RCM_NONE &&
		    tmp != MLX5_RCM_LIGHT &&
		    tmp != MLX5_RCM_AGGR) {
			DRV_LOG(ERR, "Unrecognized %s: \"%s\"", key, val);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->reclaim_mode = tmp;
	} else if (strcmp(MLX5_DECAP_EN, key) == 0) {
		config->decap_en = !!tmp;
	} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
		config->allow_duplicate_pattern = !!tmp;
	} else if (strcmp(MLX5_FDB_DEFAULT_RULE_EN, key) == 0) {
		config->fdb_def_rule = !!tmp;
	} else if (strcmp(MLX5_HWS_CNT_SERVICE_CORE, key) == 0) {
		config->cnt_svc.service_core = tmp;
	} else if (strcmp(MLX5_HWS_CNT_CYCLE_TIME, key) == 0) {
		config->cnt_svc.cycle_time = tmp;
	} else if (strcmp(MLX5_REPR_MATCHING_EN, key) == 0) {
		config->repr_matching = !!tmp;
	}
	return 0;
}
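
/*
 * Devargs example (illustrative): a string such as
 * "dv_flow_en=1,dv_esw_en=0,reclaim_mem_mode=1" is split by the kvargs
 * layer and every key/value pair is passed to mlx5_dev_args_check_handler(),
 * which converts the value with strtol() and stores it in the shared
 * configuration; only "tx_pp" and "tx_skew" may carry negative values.
 */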

/**
 * Parse user device parameters and adjust them according to device
 * capabilities.
 *
 * @param sh
 *   Pointer to shared device context.
 * @param mkvlist
 *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
 * @param config
 *   Pointer to shared device configuration structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,
				struct mlx5_kvargs_ctrl *mkvlist,
				struct mlx5_sh_config *config)
{
	const char **params = (const char *[]){
		MLX5_TX_PP,
		MLX5_TX_SKEW,
		MLX5_L3_VXLAN_EN,
		MLX5_VF_NL_EN,
		MLX5_DV_ESW_EN,
		MLX5_DV_FLOW_EN,
		MLX5_DV_XMETA_EN,
		MLX5_LACP_BY_USER,
		MLX5_RECLAIM_MEM,
		MLX5_DECAP_EN,
		MLX5_ALLOW_DUPLICATE_PATTERN,
		MLX5_FDB_DEFAULT_RULE_EN,
		MLX5_HWS_CNT_SERVICE_CORE,
		MLX5_HWS_CNT_CYCLE_TIME,
		MLX5_REPR_MATCHING_EN,
		NULL,
	};
	int ret = 0;

	/* Default configuration. */
	memset(config, 0, sizeof(*config));
	config->vf_nl_en = 1;
	config->dv_esw_en = 1;
	config->dv_flow_en = 1;
	config->decap_en = 1;
	config->allow_duplicate_pattern = 1;
	config->fdb_def_rule = 1;
	config->cnt_svc.cycle_time = MLX5_CNT_SVC_CYCLE_TIME_DEFAULT;
	config->cnt_svc.service_core = rte_get_main_lcore();
	config->repr_matching = 1;
	if (mkvlist != NULL) {
		/* Process parameters. */
		ret = mlx5_kvargs_process(mkvlist, params,
					  mlx5_dev_args_check_handler, config);
		if (ret) {
			DRV_LOG(ERR, "Failed to process device arguments: %s",
				strerror(rte_errno));
			return -rte_errno;
		}
	}
	/* Adjust parameters according to device capabilities. */
	if (config->dv_flow_en && !sh->dev_cap.dv_flow_en) {
		DRV_LOG(WARNING, "DV flow is not supported.");
		config->dv_flow_en = 0;
	}
	if (config->dv_esw_en && !sh->dev_cap.dv_esw_en) {
		DRV_LOG(DEBUG, "E-Switch DV flow is not supported.");
		config->dv_esw_en = 0;
	}
	if (config->dv_esw_en && !config->dv_flow_en) {
		DRV_LOG(DEBUG,
			"E-Switch DV flow is supported only when DV flow is enabled.");
		config->dv_esw_en = 0;
	}
	if (config->dv_miss_info && config->dv_esw_en)
		config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
	if (!config->dv_esw_en &&
	    config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
		DRV_LOG(WARNING,
			"Metadata mode %u is not supported (no E-Switch).",
			config->dv_xmeta_en);
		config->dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
	}
	if (config->dv_flow_en != 2 && !config->repr_matching) {
		DRV_LOG(DEBUG, "Disabling representor matching is valid only "
			       "when HW Steering is enabled.");
		config->repr_matching = 1;
	}
	if (config->tx_pp && !sh->dev_cap.txpp_en) {
		DRV_LOG(ERR, "Packet pacing is not supported.");
		rte_errno = ENODEV;
		return -rte_errno;
	}
	if (!config->tx_pp && config->tx_skew) {
		DRV_LOG(WARNING,
			"\"tx_skew\" has no effect without \"tx_pp\".");
	}
	/* Check for LRO support. */
	if (mlx5_devx_obj_ops_en(sh) && sh->cdev->config.hca_attr.lro_cap) {
		/* TBD check tunnel lro caps. */
		config->lro_allowed = 1;
		DRV_LOG(DEBUG, "LRO is allowed.");
		DRV_LOG(DEBUG,
			"LRO minimal size of TCP segment required for coalescing is %d bytes.",
			sh->cdev->config.hca_attr.lro_min_mss_size);
	}
	/*
	 * If HW has a bug working with tunnel packet decapsulation and scatter
	 * FCS, and decapsulation is needed, clear the hw_fcs_strip bit.
	 * Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
	 */
	if (sh->dev_cap.scatter_fcs_w_decap_disable && sh->config.decap_en)
		config->hw_fcs_strip = 0;
	else
		config->hw_fcs_strip = sh->dev_cap.hw_fcs_strip;
	DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
		(config->hw_fcs_strip ? "" : "not "));
	DRV_LOG(DEBUG, "\"tx_pp\" is %d.", config->tx_pp);
	DRV_LOG(DEBUG, "\"tx_skew\" is %d.", config->tx_skew);
	DRV_LOG(DEBUG, "\"reclaim_mode\" is %u.", config->reclaim_mode);
	DRV_LOG(DEBUG, "\"dv_esw_en\" is %u.", config->dv_esw_en);
	DRV_LOG(DEBUG, "\"dv_flow_en\" is %u.", config->dv_flow_en);
	DRV_LOG(DEBUG, "\"dv_xmeta_en\" is %u.", config->dv_xmeta_en);
	DRV_LOG(DEBUG, "\"dv_miss_info\" is %u.", config->dv_miss_info);
	DRV_LOG(DEBUG, "\"l3_vxlan_en\" is %u.", config->l3_vxlan_en);
	DRV_LOG(DEBUG, "\"vf_nl_en\" is %u.", config->vf_nl_en);
	DRV_LOG(DEBUG, "\"lacp_by_user\" is %u.", config->lacp_by_user);
	DRV_LOG(DEBUG, "\"decap_en\" is %u.", config->decap_en);
	DRV_LOG(DEBUG, "\"allow_duplicate_pattern\" is %u.",
		config->allow_duplicate_pattern);
	DRV_LOG(DEBUG, "\"fdb_def_rule_en\" is %u.", config->fdb_def_rule);
	DRV_LOG(DEBUG, "\"repr_matching_en\" is %u.", config->repr_matching);
	return 0;
}
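
/*
 * Adjustment example for mlx5_shared_dev_ctx_args_config(): requesting
 * dv_esw_en=1 on a device without E-Switch support, or with DV flow
 * disabled, silently clears the flag, and any extended metadata mode falls
 * back to MLX5_XMETA_MODE_LEGACY once E-Switch is off; only unsupported
 * packet pacing is treated as a hard error.
 */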

/**
 * Configure realtime timestamp format.
 *
 * @param sh
 *   Pointer to mlx5_dev_ctx_shared object.
 * @param hca_attr
 *   Pointer to DevX HCA capabilities structure.
 */
void
mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
			 struct mlx5_hca_attr *hca_attr)
{
	uint32_t dw_cnt = MLX5_ST_SZ_DW(register_mtutc);
	uint32_t reg[dw_cnt];
	int ret = ENOTSUP;

	if (hca_attr->access_register_user)
		ret = mlx5_devx_cmd_register_read(sh->cdev->ctx,
						  MLX5_REGISTER_ID_MTUTC, 0,
						  reg, dw_cnt);
	if (!ret) {
		uint32_t ts_mode;

		/* MTUTC register is read successfully. */
		ts_mode = MLX5_GET(register_mtutc, reg, time_stamp_mode);
		if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
			sh->dev_cap.rt_timestamp = 1;
	} else {
		/* Kernel does not support register reading. */
		if (hca_attr->dev_freq_khz == (NS_PER_S / MS_PER_S))
			sh->dev_cap.rt_timestamp = 1;
	}
}
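
/*
 * Fallback note for mlx5_rt_timestamp_config(): when the MTUTC register
 * cannot be read (no user access to registers), a device frequency equal to
 * NS_PER_S / MS_PER_S kHz, i.e. a 1 GHz free-running clock, is taken as an
 * indication that the device reports real-time timestamps.
 */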

/**
 * Allocate shared device context. For a multiport device the
 * master and representors will share this context; for a single
 * port dedicated device, the context will be used by only the given
 * port due to unification.
 *
 * The routine first searches the context list for the specified device name;
 * if found, the shared context is assumed and its reference counter is
 * incremented. If no context is found, a new one is created and initialized
 * with the specified device context and parameters.
 *
 * @param[in] spawn
 *   Pointer to the device attributes (name, port, etc).
 * @param mkvlist
 *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
 *
 * @return
 *   Pointer to mlx5_dev_ctx_shared object on success,
 *   otherwise NULL and rte_errno is set.
 */
struct mlx5_dev_ctx_shared *
mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
			  struct mlx5_kvargs_ctrl *mkvlist)
{
	struct mlx5_dev_ctx_shared *sh;
	int err = 0;
	uint32_t i;

	MLX5_ASSERT(spawn);
	/* Secondary process should not create the shared context. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
	/* Search for IB context by device name. */
	LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
		if (!strcmp(sh->ibdev_name, spawn->phys_dev_name)) {
			sh->refcnt++;
			goto exit;
		}
	}
	/* No device found, we have to create new shared context. */
	MLX5_ASSERT(spawn->max_port);
	sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
			 sizeof(struct mlx5_dev_ctx_shared) +
			 spawn->max_port * sizeof(struct mlx5_dev_shared_port),
			 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh) {
		DRV_LOG(ERR, "Shared context allocation failure.");
		rte_errno = ENOMEM;
		goto exit;
	}
	pthread_mutex_init(&sh->txpp.mutex, NULL);
	sh->numa_node = spawn->cdev->dev->numa_node;
	sh->cdev = spawn->cdev;
	sh->esw_mode = !!(spawn->info.master || spawn->info.representor);
	if (spawn->bond_info)
		sh->bond = *spawn->bond_info;
	err = mlx5_os_capabilities_prepare(sh);
	if (err) {
		DRV_LOG(ERR, "Failed to configure device capabilities.");
		goto error;
	}
	err = mlx5_shared_dev_ctx_args_config(sh, mkvlist, &sh->config);
	if (err) {
		DRV_LOG(ERR, "Failed to process device configuration: %s",
			strerror(rte_errno));
		goto error;
	}
	sh->refcnt = 1;
	sh->max_port = spawn->max_port;
	strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->cdev->ctx),
		sizeof(sh->ibdev_name) - 1);
	strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),
		sizeof(sh->ibdev_path) - 1);
	/*
	 * Setting port_id to max unallowed value means there is no interrupt
	 * subhandler installed for the given port index i.
	 */
	for (i = 0; i < sh->max_port; i++) {
		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
		sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
		sh->port[i].nl_ih_port_id = RTE_MAX_ETHPORTS;
	}
	if (sh->cdev->config.devx) {
		sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
		if (!sh->td) {
			DRV_LOG(ERR, "TD allocation failure");
			rte_errno = ENOMEM;
			goto error;
		}
		if (mlx5_setup_tis(sh)) {
			DRV_LOG(ERR, "TIS allocation failure");
			rte_errno = ENOMEM;
			goto error;
		}
		err = mlx5_rxtx_uars_prepare(sh);
		if (err)
			goto error;
#ifndef RTE_ARCH_64
	} else {
		/* Initialize UAR access locks for 32bit implementations. */
		rte_spinlock_init(&sh->uar_lock_cq);
		for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
			rte_spinlock_init(&sh->uar_lock[i]);
#endif
	}
|
2020-06-03 15:06:00 +00:00
|
|
|
mlx5_os_dev_shared_handler_install(sh);
|
2021-01-07 13:08:27 +00:00
|
|
|
if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
|
|
|
|
err = mlx5_flow_os_init_workspace_once();
|
|
|
|
if (err)
|
|
|
|
goto error;
|
|
|
|
}
|
2022-10-31 16:08:20 +00:00
|
|
|
err = mlx5_flow_counters_mng_init(sh);
|
|
|
|
if (err) {
|
|
|
|
DRV_LOG(ERR, "Fail to initialize counters manage.");
|
|
|
|
goto error;
|
|
|
|
}
|
2020-04-29 02:25:09 +00:00
|
|
|
mlx5_flow_aging_init(sh);
|
2022-02-14 09:35:07 +00:00
|
|
|
mlx5_flow_ipool_create(sh);
|
2019-08-06 15:00:33 +00:00
|
|
|
/* Add context to the global device list. */
|
2020-06-10 09:32:27 +00:00
|
|
|
LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
|
2021-01-17 10:21:20 +00:00
|
|
|
rte_spinlock_init(&sh->geneve_tlv_opt_sl);
|
2019-03-27 13:15:39 +00:00
|
|
|
exit:
|
2020-06-10 09:32:27 +00:00
|
|
|
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
|
2019-03-27 13:15:39 +00:00
|
|
|
return sh;
|
|
|
|
error:
|
2022-02-14 09:34:55 +00:00
|
|
|
err = rte_errno;
|
2020-07-16 08:23:08 +00:00
|
|
|
pthread_mutex_destroy(&sh->txpp.mutex);
|
2020-06-10 09:32:27 +00:00
|
|
|
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
|
2020-01-30 16:14:40 +00:00
|
|
|
MLX5_ASSERT(sh);
|
2022-02-14 09:34:55 +00:00
|
|
|
mlx5_rxtx_uars_release(sh);
|
2021-10-21 08:56:36 +00:00
|
|
|
i = 0;
|
|
|
|
do {
|
|
|
|
if (sh->tis[i])
|
|
|
|
claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
|
|
|
|
} while (++i < (uint32_t)sh->bond.n_port);
|
2022-02-14 09:34:55 +00:00
|
|
|
if (sh->td)
|
|
|
|
claim_zero(mlx5_devx_cmd_destroy(sh->td));
|
2020-06-28 09:02:44 +00:00
|
|
|
mlx5_free(sh);
|
2019-03-27 13:15:39 +00:00
|
|
|
rte_errno = err;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
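/*
 * Illustrative sketch (not part of the driver): the shared context is
 * reference counted, so a typical device spawn path pairs the allocation
 * and release calls. 'spawn' and 'mkvlist' below stand for the per-device
 * spawn data and parsed devargs already used throughout this file.
 *
 *      struct mlx5_dev_ctx_shared *sh;
 *
 *      sh = mlx5_alloc_shared_dev_ctx(spawn, mkvlist);
 *      if (sh == NULL)
 *              return NULL;    (rte_errno is set by the allocator)
 *      ... per-port initialization using sh ...
 *      mlx5_free_shared_dev_ctx(sh);   (drops the reference on teardown)
 */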
/**
 * Create LWM event_channel and interrupt handle for shared device
 * context. All rxqs sharing the device context share the event_channel.
 * A callback is registered in the interrupt thread to receive the LWM event.
 *
 * @param[in] priv
 *   Pointer to mlx5_priv instance.
 *
 * @return
 *   0 on success, negative with rte_errno set.
 */
int
mlx5_lwm_setup(struct mlx5_priv *priv)
{
        int fd_lwm;

        pthread_mutex_init(&priv->sh->lwm_config_lock, NULL);
        priv->sh->devx_channel_lwm = mlx5_os_devx_create_event_channel
                (priv->sh->cdev->ctx,
                 MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
        if (!priv->sh->devx_channel_lwm)
                goto err;
        fd_lwm = mlx5_os_get_devx_channel_fd(priv->sh->devx_channel_lwm);
        priv->sh->intr_handle_lwm = mlx5_os_interrupt_handler_create
                (RTE_INTR_INSTANCE_F_SHARED, true,
                 fd_lwm, mlx5_dev_interrupt_handler_lwm, priv);
        if (!priv->sh->intr_handle_lwm)
                goto err;
        return 0;
err:
        if (priv->sh->devx_channel_lwm) {
                mlx5_os_devx_destroy_event_channel
                        (priv->sh->devx_channel_lwm);
                priv->sh->devx_channel_lwm = NULL;
        }
        pthread_mutex_destroy(&priv->sh->lwm_config_lock);
        return -rte_errno;
}

/**
 * Destroy LWM event_channel and interrupt handle for shared device
 * context before this context is freed. The interrupt handler is also
 * unregistered.
 *
 * @param[in] sh
 *   Pointer to shared device context.
 */
void
mlx5_lwm_unset(struct mlx5_dev_ctx_shared *sh)
{
        if (sh->intr_handle_lwm) {
                mlx5_os_interrupt_handler_destroy(sh->intr_handle_lwm,
                        mlx5_dev_interrupt_handler_lwm, (void *)-1);
                sh->intr_handle_lwm = NULL;
        }
        if (sh->devx_channel_lwm) {
                mlx5_os_devx_destroy_event_channel
                        (sh->devx_channel_lwm);
                sh->devx_channel_lwm = NULL;
        }
        pthread_mutex_destroy(&sh->lwm_config_lock);
}
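/*
 * Illustrative sketch (not part of the driver), assuming the generic ethdev
 * available-descriptor-threshold API introduced around DPDK 22.07: an
 * application arms the LWM event created above through that API; the port
 * and queue identifiers below are placeholders.
 *
 *      ret = rte_eth_rx_avail_thresh_set(port_id, rx_queue_id, 70);
 *      if (ret < 0)
 *              rte_exit(EXIT_FAILURE, "cannot set Rx threshold\n");
 */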
/**
 * Free shared IB device context. Decrement counter and if zero free
 * all allocated resources and close handles.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free
 */
void
mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
        int ret;
        int i = 0;

        pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
        /* Check the object presence in the list. */
        struct mlx5_dev_ctx_shared *lctx;

        LIST_FOREACH(lctx, &mlx5_dev_ctx_list, next)
                if (lctx == sh)
                        break;
        MLX5_ASSERT(lctx);
        if (lctx != sh) {
                DRV_LOG(ERR, "Freeing non-existing shared IB context");
                goto exit;
        }
#endif
        MLX5_ASSERT(sh);
        MLX5_ASSERT(sh->refcnt);
        /* Secondary process should not free the shared context. */
        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        if (--sh->refcnt)
                goto exit;
        /* Stop watching for mempool events and unregister all mempools. */
        if (!sh->cdev->config.mr_mempool_reg_en) {
                ret = rte_mempool_event_callback_unregister
                                (mlx5_dev_ctx_shared_rx_mempool_event_cb, sh);
                if (ret == 0)
                        rte_mempool_walk
                             (mlx5_dev_ctx_shared_rx_mempool_unregister_cb, sh);
        }
        /* Remove context from the global device list. */
        LIST_REMOVE(sh, next);
        /* Release resources on the last device removal. */
        if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
                mlx5_os_net_cleanup();
                mlx5_flow_os_release_workspace();
        }
        pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
        if (sh->flex_parsers_dv) {
                mlx5_list_destroy(sh->flex_parsers_dv);
                sh->flex_parsers_dv = NULL;
        }
        /*
         * Ensure there is no async event handler installed.
         * Only primary process handles async device events.
         */
        mlx5_flow_counters_mng_close(sh);
        if (sh->ct_mng)
                mlx5_flow_aso_ct_mng_close(sh);
        if (sh->aso_age_mng) {
                mlx5_flow_aso_age_mng_close(sh);
                sh->aso_age_mng = NULL;
        }
        if (sh->mtrmng)
                mlx5_aso_flow_mtrs_mng_close(sh);
        mlx5_flow_ipool_destroy(sh);
        mlx5_os_dev_shared_handler_uninstall(sh);
        mlx5_rxtx_uars_release(sh);
        do {
                if (sh->tis[i])
                        claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
        } while (++i < sh->bond.n_port);
        if (sh->td)
                claim_zero(mlx5_devx_cmd_destroy(sh->td));
#ifdef HAVE_MLX5_HWS_SUPPORT
        /* HWS manages geneve_tlv_option resource as global. */
        if (sh->config.dv_flow_en == 2)
                flow_dev_geneve_tlv_option_resource_release(sh);
        else
#endif
                MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
        pthread_mutex_destroy(&sh->txpp.mutex);
        mlx5_lwm_unset(sh);
        mlx5_free(sh);
        return;
exit:
        pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
}
/**
 * Destroy table hash list.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
void
mlx5_free_table_hash_list(struct mlx5_priv *priv)
{
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_hlist **tbls = (priv->sh->config.dv_flow_en == 2) ?
                                   &sh->groups : &sh->flow_tbls;
        if (*tbls == NULL)
                return;
        mlx5_hlist_destroy(*tbls);
        *tbls = NULL;
}
#ifdef HAVE_MLX5_HWS_SUPPORT
/**
 * Allocate HW steering group hash list.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
static int
mlx5_alloc_hw_group_hash_list(struct mlx5_priv *priv)
{
        int err = 0;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        char s[MLX5_NAME_SIZE];

        MLX5_ASSERT(sh);
        snprintf(s, sizeof(s), "%s_flow_groups", priv->sh->ibdev_name);
        sh->groups = mlx5_hlist_create
                        (s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
                         false, true, sh,
                         flow_hw_grp_create_cb,
                         flow_hw_grp_match_cb,
                         flow_hw_grp_remove_cb,
                         flow_hw_grp_clone_cb,
                         flow_hw_grp_clone_free_cb);
        if (!sh->groups) {
                DRV_LOG(ERR, "flow groups with hash creation failed.");
                err = ENOMEM;
        }
        return err;
}
#endif
/**
 * Initialize flow table hash list and create the root tables entry
 * for each domain.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
int
mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
{
        int err = 0;

        /* Tables are only used in DV and DR modes. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        char s[MLX5_NAME_SIZE];

#ifdef HAVE_MLX5_HWS_SUPPORT
        if (priv->sh->config.dv_flow_en == 2)
                return mlx5_alloc_hw_group_hash_list(priv);
#endif
        MLX5_ASSERT(sh);
        snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
        sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
                                          false, true, sh,
                                          flow_dv_tbl_create_cb,
                                          flow_dv_tbl_match_cb,
                                          flow_dv_tbl_remove_cb,
                                          flow_dv_tbl_clone_cb,
                                          flow_dv_tbl_clone_free_cb);
        if (!sh->flow_tbls) {
                DRV_LOG(ERR, "flow tables with hash creation failed.");
                err = ENOMEM;
                return err;
        }
#ifndef HAVE_MLX5DV_DR
        struct rte_flow_error error;
        struct rte_eth_dev *dev = &rte_eth_devices[priv->dev_data->port_id];

        /*
         * Without DR support, the zero tables must still be created
         * because DV expects to see them even if RDMA-CORE cannot
         * create them.
         */
        if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 0,
                                      NULL, 0, 1, 0, &error) ||
            !flow_dv_tbl_resource_get(dev, 0, 1, 0, 0,
                                      NULL, 0, 1, 0, &error) ||
            !flow_dv_tbl_resource_get(dev, 0, 0, 1, 0,
                                      NULL, 0, 1, 0, &error)) {
                err = ENOMEM;
                goto error;
        }
        return err;
error:
        mlx5_free_table_hash_list(priv);
#endif /* HAVE_MLX5DV_DR */
#endif
        return err;
}
/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
        const char *val = getenv(name);

        if (val == NULL)
                return 0;
        return atoi(val);
}
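/*
 * Usage sketch (not part of the driver): a missing variable and a variable
 * explicitly set to "0" are indistinguishable here, so callers treat both
 * as "disabled". The variable and helper names below are purely
 * illustrative.
 *
 *      if (mlx5_getenv_int("MLX5_EXAMPLE_KNOB"))
 *              enable_example_behavior();
 */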
/**
 * DPDK callback to add a UDP tunnel port.
 *
 * @param[in] dev
 *   A pointer to the Ethernet device.
 * @param[in] udp_tunnel
 *   A pointer to the UDP tunnel description.
 *
 * @return
 *   0 on valid UDP ports and tunnels, -ENOTSUP otherwise.
 */
int
mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
                         struct rte_eth_udp_tunnel *udp_tunnel)
{
        MLX5_ASSERT(udp_tunnel != NULL);
        if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN &&
            udp_tunnel->udp_port == 4789)
                return 0;
        if (udp_tunnel->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN_GPE &&
            udp_tunnel->udp_port == 4790)
                return 0;
        return -ENOTSUP;
}
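/*
 * Illustrative sketch (not part of the driver): only the default VXLAN
 * (4789) and VXLAN-GPE (4790) ports are accepted, so an application call
 * like the following succeeds, while any other port yields -ENOTSUP.
 *
 *      struct rte_eth_udp_tunnel tunnel = {
 *              .udp_port = 4789,
 *              .prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *      };
 *      ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */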
/**
 * Initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_proc_priv_init(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_proc_priv *ppriv;
        size_t ppriv_size;

        mlx5_proc_priv_uninit(dev);
        /*
         * UAR register table follows the process private structure. BlueFlame
         * registers for Tx queues are stored in the table.
         */
        ppriv_size = sizeof(struct mlx5_proc_priv) +
                     priv->txqs_n * sizeof(struct mlx5_uar_data);
        ppriv = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ppriv_size,
                            RTE_CACHE_LINE_SIZE, dev->device->numa_node);
        if (!ppriv) {
                rte_errno = ENOMEM;
                return -rte_errno;
        }
        ppriv->uar_table_sz = priv->txqs_n;
        dev->process_private = ppriv;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                priv->sh->pppriv = ppriv;
        return 0;
}

/**
 * Un-initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
{
        if (!dev->process_private)
                return;
        mlx5_free(dev->process_private);
        dev->process_private = NULL;
}
/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
int
mlx5_dev_close(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;
        int ret;

        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                /* Check if process_private released. */
                if (!dev->process_private)
                        return 0;
                mlx5_tx_uar_uninit_secondary(dev);
                mlx5_proc_priv_uninit(dev);
                rte_eth_dev_release_port(dev);
                return 0;
        }
        if (!priv->sh)
                return 0;
        DRV_LOG(DEBUG, "port %u closing device \"%s\"",
                dev->data->port_id,
                ((priv->sh->cdev->ctx != NULL) ?
                mlx5_os_get_ctx_device_name(priv->sh->cdev->ctx) : ""));
        /*
         * If default mreg copy action is removed at the stop stage,
         * the search will return none and nothing will be done anymore.
         */
        if (priv->sh->config.dv_flow_en != 2)
                mlx5_flow_stop_default(dev);
        mlx5_traffic_disable(dev);
        /*
         * If all the flows are already flushed in the device stop stage,
         * then this will return directly without any action.
         */
        mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
        mlx5_action_handle_flush(dev);
        mlx5_flow_meter_flush(dev, NULL);
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
        dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
        rte_wmb();
        /* Disable datapath on secondary process. */
        mlx5_mp_os_req_stop_rxtx(dev);
        /* Free the eCPRI flex parser resource. */
        mlx5_flex_parser_ecpri_release(dev);
        mlx5_flex_item_port_cleanup(dev);
#ifdef HAVE_MLX5_HWS_SUPPORT
        flow_hw_destroy_vport_action(dev);
        flow_hw_resource_release(dev);
        flow_hw_clear_port_info(dev);
        if (priv->sh->config.dv_flow_en == 2) {
                flow_hw_clear_flow_metadata_config();
                flow_hw_clear_tags_set(dev);
        }
#endif
        if (priv->rxq_privs != NULL) {
                /* XXX race condition if mlx5_rx_burst() is still running. */
                rte_delay_us_sleep(1000);
                for (i = 0; (i != priv->rxqs_n); ++i)
                        mlx5_rxq_release(dev, i);
                priv->rxqs_n = 0;
                mlx5_free(priv->rxq_privs);
                priv->rxq_privs = NULL;
        }
        if (priv->txqs != NULL) {
                /* XXX race condition if mlx5_tx_burst() is still running. */
                rte_delay_us_sleep(1000);
                for (i = 0; (i != priv->txqs_n); ++i)
                        mlx5_txq_release(dev, i);
                priv->txqs_n = 0;
                priv->txqs = NULL;
        }
        mlx5_proc_priv_uninit(dev);
        if (priv->q_counters) {
                mlx5_devx_cmd_destroy(priv->q_counters);
                priv->q_counters = NULL;
        }
        if (priv->drop_queue.hrxq)
                mlx5_drop_action_destroy(dev);
        if (priv->mreg_cp_tbl)
                mlx5_hlist_destroy(priv->mreg_cp_tbl);
        mlx5_mprq_free_mp(dev);
        mlx5_os_free_shared_dr(priv);
        if (priv->rss_conf.rss_key != NULL)
                mlx5_free(priv->rss_conf.rss_key);
        if (priv->reta_idx != NULL)
                mlx5_free(priv->reta_idx);
        if (priv->sh->dev_cap.vf)
                mlx5_os_mac_addr_flush(dev);
        if (priv->nl_socket_route >= 0)
                close(priv->nl_socket_route);
        if (priv->nl_socket_rdma >= 0)
                close(priv->nl_socket_rdma);
        if (priv->vmwa_context)
                mlx5_vlan_vmwa_exit(priv->vmwa_context);
        ret = mlx5_hrxq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
                        dev->data->port_id);
        ret = mlx5_ind_table_obj_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some indirection table still remain",
                        dev->data->port_id);
        ret = mlx5_rxq_obj_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
                        dev->data->port_id);
        ret = mlx5_ext_rxq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "Port %u some external RxQ still remain.",
                        dev->data->port_id);
        ret = mlx5_rxq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Rx queues still remain",
                        dev->data->port_id);
        ret = mlx5_txq_obj_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
                        dev->data->port_id);
        ret = mlx5_txq_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some Tx queues still remain",
                        dev->data->port_id);
        ret = mlx5_flow_verify(dev);
        if (ret)
                DRV_LOG(WARNING, "port %u some flows still remain",
                        dev->data->port_id);
        if (priv->hrxqs)
                mlx5_list_destroy(priv->hrxqs);
        mlx5_free(priv->ext_rxqs);
        priv->sh->port[priv->dev_port - 1].nl_ih_port_id = RTE_MAX_ETHPORTS;
        /*
         * The interrupt handler port id must be reset before priv is reset
         * since 'mlx5_dev_interrupt_nl_cb' uses priv.
         */
        rte_io_wmb();
        /*
         * Free the shared context in last turn, because the cleanup
         * routines above may use some shared fields, like
         * mlx5_os_mac_addr_flush() uses ibdev_path for retrieving
         * ifindex if Netlink fails.
         */
        mlx5_free_shared_dev_ctx(priv->sh);
        if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
                unsigned int c = 0;
                uint16_t port_id;

                MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
                        struct mlx5_priv *opriv =
                                rte_eth_devices[port_id].data->dev_private;

                        if (!opriv ||
                            opriv->domain_id != priv->domain_id ||
                            &rte_eth_devices[port_id] == dev)
                                continue;
                        ++c;
                        break;
                }
                if (!c)
                        claim_zero(rte_eth_switch_domain_free(priv->domain_id));
        }
        memset(priv, 0, sizeof(*priv));
        priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
        /*
         * Reset mac_addrs to NULL such that it is not freed as part of
         * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
         * it is freed when dev_private is freed.
         */
        dev->data->mac_addrs = NULL;
        return 0;
}
const struct eth_dev_ops mlx5_dev_ops = {
        .dev_configure = mlx5_dev_configure,
        .dev_start = mlx5_dev_start,
        .dev_stop = mlx5_dev_stop,
        .dev_set_link_down = mlx5_set_link_down,
        .dev_set_link_up = mlx5_set_link_up,
        .dev_close = mlx5_dev_close,
        .promiscuous_enable = mlx5_promiscuous_enable,
        .promiscuous_disable = mlx5_promiscuous_disable,
        .allmulticast_enable = mlx5_allmulticast_enable,
        .allmulticast_disable = mlx5_allmulticast_disable,
        .link_update = mlx5_link_update,
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
        .xstats_get = mlx5_xstats_get,
        .xstats_reset = mlx5_xstats_reset,
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
        .representor_info_get = mlx5_representor_info_get,
        .read_clock = mlx5_txpp_read_clock,
        .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
        .vlan_filter_set = mlx5_vlan_filter_set,
        .rx_queue_setup = mlx5_rx_queue_setup,
        .rx_queue_avail_thresh_set = mlx5_rx_queue_lwm_set,
        .rx_queue_avail_thresh_query = mlx5_rx_queue_lwm_query,
        .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
        .tx_queue_setup = mlx5_tx_queue_setup,
        .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
        .rx_queue_release = mlx5_rx_queue_release,
        .tx_queue_release = mlx5_tx_queue_release,
        .rx_queue_start = mlx5_rx_queue_start,
        .rx_queue_stop = mlx5_rx_queue_stop,
        .tx_queue_start = mlx5_tx_queue_start,
        .tx_queue_stop = mlx5_tx_queue_stop,
        .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
        .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
        .mac_addr_remove = mlx5_mac_addr_remove,
        .mac_addr_add = mlx5_mac_addr_add,
        .mac_addr_set = mlx5_mac_addr_set,
        .set_mc_addr_list = mlx5_set_mc_addr_list,
        .mtu_set = mlx5_dev_set_mtu,
        .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
        .vlan_offload_set = mlx5_vlan_offload_set,
        .reta_update = mlx5_dev_rss_reta_update,
        .reta_query = mlx5_dev_rss_reta_query,
        .rss_hash_update = mlx5_rss_hash_update,
        .rss_hash_conf_get = mlx5_rss_hash_conf_get,
        .flow_ops_get = mlx5_flow_ops_get,
        .rxq_info_get = mlx5_rxq_info_get,
        .txq_info_get = mlx5_txq_info_get,
        .rx_burst_mode_get = mlx5_rx_burst_mode_get,
        .tx_burst_mode_get = mlx5_tx_burst_mode_get,
        .rx_queue_intr_enable = mlx5_rx_intr_enable,
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
        .is_removed = mlx5_is_removed,
        .udp_tunnel_port_add = mlx5_udp_tunnel_port_add,
        .get_module_info = mlx5_get_module_info,
        .get_module_eeprom = mlx5_get_module_eeprom,
        .hairpin_cap_get = mlx5_hairpin_cap_get,
        .mtr_ops_get = mlx5_flow_meter_ops_get,
        .hairpin_bind = mlx5_hairpin_bind,
        .hairpin_unbind = mlx5_hairpin_unbind,
        .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
        .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
        .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
        .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
        .get_monitor_addr = mlx5_get_monitor_addr,
};
/* Available operations from secondary process. */
const struct eth_dev_ops mlx5_dev_sec_ops = {
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
        .xstats_get = mlx5_xstats_get,
        .xstats_reset = mlx5_xstats_reset,
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
        .representor_info_get = mlx5_representor_info_get,
        .read_clock = mlx5_txpp_read_clock,
        .rx_queue_start = mlx5_rx_queue_start,
        .rx_queue_stop = mlx5_rx_queue_stop,
        .tx_queue_start = mlx5_tx_queue_start,
        .tx_queue_stop = mlx5_tx_queue_stop,
        .rxq_info_get = mlx5_rxq_info_get,
        .txq_info_get = mlx5_txq_info_get,
        .rx_burst_mode_get = mlx5_rx_burst_mode_get,
        .tx_burst_mode_get = mlx5_tx_burst_mode_get,
        .get_module_info = mlx5_get_module_info,
        .get_module_eeprom = mlx5_get_module_eeprom,
};
/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
        .dev_configure = mlx5_dev_configure,
        .dev_start = mlx5_dev_start,
        .dev_stop = mlx5_dev_stop,
        .dev_set_link_down = mlx5_set_link_down,
        .dev_set_link_up = mlx5_set_link_up,
        .dev_close = mlx5_dev_close,
        .promiscuous_enable = mlx5_promiscuous_enable,
        .promiscuous_disable = mlx5_promiscuous_disable,
        .allmulticast_enable = mlx5_allmulticast_enable,
        .allmulticast_disable = mlx5_allmulticast_disable,
        .link_update = mlx5_link_update,
        .stats_get = mlx5_stats_get,
        .stats_reset = mlx5_stats_reset,
        .xstats_get = mlx5_xstats_get,
        .xstats_reset = mlx5_xstats_reset,
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
        .representor_info_get = mlx5_representor_info_get,
        .read_clock = mlx5_txpp_read_clock,
        .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
        .vlan_filter_set = mlx5_vlan_filter_set,
        .rx_queue_setup = mlx5_rx_queue_setup,
        .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
        .tx_queue_setup = mlx5_tx_queue_setup,
        .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
        .rx_queue_release = mlx5_rx_queue_release,
        .tx_queue_release = mlx5_tx_queue_release,
        .rx_queue_start = mlx5_rx_queue_start,
        .rx_queue_stop = mlx5_rx_queue_stop,
        .tx_queue_start = mlx5_tx_queue_start,
        .tx_queue_stop = mlx5_tx_queue_stop,
        .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
        .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
        .mac_addr_remove = mlx5_mac_addr_remove,
        .mac_addr_add = mlx5_mac_addr_add,
        .mac_addr_set = mlx5_mac_addr_set,
        .set_mc_addr_list = mlx5_set_mc_addr_list,
        .mtu_set = mlx5_dev_set_mtu,
        .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
        .vlan_offload_set = mlx5_vlan_offload_set,
        .flow_ops_get = mlx5_flow_ops_get,
        .rxq_info_get = mlx5_rxq_info_get,
        .txq_info_get = mlx5_txq_info_get,
        .rx_burst_mode_get = mlx5_rx_burst_mode_get,
        .tx_burst_mode_get = mlx5_tx_burst_mode_get,
        .rx_queue_intr_enable = mlx5_rx_intr_enable,
        .rx_queue_intr_disable = mlx5_rx_intr_disable,
        .is_removed = mlx5_is_removed,
        .get_module_info = mlx5_get_module_info,
        .get_module_eeprom = mlx5_get_module_eeprom,
        .hairpin_cap_get = mlx5_hairpin_cap_get,
        .mtr_ops_get = mlx5_flow_meter_ops_get,
        .hairpin_bind = mlx5_hairpin_bind,
        .hairpin_unbind = mlx5_hairpin_unbind,
        .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
        .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
        .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
        .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
        .get_monitor_addr = mlx5_get_monitor_addr,
};
/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_port_args_check_handler(const char *key, const char *val, void *opaque)
{
        struct mlx5_port_config *config = opaque;
        signed long tmp;

        /* No-op, port representors are processed in mlx5_dev_spawn(). */
        if (!strcmp(MLX5_REPRESENTOR, key))
                return 0;
        errno = 0;
        tmp = strtol(val, NULL, 0);
        if (errno) {
                rte_errno = errno;
                DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
                return -rte_errno;
        }
        if (tmp < 0) {
                /* Negative values are acceptable for some keys only. */
                rte_errno = EINVAL;
                DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
                return -rte_errno;
        }
        if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
                if (tmp > MLX5_CQE_RESP_FORMAT_L34H_STRIDX) {
                        DRV_LOG(ERR, "invalid CQE compression "
                                     "format parameter");
                        rte_errno = EINVAL;
                        return -rte_errno;
                }
                config->cqe_comp = !!tmp;
                config->cqe_comp_fmt = tmp;
        } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
                config->hw_padding = !!tmp;
        } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
                config->mprq.enabled = !!tmp;
        } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
                config->mprq.log_stride_num = tmp;
        } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) {
                config->mprq.log_stride_size = tmp;
        } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
                config->mprq.max_memcpy_len = tmp;
        } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
                config->mprq.min_rxqs_num = tmp;
        } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter,"
                        " converted to txq_inline_max", key);
                config->txq_inline_max = tmp;
        } else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) {
                config->txq_inline_max = tmp;
        } else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) {
                config->txq_inline_min = tmp;
        } else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) {
                config->txq_inline_mpw = tmp;
        } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
                config->txqs_inline = tmp;
        } else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
        } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
                config->mps = !!tmp;
        } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
        } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter,"
                        " converted to txq_inline_mpw", key);
                config->txq_inline_mpw = tmp;
        } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
                DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
        } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
                config->rx_vec_en = !!tmp;
        } else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
                config->max_dump_files_num = tmp;
        } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
                config->lro_timeout = tmp;
        } else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) {
                config->log_hp_size = tmp;
        } else if (strcmp(MLX5_DELAY_DROP, key) == 0) {
                config->std_delay_drop = !!(tmp & MLX5_DELAY_DROP_STANDARD);
                config->hp_delay_drop = !!(tmp & MLX5_DELAY_DROP_HAIRPIN);
        }
        return 0;
}
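/*
 * Illustrative sketch (not part of the driver): the keys handled above are
 * passed as comma-separated devargs on the EAL command line; the PCI
 * address below is a placeholder.
 *
 *      dpdk-testpmd -a 0000:03:00.0,rxq_cqe_comp_en=1,mprq_en=1,txq_inline_max=256 -- -i
 */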
/**
 * Parse user port parameters and adjust them according to device capabilities.
 *
 * @param priv
 *   Pointer to the private device data structure.
 * @param mkvlist
 *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
 * @param config
 *   Pointer to port configuration structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist,
                      struct mlx5_port_config *config)
{
        struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
        struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
        bool devx = priv->sh->cdev->config.devx;
        const char **params = (const char *[]){
                MLX5_RXQ_CQE_COMP_EN,
                MLX5_RXQ_PKT_PAD_EN,
                MLX5_RX_MPRQ_EN,
                MLX5_RX_MPRQ_LOG_STRIDE_NUM,
                MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
                MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
                MLX5_RXQS_MIN_MPRQ,
                MLX5_TXQ_INLINE,
                MLX5_TXQ_INLINE_MIN,
                MLX5_TXQ_INLINE_MAX,
                MLX5_TXQ_INLINE_MPW,
                MLX5_TXQS_MIN_INLINE,
                MLX5_TXQS_MAX_VEC,
                MLX5_TXQ_MPW_EN,
                MLX5_TXQ_MPW_HDR_DSEG_EN,
                MLX5_TXQ_MAX_INLINE_LEN,
                MLX5_TX_VEC_EN,
                MLX5_RX_VEC_EN,
                MLX5_REPRESENTOR,
                MLX5_MAX_DUMP_FILES_NUM,
                MLX5_LRO_TIMEOUT_USEC,
                MLX5_HP_BUF_SIZE,
                MLX5_DELAY_DROP,
                NULL,
        };
        int ret = 0;

        /* Default configuration. */
        memset(config, 0, sizeof(*config));
        config->mps = MLX5_ARG_UNSET;
        config->cqe_comp = 1;
        config->rx_vec_en = 1;
        config->txq_inline_max = MLX5_ARG_UNSET;
        config->txq_inline_min = MLX5_ARG_UNSET;
        config->txq_inline_mpw = MLX5_ARG_UNSET;
        config->txqs_inline = MLX5_ARG_UNSET;
        config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
        config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
        config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
        config->log_hp_size = MLX5_ARG_UNSET;
        config->std_delay_drop = 0;
        config->hp_delay_drop = 0;
        if (mkvlist != NULL) {
                /* Process parameters. */
                ret = mlx5_kvargs_process(mkvlist, params,
                                          mlx5_port_args_check_handler, config);
                if (ret) {
                        DRV_LOG(ERR, "Failed to process port arguments: %s",
                                strerror(rte_errno));
                        return -rte_errno;
                }
        }
        /* Adjust parameters according to device capabilities. */
        if (config->hw_padding && !dev_cap->hw_padding) {
                DRV_LOG(DEBUG, "Rx end alignment padding isn't supported.");
                config->hw_padding = 0;
        } else if (config->hw_padding) {
                DRV_LOG(DEBUG, "Rx end alignment padding is enabled.");
        }
        /*
         * MPW is disabled by default, while the Enhanced MPW is enabled
         * by default.
         */
        if (config->mps == MLX5_ARG_UNSET)
                config->mps = (dev_cap->mps == MLX5_MPW_ENHANCED) ?
                              MLX5_MPW_ENHANCED : MLX5_MPW_DISABLED;
        else
                config->mps = config->mps ? dev_cap->mps : MLX5_MPW_DISABLED;
        DRV_LOG(INFO, "%sMPS is %s",
                config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
                config->mps == MLX5_MPW ? "legacy " : "",
                config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
        if (priv->sh->config.lro_allowed) {
                /*
                 * If LRO timeout is not configured by application,
                 * use the minimal supported value.
                 */
                if (!config->lro_timeout)
                        config->lro_timeout =
                                       hca_attr->lro_timer_supported_periods[0];
                DRV_LOG(DEBUG, "LRO session timeout set to %d usec.",
                        config->lro_timeout);
        }
        if (config->cqe_comp && !dev_cap->cqe_comp) {
                DRV_LOG(WARNING, "Rx CQE 128B compression is not supported.");
                config->cqe_comp = 0;
        }
        if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
            (!devx || !hca_attr->mini_cqe_resp_flow_tag)) {
                DRV_LOG(WARNING,
                        "Flow Tag CQE compression format isn't supported.");
                config->cqe_comp = 0;
        }
        if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
            (!devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
                DRV_LOG(WARNING,
                        "L3/L4 Header CQE compression format isn't supported.");
                config->cqe_comp = 0;
        }
        DRV_LOG(DEBUG, "Rx CQE compression is %ssupported.",
                config->cqe_comp ? "" : "not ");
        if ((config->std_delay_drop || config->hp_delay_drop) &&
            !dev_cap->rq_delay_drop_en) {
                config->std_delay_drop = 0;
                config->hp_delay_drop = 0;
                DRV_LOG(WARNING, "dev_port-%u: Rxq delay drop isn't supported.",
                        priv->dev_port);
        }
        if (config->mprq.enabled && !priv->sh->dev_cap.mprq.enabled) {
                DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
                config->mprq.enabled = 0;
        }
        if (config->max_dump_files_num == 0)
                config->max_dump_files_num = 128;
        /* Detect minimal data bytes to inline. */
        mlx5_set_min_inline(priv);
        DRV_LOG(DEBUG, "VLAN insertion in WQE is %ssupported.",
                config->hw_vlan_insert ? "" : "not ");
        DRV_LOG(DEBUG, "\"rxq_pkt_pad_en\" is %u.", config->hw_padding);
        DRV_LOG(DEBUG, "\"rxq_cqe_comp_en\" is %u.", config->cqe_comp);
        DRV_LOG(DEBUG, "\"cqe_comp_fmt\" is %u.", config->cqe_comp_fmt);
        DRV_LOG(DEBUG, "\"rx_vec_en\" is %u.", config->rx_vec_en);
        DRV_LOG(DEBUG, "Standard \"delay_drop\" is %u.",
                config->std_delay_drop);
        DRV_LOG(DEBUG, "Hairpin \"delay_drop\" is %u.", config->hp_delay_drop);
        DRV_LOG(DEBUG, "\"max_dump_files_num\" is %u.",
                config->max_dump_files_num);
        DRV_LOG(DEBUG, "\"log_hp_size\" is %u.", config->log_hp_size);
        DRV_LOG(DEBUG, "\"mprq_en\" is %u.", config->mprq.enabled);
        DRV_LOG(DEBUG, "\"mprq_log_stride_num\" is %u.",
                config->mprq.log_stride_num);
        DRV_LOG(DEBUG, "\"mprq_log_stride_size\" is %u.",
                config->mprq.log_stride_size);
        DRV_LOG(DEBUG, "\"mprq_max_memcpy_len\" is %u.",
                config->mprq.max_memcpy_len);
        DRV_LOG(DEBUG, "\"rxqs_min_mprq\" is %u.", config->mprq.min_rxqs_num);
        DRV_LOG(DEBUG, "\"lro_timeout_usec\" is %u.", config->lro_timeout);
        DRV_LOG(DEBUG, "\"txq_mpw_en\" is %d.", config->mps);
        DRV_LOG(DEBUG, "\"txqs_min_inline\" is %d.", config->txqs_inline);
        DRV_LOG(DEBUG, "\"txq_inline_min\" is %d.", config->txq_inline_min);
        DRV_LOG(DEBUG, "\"txq_inline_max\" is %d.", config->txq_inline_max);
        DRV_LOG(DEBUG, "\"txq_inline_mpw\" is %d.", config->txq_inline_mpw);
        return 0;
}
/**
 * Print the key for device argument.
 *
 * It is a "dummy" handler whose whole purpose is to enable using the
 * mlx5_kvargs_process() function, which marks devargs as used.
 *
 * @param key
 *   Key argument.
 * @param val
 *   Value associated with key, unused.
 * @param opaque
 *   Unused, can be NULL.
 *
 * @return
 *   0 on success, function cannot fail.
 */
static int
mlx5_dummy_handler(const char *key, const char *val, void *opaque)
{
        DRV_LOG(DEBUG, "\tKey: \"%s\" is set as used.", key);
        RTE_SET_USED(opaque);
        RTE_SET_USED(val);
        return 0;
}

/**
 * Set requested devargs as used when device is already spawned.
 *
 * This is necessary because it is valid to probe again for an existing
 * device; if its devargs are not marked as used, mlx5_kvargs_validate()
 * will fail.
 *
 * @param name
 *   Name of the existing device.
 * @param port_id
 *   Port identifier of the device.
 * @param mkvlist
 *   Pointer to mlx5 kvargs control to mark as used.
 */
void
mlx5_port_args_set_used(const char *name, uint16_t port_id,
                        struct mlx5_kvargs_ctrl *mkvlist)
{
        const char **params = (const char *[]){
                MLX5_RXQ_CQE_COMP_EN,
                MLX5_RXQ_PKT_PAD_EN,
                MLX5_RX_MPRQ_EN,
                MLX5_RX_MPRQ_LOG_STRIDE_NUM,
                MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
                MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
                MLX5_RXQS_MIN_MPRQ,
                MLX5_TXQ_INLINE,
                MLX5_TXQ_INLINE_MIN,
                MLX5_TXQ_INLINE_MAX,
                MLX5_TXQ_INLINE_MPW,
                MLX5_TXQS_MIN_INLINE,
                MLX5_TXQS_MAX_VEC,
                MLX5_TXQ_MPW_EN,
                MLX5_TXQ_MPW_HDR_DSEG_EN,
                MLX5_TXQ_MAX_INLINE_LEN,
                MLX5_TX_VEC_EN,
                MLX5_RX_VEC_EN,
                MLX5_REPRESENTOR,
                MLX5_MAX_DUMP_FILES_NUM,
                MLX5_LRO_TIMEOUT_USEC,
                MLX5_HP_BUF_SIZE,
                MLX5_DELAY_DROP,
                NULL,
        };

        /* Secondary process should not handle devargs. */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
        MLX5_ASSERT(mkvlist != NULL);
        DRV_LOG(DEBUG, "Ethernet device \"%s\" for port %u "
                "already exists, set devargs as used:", name, port_id);
        /* This function cannot fail with this handler. */
        mlx5_kvargs_process(mkvlist, params, mlx5_dummy_handler, NULL);
}
2022-02-14 09:35:07 +00:00
|
|
|
/**
 * Check sibling device configurations when probing again.
 *
 * Sibling devices sharing an Infiniband device context should have
 * compatible configurations. This applies to representors and bonding
 * devices.
 *
 * @param cdev
 *   Pointer to mlx5 device structure.
 * @param mkvlist
 *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_probe_again_args_validate(struct mlx5_common_device *cdev,
			       struct mlx5_kvargs_ctrl *mkvlist)
{
	struct mlx5_dev_ctx_shared *sh = NULL;
	struct mlx5_sh_config *config;
	int ret;

	/* Secondary process should not handle devargs. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
	/* Search for IB context by common device pointer. */
	LIST_FOREACH(sh, &mlx5_dev_ctx_list, next)
		if (sh->cdev == cdev)
			break;
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	/* No sh for this device -> it isn't a probe again, nothing to check. */
	if (sh == NULL)
		return 0;
	config = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
			     sizeof(struct mlx5_sh_config),
			     RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (config == NULL) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/*
	 * Create a temporary IB context configuration structure according to
	 * the new devargs attached in probing again.
	 */
	ret = mlx5_shared_dev_ctx_args_config(sh, mkvlist, config);
	if (ret) {
		DRV_LOG(ERR, "Failed to process device configuration: %s",
			strerror(rte_errno));
		mlx5_free(config);
		return ret;
	}
	/*
	 * Check the match between the temporary structure and the existing
	 * IB context structure.
	 */
	if (sh->config.dv_flow_en ^ config->dv_flow_en) {
		DRV_LOG(ERR, "\"dv_flow_en\" "
			"configuration mismatch for shared %s context.",
			sh->ibdev_name);
		goto error;
	}
	if ((sh->config.dv_xmeta_en ^ config->dv_xmeta_en) ||
	    (sh->config.dv_miss_info ^ config->dv_miss_info)) {
		DRV_LOG(ERR, "\"dv_xmeta_en\" "
			"configuration mismatch for shared %s context.",
			sh->ibdev_name);
		goto error;
	}
	if (sh->config.dv_esw_en ^ config->dv_esw_en) {
		DRV_LOG(ERR, "\"dv_esw_en\" "
			"configuration mismatch for shared %s context.",
			sh->ibdev_name);
		goto error;
	}
	if (sh->config.reclaim_mode ^ config->reclaim_mode) {
		DRV_LOG(ERR, "\"reclaim_mode\" "
			"configuration mismatch for shared %s context.",
			sh->ibdev_name);
		goto error;
	}
	if (sh->config.allow_duplicate_pattern ^
	    config->allow_duplicate_pattern) {
		DRV_LOG(ERR, "\"allow_duplicate_pattern\" "
			"configuration mismatch for shared %s context.",
			sh->ibdev_name);
		goto error;
	}
	if (sh->config.fdb_def_rule ^ config->fdb_def_rule) {
		DRV_LOG(ERR, "\"fdb_def_rule_en\" configuration mismatch for shared %s context.",
			sh->ibdev_name);
		goto error;
	}
	if (sh->config.l3_vxlan_en ^ config->l3_vxlan_en) {
		DRV_LOG(ERR, "\"l3_vxlan_en\" "
			"configuration mismatch for shared %s context.",
			sh->ibdev_name);
		goto error;
	}
	if (sh->config.decap_en ^ config->decap_en) {
		DRV_LOG(ERR, "\"decap_en\" "
			"configuration mismatch for shared %s context.",
			sh->ibdev_name);
		goto error;
	}
	if (sh->config.lacp_by_user ^ config->lacp_by_user) {
		DRV_LOG(ERR, "\"lacp_by_user\" "
			"configuration mismatch for shared %s context.",
			sh->ibdev_name);
		goto error;
	}
	if (sh->config.tx_pp ^ config->tx_pp) {
		DRV_LOG(ERR, "\"tx_pp\" "
			"configuration mismatch for shared %s context.",
			sh->ibdev_name);
		goto error;
	}
	if (sh->config.tx_skew ^ config->tx_skew) {
		DRV_LOG(ERR, "\"tx_skew\" "
			"configuration mismatch for shared %s context.",
			sh->ibdev_name);
		goto error;
	}
	mlx5_free(config);
	return 0;
error:
	mlx5_free(config);
	rte_errno = EINVAL;
	return -rte_errno;
}

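/*
 * Illustrative scenario (an editorial sketch, not part of the driver): the
 * validation above only accepts a repeated probe of the same device when the
 * shared-context devargs match the first probe. The device name and devargs
 * below are hypothetical; rte_eal_hotplug_add() is the standard EAL hotplug
 * API.
 *
 *   rte_eal_hotplug_add("pci", "0000:08:00.0",
 *                       "dv_flow_en=1,representor=pf0vf0");      first probe
 *   rte_eal_hotplug_add("pci", "0000:08:00.0",
 *                       "dv_flow_en=1,representor=pf0vf[0-1]");  accepted
 *   rte_eal_hotplug_add("pci", "0000:08:00.0",
 *                       "dv_flow_en=0,representor=pf0vf[0-1]");  rejected, EINVAL
 */
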
/**
 * Configures the minimal amount of data to inline into a WQE
 * while sending packets.
 *
 * - txq_inline_min has the highest priority if this
 *   key is specified in devargs,
 * - if DevX is enabled, the inline mode is queried from the
 *   device (HCA attributes and NIC vport context if needed),
 * - otherwise, L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx
 *   and none (0 bytes) for other NICs.
 *
 * @param priv
 *   Pointer to the private device data structure.
 */
void
mlx5_set_min_inline(struct mlx5_priv *priv)
{
	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
	struct mlx5_port_config *config = &priv->config;

	if (config->txq_inline_min != MLX5_ARG_UNSET) {
		/* Application defines size of inlined data explicitly. */
		if (priv->pci_dev != NULL) {
			switch (priv->pci_dev->id.device_id) {
			case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
			case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
				if (config->txq_inline_min <
					    (int)MLX5_INLINE_HSIZE_L2) {
					DRV_LOG(DEBUG,
						"txq_inline_min aligned to minimal ConnectX-4 required value %d",
						(int)MLX5_INLINE_HSIZE_L2);
					config->txq_inline_min =
							MLX5_INLINE_HSIZE_L2;
				}
				break;
			}
		}
		goto exit;
	}
	if (hca_attr->eth_net_offloads) {
		/* We have DevX enabled, inline mode queried successfully. */
		switch (hca_attr->wqe_inline_mode) {
		case MLX5_CAP_INLINE_MODE_L2:
			/* Outer L2 header must be inlined. */
			config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
			goto exit;
		case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
			/* No inline data is required by the NIC. */
			config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
			config->hw_vlan_insert =
					hca_attr->wqe_vlan_insert;
			DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
			goto exit;
		case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
			/* Inline mode is defined by the NIC vport context. */
			if (!hca_attr->eth_virt)
				break;
			switch (hca_attr->vport_inline_mode) {
			case MLX5_INLINE_MODE_NONE:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_NONE;
				goto exit;
			case MLX5_INLINE_MODE_L2:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_L2;
				goto exit;
			case MLX5_INLINE_MODE_IP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_L3;
				goto exit;
			case MLX5_INLINE_MODE_TCP_UDP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_L4;
				goto exit;
			case MLX5_INLINE_MODE_INNER_L2:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_INNER_L2;
				goto exit;
			case MLX5_INLINE_MODE_INNER_IP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_INNER_L3;
				goto exit;
			case MLX5_INLINE_MODE_INNER_TCP_UDP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_INNER_L4;
				goto exit;
			}
		}
	}
	if (priv->pci_dev == NULL) {
		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
		goto exit;
	}
	/*
	 * We get here if we are unable to deduce
	 * inline data size with DevX. Try PCI ID
	 * to determine old NICs.
	 */
	switch (priv->pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
		config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
		config->hw_vlan_insert = 0;
		break;
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
		/*
		 * These NICs support VLAN insertion from the WQE and
		 * report the wqe_vlan_insert flag, but there is a bug
		 * that may break PFC control, so disable the feature.
		 */
		config->hw_vlan_insert = 0;
		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
		break;
	default:
		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
		break;
	}
exit:
	DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min);
}

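/*
 * Illustrative usage (an editorial sketch, the PCI address is hypothetical):
 * the heuristic above can be overridden from the application side with the
 * txq_inline_min devarg, e.g. forcing the L2 header (18 bytes) to be inlined:
 *
 *   dpdk-testpmd -a 0000:08:00.0,txq_inline_min=18 -- -i
 */
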
/**
 * Configures the metadata mask fields in the shared context.
 *
 * @param [in] dev
 *   Pointer to Ethernet device.
 */
void
mlx5_set_metadata_mask(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	uint32_t meta, mark, reg_c0;

	reg_c0 = ~priv->vport_meta_mask;
	switch (sh->config.dv_xmeta_en) {
	case MLX5_XMETA_MODE_LEGACY:
		meta = UINT32_MAX;
		mark = MLX5_FLOW_MARK_MASK;
		break;
	case MLX5_XMETA_MODE_META16:
		meta = reg_c0 >> rte_bsf32(reg_c0);
		mark = MLX5_FLOW_MARK_MASK;
		break;
	case MLX5_XMETA_MODE_META32:
		meta = UINT32_MAX;
		mark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK;
		break;
	case MLX5_XMETA_MODE_META32_HWS:
		meta = UINT32_MAX;
		mark = MLX5_FLOW_MARK_MASK;
		break;
	default:
		meta = 0;
		mark = 0;
		MLX5_ASSERT(false);
		break;
	}
	if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
		DRV_LOG(WARNING, "metadata MARK mask mismatch %08X:%08X",
			sh->dv_mark_mask, mark);
	else
		sh->dv_mark_mask = mark;
	if (sh->dv_meta_mask && sh->dv_meta_mask != meta)
		DRV_LOG(WARNING, "metadata META mask mismatch %08X:%08X",
			sh->dv_meta_mask, meta);
	else
		sh->dv_meta_mask = meta;
	if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0)
		DRV_LOG(WARNING, "metadata reg_c0 mask mismatch %08X:%08X",
			sh->dv_regc0_mask, reg_c0);
	else
		sh->dv_regc0_mask = reg_c0;
	DRV_LOG(DEBUG, "metadata mode %u", sh->config.dv_xmeta_en);
	DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask);
	DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask);
	DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask);
}

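/*
 * Worked example (editorial sketch, the mask value is hypothetical): with
 * priv->vport_meta_mask == 0xffff0000 in MLX5_XMETA_MODE_META16,
 *
 *   reg_c0 = ~0xffff0000                  = 0x0000ffff
 *   meta   = reg_c0 >> rte_bsf32(reg_c0)  = 0x0000ffff >> 0 = 0x0000ffff
 *   mark   = MLX5_FLOW_MARK_MASK
 *
 * i.e. only the bits of REG_C_0 not reserved for the vport metadata remain
 * available to carry the flow META value.
 */
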
int
rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n)
{
	static const char *const dynf_names[] = {
		RTE_PMD_MLX5_FINE_GRANULARITY_INLINE,
		RTE_MBUF_DYNFLAG_METADATA_NAME,
		RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME
	};
	unsigned int i;

	if (n < RTE_DIM(dynf_names))
		return -ENOMEM;
	for (i = 0; i < RTE_DIM(dynf_names); i++) {
		if (names[i] == NULL)
			return -EINVAL;
		strcpy(names[i], dynf_names[i]);
	}
	return RTE_DIM(dynf_names);
}

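/*
 * Illustrative usage from an application (an editorial sketch): the caller
 * provides one writable buffer per flag name; RTE_MBUF_DYN_NAMSIZ from
 * rte_mbuf_dyn.h is assumed to be large enough for each name.
 *
 *   char bufs[3][RTE_MBUF_DYN_NAMSIZ];
 *   char *names[3] = { bufs[0], bufs[1], bufs[2] };
 *   int nb = rte_pmd_mlx5_get_dyn_flag_names(names, RTE_DIM(names));
 *   int i;
 *
 *   for (i = 0; i < nb; i++)
 *           printf("mlx5 dynamic flag: %s\n", names[i]);
 */
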
/**
 * Look for the Ethernet device belonging to the mlx5 driver.
 *
 * @param[in] port_id
 *   port_id to start looking for the device.
 * @param[in] odev
 *   Pointer to the hint device. When a device is being probed, its siblings
 *   (master and preceding representors) might not have a driver assigned yet
 *   (because mlx5_os_pci_probe() has not completed); in this case a match on
 *   the hint device may be used to detect a sibling device.
 *
 * @return
 *   port_id of the found device, RTE_MAX_ETHPORTS if not found.
 */
uint16_t
mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)
{
	while (port_id < RTE_MAX_ETHPORTS) {
		struct rte_eth_dev *dev = &rte_eth_devices[port_id];

		if (dev->state != RTE_ETH_DEV_UNUSED &&
		    dev->device &&
		    (dev->device == odev ||
		     (dev->device->driver &&
		      dev->device->driver->name &&
		      ((strcmp(dev->device->driver->name,
			       MLX5_PCI_DRIVER_NAME) == 0) ||
		       (strcmp(dev->device->driver->name,
			       MLX5_AUXILIARY_DRIVER_NAME) == 0)))))
			break;
		port_id++;
	}
	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;
	return port_id;
}

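/*
 * Illustrative usage (an editorial sketch; the driver wraps the same pattern
 * in the MLX5_ETH_FOREACH_DEV() helper declared in mlx5.h): walk every mlx5
 * port spawned from a given rte_device. handle_port() is hypothetical.
 *
 *   uint16_t p;
 *
 *   for (p = mlx5_eth_find_next(0, odev);
 *        p < RTE_MAX_ETHPORTS;
 *        p = mlx5_eth_find_next(p + 1, odev))
 *           handle_port(&rte_eth_devices[p]);
 */
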
/**
 * Callback to remove a device.
 *
 * This function removes all Ethernet devices belonging to a given device.
 *
 * @param[in] cdev
 *   Pointer to the generic device.
 *
 * @return
 *   0 on success, a negative errno value (-EIO) otherwise.
 */
int
mlx5_net_remove(struct mlx5_common_device *cdev)
{
	uint16_t port_id;
	int ret = 0;

	RTE_ETH_FOREACH_DEV_OF(port_id, cdev->dev) {
		/*
		 * mlx5_dev_close() is not registered for the secondary
		 * process, so call the close function explicitly in that case.
		 */
		if (rte_eal_process_type() == RTE_PROC_SECONDARY)
			ret |= mlx5_dev_close(&rte_eth_devices[port_id]);
		else
			ret |= rte_eth_dev_close(port_id);
	}
	return ret == 0 ? 0 : -EIO;
}

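/*
 * Illustrative trigger (an editorial sketch; the device name is
 * hypothetical): this callback is reached through the common mlx5 driver
 * glue when the underlying device is removed, e.g. via the EAL hotplug API:
 *
 *   rte_eal_hotplug_remove("pci", "0000:08:00.0");
 *
 * which in turn closes every ethdev port spawned from that device.
 */
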
static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX7)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX7BF)
	},
	{
		.vendor_id = 0
	}
};

static struct mlx5_class_driver mlx5_net_driver = {
	.drv_class = MLX5_CLASS_ETH,
	.name = RTE_STR(MLX5_ETH_DRIVER_NAME),
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_os_net_probe,
	.remove = mlx5_net_remove,
	.probe_again = 1,
	.intr_lsc = 1,
	.intr_rmv = 1,
};

/* Initialize driver log type. */
RTE_LOG_REGISTER_DEFAULT(mlx5_logtype, NOTICE)

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init)
{
	pthread_mutex_init(&mlx5_dev_ctx_list_mutex, NULL);
	mlx5_common_init();
	/* Build the static tables for Verbs conversion. */
	mlx5_set_ptype_table();
	mlx5_set_cksum_table();
	mlx5_set_swp_types_table();
	if (mlx5_glue)
		mlx5_class_driver_register(&mlx5_net_driver);
}

RTE_PMD_EXPORT_NAME(MLX5_ETH_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_ETH_DRIVER_NAME, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_ETH_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");