2018-01-29 13:11:30 +00:00
|
|
|
/* SPDX-License-Identifier: BSD-3-Clause
|
|
|
|
* Copyright 2015 6WIND S.A.
|
2018-03-20 19:20:35 +00:00
|
|
|
* Copyright 2015 Mellanox Technologies, Ltd
|
2015-10-30 18:52:30 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef RTE_PMD_MLX5_DEFS_H_
|
|
|
|
#define RTE_PMD_MLX5_DEFS_H_
|
|
|
|
|
2021-01-29 16:48:19 +00:00
|
|
|
#include <ethdev_driver.h>
|
2019-10-23 13:19:27 +00:00
|
|
|
#include <rte_vxlan.h>
|
2018-01-03 09:14:19 +00:00
|
|
|
|
2021-10-19 20:55:47 +00:00
|
|
|
#include <mlx5_common_defs.h>
|
|
|
|
|
2016-03-03 14:26:43 +00:00
|
|
|
#include "mlx5_autoconf.h"
|
|
|
|
|
2015-10-30 18:52:40 +00:00
|
|
|
/* Maximum number of simultaneous VLAN filters. */
|
|
|
|
#define MLX5_MAX_VLAN_IDS 128
|
|
|
|
|
2016-06-24 13:17:55 +00:00
|
|
|
/*
|
|
|
|
* Request TX completion every time descriptors reach this threshold since
|
|
|
|
* the previous request. Must be a power of two for performance reasons.
|
|
|
|
*/
|
2019-07-29 12:41:05 +00:00
|
|
|
#define MLX5_TX_COMP_THRESH 32u
|
2015-10-30 18:52:31 +00:00
|
|
|
|
2017-03-15 23:55:44 +00:00
|
|
|
/*
|
|
|
|
* Request TX completion every time the total number of WQEBBs used for inlining
|
|
|
|
* packets exceeds the size of WQ divided by this divisor. Better to be power of
|
|
|
|
* two for performance.
|
|
|
|
*/
|
|
|
|
#define MLX5_TX_COMP_THRESH_INLINE_DIV (1 << 3)
|
|
|
|
|
2019-07-29 12:41:03 +00:00
|
|
|
/*
|
|
|
|
* Maximal amount of normal completion CQEs
|
|
|
|
* processed in one call of tx_burst() routine.
|
|
|
|
*/
|
|
|
|
#define MLX5_TX_COMP_MAX_CQE 2u
|
|
|
|
|
2015-10-30 18:52:36 +00:00
|
|
|
/*
|
|
|
|
* If defined, only use software counters. The PMD will never ask the hardware
|
|
|
|
* for these, and many of them won't be available.
|
|
|
|
*/
|
|
|
|
#ifndef MLX5_PMD_SOFT_COUNTERS
|
|
|
|
#define MLX5_PMD_SOFT_COUNTERS 1
|
|
|
|
#endif
|
|
|
|
|
2022-10-20 15:41:47 +00:00
|
|
|
/* Maximum number of DCS created per port. */
|
|
|
|
#define MLX5_HWS_CNT_DCS_NUM 4
|
|
|
|
|
2015-10-30 18:57:23 +00:00
|
|
|
/* Alarm timeout. */
|
|
|
|
#define MLX5_ALARM_TIMEOUT_US 100000
|
|
|
|
|
2017-01-17 14:37:08 +00:00
|
|
|
/* Maximum number of extended statistics counters. */
|
2022-05-26 02:49:41 +00:00
|
|
|
#define MLX5_MAX_XSTATS 64
|
2017-01-17 14:37:08 +00:00
|
|
|
|
2017-03-02 09:01:31 +00:00
|
|
|
/* Maximum Packet headers size (L2+L3+L4) for TSO. */
|
2022-01-13 14:32:29 +00:00
|
|
|
#define MLX5_MAX_TSO_HEADER 192U
|
2017-03-02 09:01:31 +00:00
|
|
|
|
2019-07-21 14:24:56 +00:00
|
|
|
/* Inline data size required by NICs. */
|
|
|
|
#define MLX5_INLINE_HSIZE_NONE 0
|
|
|
|
#define MLX5_INLINE_HSIZE_L2 (sizeof(struct rte_ether_hdr) + \
|
|
|
|
sizeof(struct rte_vlan_hdr))
|
|
|
|
#define MLX5_INLINE_HSIZE_L3 (MLX5_INLINE_HSIZE_L2 + \
|
|
|
|
sizeof(struct rte_ipv6_hdr))
|
|
|
|
#define MLX5_INLINE_HSIZE_L4 (MLX5_INLINE_HSIZE_L3 + \
|
|
|
|
sizeof(struct rte_tcp_hdr))
|
|
|
|
#define MLX5_INLINE_HSIZE_INNER_L2 (MLX5_INLINE_HSIZE_L3 + \
|
|
|
|
sizeof(struct rte_udp_hdr) + \
|
|
|
|
sizeof(struct rte_vxlan_hdr) + \
|
|
|
|
sizeof(struct rte_ether_hdr) + \
|
|
|
|
sizeof(struct rte_vlan_hdr))
|
|
|
|
#define MLX5_INLINE_HSIZE_INNER_L3 (MLX5_INLINE_HSIZE_INNER_L2 + \
|
|
|
|
sizeof(struct rte_ipv6_hdr))
|
|
|
|
#define MLX5_INLINE_HSIZE_INNER_L4 (MLX5_INLINE_HSIZE_INNER_L3 + \
|
|
|
|
sizeof(struct rte_tcp_hdr))
|
|
|
|
|
2017-07-06 18:41:10 +00:00
|
|
|
/* Threshold of buffer replenishment for vectorized Rx. */
|
2018-06-26 11:33:35 +00:00
|
|
|
#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \
|
|
|
|
(RTE_MIN(MLX5_VPMD_RX_MAX_BURST, (unsigned int)(n) >> 2))
|
2017-07-06 18:41:10 +00:00
|
|
|
|
|
|
|
/* Maximum size of burst for vectorized Rx. */
|
2018-06-26 11:33:35 +00:00
|
|
|
#define MLX5_VPMD_RX_MAX_BURST 64U
|
2017-07-06 18:41:10 +00:00
|
|
|
|
2019-11-15 11:35:06 +00:00
|
|
|
/* Recommended optimal burst size. */
|
|
|
|
#define MLX5_RX_DEFAULT_BURST 64U
|
|
|
|
#define MLX5_TX_DEFAULT_BURST 64U
|
|
|
|
|
2017-07-06 18:41:10 +00:00
|
|
|
/* Number of packets vectorized Rx can simultaneously process in a loop. */
|
|
|
|
#define MLX5_VPMD_DESCS_PER_LOOP 4
|
|
|
|
|
2019-12-18 10:05:47 +00:00
|
|
|
/* Mask of RSS on source only or destination only. */
|
2021-10-22 11:03:12 +00:00
|
|
|
#define MLX5_RSS_SRC_DST_ONLY (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY | \
|
|
|
|
RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
|
2019-12-18 10:05:47 +00:00
|
|
|
|
2018-01-03 09:14:19 +00:00
|
|
|
/* Supported RSS */
|
2021-10-22 11:03:12 +00:00
|
|
|
#define MLX5_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP | \
|
2022-05-12 09:17:11 +00:00
|
|
|
MLX5_RSS_SRC_DST_ONLY | RTE_ETH_RSS_ESP))
|
2018-01-03 09:14:19 +00:00
|
|
|
|
2018-03-12 13:43:19 +00:00
|
|
|
/* Timeout in seconds to get a valid link status. */
|
|
|
|
#define MLX5_LINK_STATUS_TIMEOUT 10
|
2018-01-25 16:04:28 +00:00
|
|
|
|
2019-10-16 07:34:03 +00:00
|
|
|
/* Number of times to retry retrieving the physical link information. */
|
|
|
|
#define MLX5_GET_LINK_STATUS_RETRY_COUNT 3
|
|
|
|
|
2018-07-12 12:01:31 +00:00
|
|
|
/* Maximum number of UAR pages used by a port.
|
|
|
|
* These are the size and mask for an array of mutexes used to synchronize
|
|
|
|
* the access to port's UARs on platforms that do not support 64 bit writes.
|
|
|
|
* In such systems it is possible to issue the 64 bits DoorBells through two
|
|
|
|
* consecutive writes, each write 32 bits. The access to a UAR page (which can
|
|
|
|
* be accessible by all threads in the process) must be synchronized
|
|
|
|
* (for example, using a semaphore). Such a synchronization is not required
|
|
|
|
* when ringing DoorBells on different UAR pages.
|
|
|
|
* A port with 512 Tx queues uses 8, 4kBytes, UAR pages which are shared
|
|
|
|
* among the ports.
|
|
|
|
*/
|
|
|
|
#define MLX5_UAR_PAGE_NUM_MAX 64
|
|
|
|
#define MLX5_UAR_PAGE_NUM_MASK ((MLX5_UAR_PAGE_NUM_MAX) - 1)
|
2018-01-25 15:00:24 +00:00
|
|
|
|
2018-05-09 11:13:50 +00:00
|
|
|
/* Log 2 of the default number of strides per WQE for Multi-Packet RQ. */
|
2021-11-23 18:38:04 +00:00
|
|
|
#define MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM 6U
|
2018-05-09 11:13:50 +00:00
|
|
|
|
2020-04-09 22:23:51 +00:00
|
|
|
/* Log 2 of the default size of a stride per WQE for Multi-Packet RQ. */
|
2021-11-23 18:38:04 +00:00
|
|
|
#define MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE 11U
|
2020-04-09 22:23:51 +00:00
|
|
|
|
2018-05-09 11:13:50 +00:00
|
|
|
/* Two-byte shift is disabled for Multi-Packet RQ. */
|
|
|
|
#define MLX5_MPRQ_TWO_BYTE_SHIFT 0
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Minimum size of packet to be memcpy'd instead of being attached as an
|
|
|
|
* external buffer.
|
|
|
|
*/
|
|
|
|
#define MLX5_MPRQ_MEMCPY_DEFAULT_LEN 128
|
|
|
|
|
|
|
|
/* Minimum number Rx queues to enable Multi-Packet RQ. */
|
|
|
|
#define MLX5_MPRQ_MIN_RXQS 12
|
|
|
|
|
|
|
|
/* Cache size of mempool for Multi-Packet RQ. */
|
2018-08-02 21:00:07 +00:00
|
|
|
#define MLX5_MPRQ_MP_CACHE_SZ 32U
|
2018-05-09 11:13:50 +00:00
|
|
|
|
2019-11-15 11:35:06 +00:00
|
|
|
/* MLX5_DV_XMETA_EN supported values. */
|
net/mlx5: add devarg for extensive metadata support
The PMD parameter dv_xmeta_en is added to control extensive
metadata support. A nonzero value enables extensive flow
metadata support if device is capable and driver supports it.
This can enable extensive support of MARK and META item of
rte_flow. The newly introduced SET_TAG and SET_META actions
do not depend on dv_xmeta_en parameter, because there is
no compatibility issue for new entities. The dv_xmeta_en is
disabled by default.
There are some possible configurations, depending on parameter
value:
- 0, this is default value, defines the legacy mode, the MARK
and META related actions and items operate only within NIC Tx
and NIC Rx steering domains, no MARK and META information
crosses the domain boundaries. The MARK item is 24 bits wide,
the META item is 32 bits wide.
- 1, this engages extensive metadata mode, the MARK and META
related actions and items operate within all supported steering
domains, including FDB, MARK and META information may cross
the domain boundaries. The ``MARK`` item is 24 bits wide, the
META item width depends on kernel and firmware configurations
and might be 0, 16 or 32 bits. Within NIC Tx domain META data
width is 32 bits for compatibility, the actual width of data
transferred to the FDB domain depends on kernel configuration
and may be vary. The actual supported width can be retrieved
in runtime by series of rte_flow_validate() trials.
- 2, this engages extensive metadata mode, the MARK and META
related actions and items operate within all supported steering
domains, including FDB, MARK and META information may cross
the domain boundaries. The META item is 32 bits wide, the MARK
item width depends on kernel and firmware configurations and
might be 0, 16 or 24 bits. The actual supported width can be
retrieved in runtime by series of rte_flow_validate() trials.
If there is no E-Switch configuration the ``dv_xmeta_en`` parameter is
ignored and the device is configured to operate in legacy mode (0).
Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
2019-11-07 17:09:54 +00:00
|
|
|
#define MLX5_XMETA_MODE_LEGACY 0
|
|
|
|
#define MLX5_XMETA_MODE_META16 1
|
|
|
|
#define MLX5_XMETA_MODE_META32 2
|
2020-10-25 14:08:09 +00:00
|
|
|
/* Provide info on partial HW miss. Implies MLX5_XMETA_MODE_META16 */
|
|
|
|
#define MLX5_XMETA_MODE_MISS_INFO 3
|
2022-10-20 15:57:36 +00:00
|
|
|
/* Only valid in HWS, 32bits extended META without MARK support in FDB. */
|
|
|
|
#define MLX5_XMETA_MODE_META32_HWS 4
|
net/mlx5: add devarg for extensive metadata support
The PMD parameter dv_xmeta_en is added to control extensive
metadata support. A nonzero value enables extensive flow
metadata support if device is capable and driver supports it.
This can enable extensive support of MARK and META item of
rte_flow. The newly introduced SET_TAG and SET_META actions
do not depend on dv_xmeta_en parameter, because there is
no compatibility issue for new entities. The dv_xmeta_en is
disabled by default.
There are some possible configurations, depending on parameter
value:
- 0, this is default value, defines the legacy mode, the MARK
and META related actions and items operate only within NIC Tx
and NIC Rx steering domains, no MARK and META information
crosses the domain boundaries. The MARK item is 24 bits wide,
the META item is 32 bits wide.
- 1, this engages extensive metadata mode, the MARK and META
related actions and items operate within all supported steering
domains, including FDB, MARK and META information may cross
the domain boundaries. The ``MARK`` item is 24 bits wide, the
META item width depends on kernel and firmware configurations
and might be 0, 16 or 32 bits. Within NIC Tx domain META data
width is 32 bits for compatibility, the actual width of data
transferred to the FDB domain depends on kernel configuration
and may be vary. The actual supported width can be retrieved
in runtime by series of rte_flow_validate() trials.
- 2, this engages extensive metadata mode, the MARK and META
related actions and items operate within all supported steering
domains, including FDB, MARK and META information may cross
the domain boundaries. The META item is 32 bits wide, the MARK
item width depends on kernel and firmware configurations and
might be 0, 16 or 24 bits. The actual supported width can be
retrieved in runtime by series of rte_flow_validate() trials.
If there is no E-Switch configuration the ``dv_xmeta_en`` parameter is
ignored and the device is configured to operate in legacy mode (0).
Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
2019-11-07 17:09:54 +00:00
|
|
|
|
2020-07-16 08:23:08 +00:00
|
|
|
/* Tx accurate scheduling on timestamps parameters. */
|
2020-07-16 08:23:12 +00:00
|
|
|
#define MLX5_TXPP_WAIT_INIT_TS 1000ul /* How long to wait timestamp. */
|
2020-07-16 08:23:08 +00:00
|
|
|
#define MLX5_TXPP_CLKQ_SIZE 1
|
2020-07-16 08:23:09 +00:00
|
|
|
#define MLX5_TXPP_REARM ((1UL << MLX5_WQ_INDEX_WIDTH) / 4)
|
|
|
|
#define MLX5_TXPP_REARM_SQ_SIZE (((1UL << MLX5_CQ_INDEX_WIDTH) / \
|
|
|
|
MLX5_TXPP_REARM) * 2)
|
|
|
|
#define MLX5_TXPP_REARM_CQ_SIZE (MLX5_TXPP_REARM_SQ_SIZE / 2)
|
2020-07-16 08:23:08 +00:00
|
|
|
/* The minimal size test packet to put into one WQE, padded by HW. */
|
|
|
|
#define MLX5_TXPP_TEST_PKT_SIZE (sizeof(struct rte_ether_hdr) + \
|
|
|
|
sizeof(struct rte_ipv4_hdr))
|
|
|
|
|
2019-11-07 17:10:04 +00:00
|
|
|
/* Size of the simple hash table for metadata register table. */
|
2021-07-13 08:44:55 +00:00
|
|
|
#define MLX5_FLOW_MREG_HTABLE_SZ 64
|
2019-11-07 17:10:04 +00:00
|
|
|
#define MLX5_FLOW_MREG_HNAME "MARK_COPY_TABLE"
|
2019-11-27 13:36:43 +00:00
|
|
|
#define MLX5_DEFAULT_COPY_ID UINT32_MAX
|
2019-11-07 17:10:04 +00:00
|
|
|
|
2020-07-31 03:34:18 +00:00
|
|
|
/* Size of the simple hash table for header modify table. */
|
2021-07-13 08:44:55 +00:00
|
|
|
#define MLX5_FLOW_HDR_MODIFY_HTABLE_SZ (1 << 15)
|
2020-07-31 03:34:18 +00:00
|
|
|
|
2020-09-16 10:19:48 +00:00
|
|
|
/* Size of the simple hash table for encap decap table. */
|
2021-07-13 08:44:55 +00:00
|
|
|
#define MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ (1 << 12)
|
2020-09-16 10:19:48 +00:00
|
|
|
|
2021-07-13 08:44:57 +00:00
|
|
|
/* Size of the hash table for tag table. */
|
|
|
|
#define MLX5_TAGS_HLIST_ARRAY_SIZE (1 << 15)
|
|
|
|
|
|
|
|
/* Size of the hash table for SFT table. */
|
|
|
|
#define MLX5_FLOW_SFT_HLIST_ARRAY_SIZE 4096
|
|
|
|
|
2020-02-19 08:28:39 +00:00
|
|
|
/* Hairpin TX/RX queue configuration parameters. */
|
|
|
|
#define MLX5_HAIRPIN_QUEUE_STRIDE 6
|
2020-03-24 12:59:01 +00:00
|
|
|
#define MLX5_HAIRPIN_JUMBO_LOG_SIZE (14 + 2)
|
2020-02-19 08:28:39 +00:00
|
|
|
|
ethdev: introduce indirect flow action
Right now, rte_flow_shared_action_* APIs are used for some shared
actions, like RSS, count. The shared action should be created before
using it inside a flow. These shared actions sometimes are not
really shared but just some indirect actions decoupled from a flow.
The new functions rte_flow_action_handle_* are added to replace
the current shared functions rte_flow_shared_action_*.
There are two types of flow actions:
1. the direct (normal) actions that could be created and stored
within a flow rule. Such action is tied to its flow rule and
cannot be reused.
2. the indirect action, in the past, named shared_action. It is
created from a direct actioni, like count or rss, and then used
in the flow rules with an object handle. The PMD will take care
of the retrieve from indirect action to the direct action
when it is referenced.
The indirect action is accessed (update / query) w/o any flow rule,
just via the action object handle. For example, when querying or
resetting a counter, it could be done out of any flow using this
counter, but only the handle of the counter action object is
required.
The indirect action object could be shared by different flows or
used by a single flow, depending on the direct action type and
the real-life requirements.
The handle of an indirect action object is opaque and defined in
each driver and possibly different per direct action type.
The old name "shared" is improper in a sense and should be replaced.
Since the APIs are changed from "rte_flow_shared_action*" to the new
"rte_flow_action_handle*", the testpmd application code and command
line interfaces also need to be updated to do the adaption.
The testpmd application user guide is also updated. All the "shared
action" related parts are replaced with "indirect action" to have a
correct explanation.
The parameter of "update" interface is also changed. A general
pointer will replace the rte_flow_action struct pointer due to the
facts:
1. Some action may not support fields updating. In the example of a
counter, the only "update" supported should be the reset. So
passing a rte_flow_action struct pointer is meaningless and
there is even no such corresponding action struct. What's more,
if more than one operations should be supported, for some other
action, such pointer parameter may not meet the need.
2. Some action may need conditional or partial update, the current
parameter will not provide the ability to indicate which part(s)
to update.
For different types of indirect action objects, the pointer could
either be the same of rte_flow_action* struct - in order not to
break the current driver implementation, or some wrapper
structures with bits as masks to indicate which part to be
updated, depending on real needs of the corresponding direct
action. For different direct actions, the structures of indirect
action objects updating will be different.
All the underlayer PMD callbacks will be moved to these new APIs.
The RTE_FLOW_ACTION_TYPE_SHARED is kept for now in order not to
break the ABI. All the implementations are changed by using
RTE_FLOW_ACTION_TYPE_INDIRECT.
Since the APIs are changed from "rte_flow_shared_action*" to the new
"rte_flow_action_handle*" and the "update" interface's 3rd input
parameter is changed to generic pointer, the mlx5 PMD that uses these
APIs needs to do the adaption to the new APIs as well.
Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Andrey Vesnovaty <andreyv@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
2021-04-19 14:38:29 +00:00
|
|
|
/* Maximum number of indirect actions supported by rte_flow */
|
2021-04-29 09:55:38 +00:00
|
|
|
#define MLX5_MAX_INDIRECT_ACTIONS 3
|
2020-10-23 10:24:09 +00:00
|
|
|
|
2022-02-24 23:25:10 +00:00
|
|
|
/* Maximum number of external Rx queues supported by rte_flow */
|
|
|
|
#define MLX5_MAX_EXT_RX_QUEUES (UINT16_MAX - MLX5_EXTERNAL_RX_QUEUE_ID_MIN + 1)
|
|
|
|
|
2020-12-28 09:54:11 +00:00
|
|
|
/*
|
|
|
|
* Linux definition of static_assert is found in /usr/include/assert.h.
|
|
|
|
* Windows does not require a redefinition.
|
|
|
|
*/
|
|
|
|
#if !defined(HAVE_STATIC_ASSERT) && !defined(RTE_EXEC_ENV_WINDOWS)
|
2018-07-15 17:31:39 +00:00
|
|
|
#define static_assert _Static_assert
|
|
|
|
#endif
|
|
|
|
|
net/mlx5: support flow counter action for HWS
This commit adds HW steering counter action support.
The pool mechanism is the basic data structure for the HW steering
counter.
The HW steering's counter pool is based on the rte_ring of zero-copy
variation.
There are two global rte_rings:
1. free_list:
Store the counters indexes, which are ready for use.
2. wait_reset_list:
Store the counters indexes, which are just freed from the user and
need to query the hardware counter to get the reset value before
this counter can be reused again.
The counter pool also supports cache per HW steering's queues, which are
also based on the rte_ring of zero-copy variation.
The cache can be configured in size, preload, threshold, and fetch size,
they are all exposed via device args.
The main operations of the counter pool are as follows:
- Get one counter from the pool:
1. The user call _get_* API.
2. If the cache is enabled, dequeue one counter index from the local
cache:
2. A: if the dequeued one from the local cache is still in reset
status (counter's query_gen_when_free is equal to pool's query
gen):
I. Flush all counters in the local cache back to global
wait_reset_list.
II. Fetch _fetch_sz_ counters into the cache from the global
free list.
III. Fetch one counter from the cache.
3. If the cache is empty, fetch _fetch_sz_ counters from the global
free list into the cache and fetch one counter from the cache.
- Free one counter into the pool:
1. The user calls _put_* API.
2. Put the counter into the local cache.
3. If the local cache is full:
A: Write back all counters above _threshold_ into the global
wait_reset_list.
B: Also, write back this counter into the global wait_reset_list.
When the local cache is disabled, _get_/_put_ cache directly from/into
global list.
Signed-off-by: Xiaoyu Min <jackmin@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
2022-10-20 15:41:42 +00:00
|
|
|
#define MLX5_CNT_SVC_CYCLE_TIME_DEFAULT 500
|
|
|
|
|
2015-10-30 18:52:30 +00:00
|
|
|
#endif /* RTE_PMD_MLX5_DEFS_H_ */
|