numam-dpdk/drivers/net/mlx5/mlx5_defs.h


/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
* Copyright 2015 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_DEFS_H_
#define RTE_PMD_MLX5_DEFS_H_
#include <ethdev_driver.h>
#include <rte_vxlan.h>
#include <mlx5_common_defs.h>
#include "mlx5_autoconf.h"
/* Maximum number of simultaneous VLAN filters. */
#define MLX5_MAX_VLAN_IDS 128
/*
 * Request a Tx completion every time the number of descriptors used since
 * the previous request reaches this threshold. Must be a power of two for
 * performance reasons.
 */
#define MLX5_TX_COMP_THRESH 32u
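/*
 * Illustrative sketch only (hypothetical names, not the PMD's actual
 * datapath code): with an unsigned power-of-two threshold, checking whether
 * a completion request is due reduces to one subtraction and one compare,
 * with queue-index wrap-around handled for free.
 */
static inline int
mlx5_example_tx_comp_due(uint16_t head, uint16_t last_comp_req)
{
	return (uint16_t)(head - last_comp_req) >= MLX5_TX_COMP_THRESH;
}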
/*
 * Request a Tx completion every time the total number of WQEBBs used for
 * inlining packets exceeds the WQ size divided by this divisor. Preferably
 * a power of two for performance.
 */
#define MLX5_TX_COMP_THRESH_INLINE_DIV (1 << 3)
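/*
 * Worked example: with a 1024-WQEBB send queue, a completion request is
 * issued once inlined data has consumed more than 1024 / 8 = 128 WQEBBs
 * since the previous request.
 */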
/*
 * Maximum number of normal completion CQEs
 * processed in one call of the tx_burst() routine.
 */
#define MLX5_TX_COMP_MAX_CQE 2u
/*
* If defined, only use software counters. The PMD will never ask the hardware
* for these, and many of them won't be available.
*/
#ifndef MLX5_PMD_SOFT_COUNTERS
#define MLX5_PMD_SOFT_COUNTERS 1
#endif
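/*
 * Illustrative sketch only (hypothetical stats type and fields): software
 * counter updates in the datapath are wrapped in this guard so that the
 * accounting compiles out entirely when MLX5_PMD_SOFT_COUNTERS is not
 * defined.
 */
struct mlx5_example_q_stats {
	uint64_t ipackets;
	uint64_t ibytes;
};

static inline void
mlx5_example_count_rx(struct mlx5_example_q_stats *st,
		      uint16_t nb_pkts, uint32_t nb_bytes)
{
#ifdef MLX5_PMD_SOFT_COUNTERS
	st->ipackets += nb_pkts;
	st->ibytes += nb_bytes;
#else
	RTE_SET_USED(st);
	RTE_SET_USED(nb_pkts);
	RTE_SET_USED(nb_bytes);
#endif
}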
/* Maximum number of DCS created per port. */
#define MLX5_HWS_CNT_DCS_NUM 4
/* Alarm timeout, in microseconds. */
#define MLX5_ALARM_TIMEOUT_US 100000
/* Maximum number of extended statistics counters. */
#define MLX5_MAX_XSTATS 64
/* Maximum packet headers size (L2 + L3 + L4) for TSO. */
#define MLX5_MAX_TSO_HEADER 192U
/* Inline data size required by NICs. */
#define MLX5_INLINE_HSIZE_NONE 0
#define MLX5_INLINE_HSIZE_L2 (sizeof(struct rte_ether_hdr) + \
			      sizeof(struct rte_vlan_hdr))
#define MLX5_INLINE_HSIZE_L3 (MLX5_INLINE_HSIZE_L2 + \
			      sizeof(struct rte_ipv6_hdr))
#define MLX5_INLINE_HSIZE_L4 (MLX5_INLINE_HSIZE_L3 + \
			      sizeof(struct rte_tcp_hdr))
#define MLX5_INLINE_HSIZE_INNER_L2 (MLX5_INLINE_HSIZE_L3 + \
				    sizeof(struct rte_udp_hdr) + \
				    sizeof(struct rte_vxlan_hdr) + \
				    sizeof(struct rte_ether_hdr) + \
				    sizeof(struct rte_vlan_hdr))
#define MLX5_INLINE_HSIZE_INNER_L3 (MLX5_INLINE_HSIZE_INNER_L2 + \
				    sizeof(struct rte_ipv6_hdr))
#define MLX5_INLINE_HSIZE_INNER_L4 (MLX5_INLINE_HSIZE_INNER_L3 + \
				    sizeof(struct rte_tcp_hdr))
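/*
 * Worked example of the budgets above, using the standard header sizes
 * (Ethernet 14 B, VLAN 4 B, IPv6 40 B, TCP 20 B, UDP 8 B, VXLAN 8 B):
 * L2 = 18 B, L3 = 58 B, L4 = 78 B, inner L2 = 92 B, inner L3 = 132 B,
 * inner L4 = 152 B. IPv6 is taken as the worst case, so IPv4 headers
 * always fit in the same budget.
 */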
/* Threshold of buffer replenishment for vectorized Rx. */
#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \
	(RTE_MIN(MLX5_VPMD_RX_MAX_BURST, (unsigned int)(n) >> 2))
/* Maximum size of burst for vectorized Rx. */
#define MLX5_VPMD_RX_MAX_BURST 64U
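/*
 * Worked example: a 512-descriptor Rx queue is replenished in chunks of
 * RTE_MIN(64, 512 / 4) = 64 mbufs; queues with fewer than 256 descriptors
 * use proportionally smaller chunks.
 */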
/* Recommended optimal burst size. */
#define MLX5_RX_DEFAULT_BURST 64U
#define MLX5_TX_DEFAULT_BURST 64U
/* Number of packets vectorized Rx can simultaneously process in a loop. */
#define MLX5_VPMD_DESCS_PER_LOOP 4
/* Mask of RSS on source only or destination only. */
#define MLX5_RSS_SRC_DST_ONLY (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY | \
			       RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
/* Mask of RSS hash types not supported by the PMD (complement of the supported set). */
#define MLX5_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP | \
			    MLX5_RSS_SRC_DST_ONLY | RTE_ETH_RSS_ESP))
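/*
 * Illustrative use of the mask above (hypothetical helper): since
 * MLX5_RSS_HF_MASK is the complement of the supported set, any unsupported
 * bit in a requested configuration is caught by a single AND.
 */
static inline int
mlx5_example_rss_hf_is_supported(uint64_t rss_hf)
{
	return (rss_hf & MLX5_RSS_HF_MASK) == 0;
}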
/* Timeout in seconds to get a valid link status. */
#define MLX5_LINK_STATUS_TIMEOUT 10
/* Number of times to retry retrieving the physical link information. */
#define MLX5_GET_LINK_STATUS_RETRY_COUNT 3
/* Maximum number of UAR pages used by a port.
 * These are the size and mask for an array of mutexes used to synchronize
 * the access to a port's UARs on platforms that do not support 64-bit writes.
 * On such systems a 64-bit doorbell is issued as two consecutive 32-bit
 * writes, so the access to a UAR page (which can be shared by all threads in
 * the process) must be synchronized, for example with a semaphore. No such
 * synchronization is required when ringing doorbells on different UAR pages.
 * A port with 512 Tx queues uses eight 4 KB UAR pages, which are shared
 * among the ports.
 */
#define MLX5_UAR_PAGE_NUM_MAX 64
#define MLX5_UAR_PAGE_NUM_MASK ((MLX5_UAR_PAGE_NUM_MAX) - 1)
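/*
 * Illustrative sketch only (hypothetical helper, assuming the 4 KB UAR
 * pages mentioned above): doorbell addresses on the same UAR page map to
 * the same lock index, while different pages may be rung concurrently.
 */
static inline unsigned int
mlx5_example_uar_lock_idx(const void *db_addr)
{
	return ((uintptr_t)db_addr / 4096) & MLX5_UAR_PAGE_NUM_MASK;
}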
/* Log 2 of the default number of strides per WQE for Multi-Packet RQ. */
#define MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM 6U
/* Log 2 of the default size of a stride per WQE for Multi-Packet RQ. */
#define MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE 11U
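/*
 * With the defaults above, one Multi-Packet RQ WQE carries 2^6 = 64 strides
 * of 2^11 = 2048 B each, i.e. 128 KiB of receive buffer per WQE.
 */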
/* Two-byte shift is disabled for Multi-Packet RQ. */
#define MLX5_MPRQ_TWO_BYTE_SHIFT 0
/*
* Minimum size of packet to be memcpy'd instead of being attached as an
* external buffer.
*/
#define MLX5_MPRQ_MEMCPY_DEFAULT_LEN 128
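/*
 * Illustrative sketch only (hypothetical helper; assumes rte_mbuf.h and
 * rte_memcpy.h are visible and a prepared rte_mbuf_ext_shared_info is
 * supplied by the caller): short MPRQ packets are copied out of the stride
 * so the large buffer can be recycled at once, longer ones are attached as
 * external buffers to avoid the copy.
 */
static inline void
mlx5_example_mprq_deliver(struct rte_mbuf *pkt, void *addr, rte_iova_t iova,
			  uint16_t len, struct rte_mbuf_ext_shared_info *shinfo)
{
	if (len <= MLX5_MPRQ_MEMCPY_DEFAULT_LEN)
		rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
	else
		rte_pktmbuf_attach_extbuf(pkt, addr, iova, len, shinfo);
	pkt->data_len = len;
	pkt->pkt_len = len;
}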
/* Minimum number of Rx queues to enable Multi-Packet RQ. */
#define MLX5_MPRQ_MIN_RXQS 12
/* Cache size of mempool for Multi-Packet RQ. */
#define MLX5_MPRQ_MP_CACHE_SZ 32U
/* MLX5_DV_XMETA_EN supported values. */
/*
 * 0 (default, legacy mode): MARK and META related actions and items operate
 *   only within the NIC Rx and NIC Tx steering domains; MARK is 24 bits wide
 *   and META is 32 bits wide.
 * 1 (extensive metadata mode): MARK and META cross all supported steering
 *   domains, including FDB; MARK is 24 bits wide, META width depends on the
 *   kernel and firmware configuration (0, 16 or 32 bits).
 * 2 (extensive metadata mode): META is 32 bits wide, MARK width depends on
 *   the kernel and firmware configuration (0, 16 or 24 bits).
 * Without an E-Switch configuration the dv_xmeta_en devarg is ignored and
 * the device operates in legacy mode (0).
 */
#define MLX5_XMETA_MODE_LEGACY 0
#define MLX5_XMETA_MODE_META16 1
#define MLX5_XMETA_MODE_META32 2
/* Provide info on partial HW miss. Implies MLX5_XMETA_MODE_META16. */
#define MLX5_XMETA_MODE_MISS_INFO 3
/* Only valid in HWS: 32-bit extended META, without MARK support in FDB. */
#define MLX5_XMETA_MODE_META32_HWS 4
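/*
 * The mode is selected with the dv_xmeta_en devarg, e.g. (illustrative
 * command line):
 *   dpdk-testpmd -a <PCI_BDF>,dv_xmeta_en=1 ...
 * The actually supported META/MARK width can be probed at runtime by a
 * series of rte_flow_validate() trials.
 */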
/* Tx accurate scheduling on timestamps parameters. */
#define MLX5_TXPP_WAIT_INIT_TS 1000ul /* How long to wait for a timestamp. */
#define MLX5_TXPP_CLKQ_SIZE 1
#define MLX5_TXPP_REARM ((1UL << MLX5_WQ_INDEX_WIDTH) / 4)
#define MLX5_TXPP_REARM_SQ_SIZE (((1UL << MLX5_CQ_INDEX_WIDTH) / \
				  MLX5_TXPP_REARM) * 2)
#define MLX5_TXPP_REARM_CQ_SIZE (MLX5_TXPP_REARM_SQ_SIZE / 2)
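/*
 * Worked example, assuming the usual 16-bit WQ/CQ index width:
 * MLX5_TXPP_REARM = 2^16 / 4 = 16384, giving a rearm SQ of
 * (2^16 / 16384) * 2 = 8 WQEs and a rearm CQ of 4 CQEs.
 */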
/* The minimal-size test packet to put into one WQE, padded by HW. */
#define MLX5_TXPP_TEST_PKT_SIZE (sizeof(struct rte_ether_hdr) + \
				 sizeof(struct rte_ipv4_hdr))
/* Size of the simple hash table for metadata register table. */
#define MLX5_FLOW_MREG_HTABLE_SZ 64
#define MLX5_FLOW_MREG_HNAME "MARK_COPY_TABLE"
#define MLX5_DEFAULT_COPY_ID UINT32_MAX
/* Size of the simple hash table for header modify table. */
#define MLX5_FLOW_HDR_MODIFY_HTABLE_SZ (1 << 15)
/* Size of the simple hash table for encap decap table. */
#define MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ (1 << 12)
/* Size of the hash table for tag table. */
#define MLX5_TAGS_HLIST_ARRAY_SIZE (1 << 15)
/* Size of the hash table for SFT table. */
#define MLX5_FLOW_SFT_HLIST_ARRAY_SIZE 4096
/* Hairpin TX/RX queue configuration parameters. */
#define MLX5_HAIRPIN_QUEUE_STRIDE 6
#define MLX5_HAIRPIN_JUMBO_LOG_SIZE (14 + 2)
/* Maximum number of indirect actions supported by rte_flow */
#define MLX5_MAX_INDIRECT_ACTIONS 3
/* Maximum number of external Rx queues supported by rte_flow */
#define MLX5_MAX_EXT_RX_QUEUES (UINT16_MAX - MLX5_EXTERNAL_RX_QUEUE_ID_MIN + 1)
/*
* Linux definition of static_assert is found in /usr/include/assert.h.
* Windows does not require a redefinition.
*/
#if !defined(HAVE_STATIC_ASSERT) && !defined(RTE_EXEC_ENV_WINDOWS)
#define static_assert _Static_assert
#endif
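/*
 * Illustrative compile-time check using the (possibly redefined)
 * static_assert, tying back to the power-of-two requirement on the Tx
 * completion threshold above.
 */
static_assert((MLX5_TX_COMP_THRESH & (MLX5_TX_COMP_THRESH - 1)) == 0,
	      "MLX5_TX_COMP_THRESH must be a power of two");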
/* Default cycle time for the HWS flow counter service. */
#define MLX5_CNT_SVC_CYCLE_TIME_DEFAULT 500
#endif /* RTE_PMD_MLX5_DEFS_H_ */