net/mlx5: support RSS on src or dst fields only

This patch implements the feature described in RFC [1], adding
support for the RSS action on L3 and/or L4 source or destination fields only.

[1] http://mails.dpdk.org/archives/dev/2019-December/152796.html

Signed-off-by: Dekel Peled <dekelp@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
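
For context, an application requests this partial-field hashing through the
standard rte_flow RSS action. A minimal sketch (the queue layout and hash
function below are illustrative assumptions, not part of this patch):

	/* Spread Rx traffic over two queues, hashing on the IPv4 source address only. */
	uint16_t queues[] = { 0, 1 };
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,                                /* outer headers */
		.types = ETH_RSS_IP | ETH_RSS_L3_SRC_ONLY, /* L3 source only */
		.queue_num = 2,
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};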

doc/guides/nics/mlx5.rst

@@ -64,6 +64,8 @@ Features
 - Multiple TX and RX queues.
 - Support for scattered TX and RX frames.
 - IPv4, IPv6, TCPv4, TCPv6, UDPv4 and UDPv6 RSS on any number of queues.
+- RSS using different combinations of fields: L3 only, L4 only or both,
+  and source only, destination only or both.
 - Several RSS hash keys, one for each flow type.
 - Default RSS operation with no hash key specification.
 - Configurable RETA table.
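
The new feature-list entry above can be exercised from testpmd's flow syntax;
for example, the following command (a sketch assuming the l4-dst-only RSS type
token of this release) hashes on the IPv4 addresses plus the UDP destination
port only:

	flow create 0 ingress pattern eth / ipv4 / udp / end \
	     actions rss types ipv4-udp l4-dst-only end queues 0 1 end / end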

doc/guides/rel_notes/release_20_02.rst

@@ -61,6 +61,12 @@ New Features
   A new API has been added to wait for a memory location to be updated with a
   16-bit, 32-bit, 64-bit value.
 
+* **Updated Mellanox mlx5 driver.**
+
+  Updated Mellanox mlx5 driver with new features and improvements, including:
+
+  * Added support for RSS using L3/L4 source/destination only.
+
 * **Added algorithms to cryptodev API.**
 
   * Chacha20-Poly1305 AEAD algorithm can now be supported in cryptodev.

drivers/net/mlx5/mlx5_defs.h

@@ -104,8 +104,13 @@
 /* Number of packets vectorized Rx can simultaneously process in a loop. */
 #define MLX5_VPMD_DESCS_PER_LOOP 4
 
+/* Mask of RSS on source only or destination only. */
+#define MLX5_RSS_SRC_DST_ONLY (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | \
+			       ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+
 /* Supported RSS */
-#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
+#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP | \
+			    MLX5_RSS_SRC_DST_ONLY))
 
 /* Timeout in seconds to get a valid link status. */
 #define MLX5_LINK_STATUS_TIMEOUT 10
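
Because MLX5_RSS_HF_MASK is the complement of everything the PMD accepts,
extending it is what lets the four new flags through the generic RSS type
check. Conceptually (a paraphrase of the existing check in
mlx5_flow_validate_action_rss(), whose tail is visible at the top of the
mlx5_flow.c hunk below):

	/* Reject any requested hash type outside the supported set. */
	if (rss->types & MLX5_RSS_HF_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "some RSS protocols are not"
					  " supported");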

drivers/net/mlx5/mlx5_flow.c

@@ -1150,6 +1150,18 @@ mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
 					  &rss->types,
 					  "some RSS protocols are not"
 					  " supported");
+	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
+	    !(rss->types & ETH_RSS_IP))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "L3 partial RSS requested but L3 RSS"
+					  " type not specified");
+	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
+	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "L4 partial RSS requested but L4 RSS"
+					  " type not specified");
 	if (!priv->rxqs_n)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
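
In practice these checks mean a partial-field flag must be accompanied by a
matching base type. A few illustrative values of rss->types:

	ETH_RSS_L3_SRC_ONLY                 /* rejected: EINVAL, no L3 type  */
	ETH_RSS_IP | ETH_RSS_L3_SRC_ONLY    /* accepted                      */
	ETH_RSS_IP | ETH_RSS_L4_DST_ONLY    /* rejected: EINVAL, no L4 type  */
	ETH_RSS_UDP | ETH_RSS_L4_DST_ONLY   /* accepted                      */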

drivers/net/mlx5/mlx5_flow_dv.c

@@ -6576,6 +6576,75 @@ flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
 	mlx5_txq_release(dev, queue_v->queue);
 }
 
+/**
+ * Set the hash fields according to the @p flow information.
+ *
+ * @param[in] dev_flow
+ *   Pointer to the mlx5_flow.
+ */
+static void
+flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
+{
+	struct rte_flow *flow = dev_flow->flow;
+	uint64_t items = dev_flow->layers;
+	int rss_inner = 0;
+	uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
+
+	dev_flow->hash_fields = 0;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+	if (flow->rss.level >= 2) {
+		dev_flow->hash_fields |= IBV_RX_HASH_INNER;
+		rss_inner = 1;
+	}
+#endif
+	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
+	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
+		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
+			if (rss_types & ETH_RSS_L3_SRC_ONLY)
+				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
+			else if (rss_types & ETH_RSS_L3_DST_ONLY)
+				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
+			else
+				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
+		}
+	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
+		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
+		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
+			if (rss_types & ETH_RSS_L3_SRC_ONLY)
+				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
+			else if (rss_types & ETH_RSS_L3_DST_ONLY)
+				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
+			else
+				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
+		}
+	}
+	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
+	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
+		if (rss_types & ETH_RSS_UDP) {
+			if (rss_types & ETH_RSS_L4_SRC_ONLY)
+				dev_flow->hash_fields |=
+						IBV_RX_HASH_SRC_PORT_UDP;
+			else if (rss_types & ETH_RSS_L4_DST_ONLY)
+				dev_flow->hash_fields |=
+						IBV_RX_HASH_DST_PORT_UDP;
+			else
+				dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
+		}
+	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
+		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
+		if (rss_types & ETH_RSS_TCP) {
+			if (rss_types & ETH_RSS_L4_SRC_ONLY)
+				dev_flow->hash_fields |=
+						IBV_RX_HASH_SRC_PORT_TCP;
+			else if (rss_types & ETH_RSS_L4_DST_ONLY)
+				dev_flow->hash_fields |=
+						IBV_RX_HASH_DST_PORT_TCP;
+			else
+				dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
+		}
+	}
+}
+
 /**
  * Fill the flow with DV spec, lock free
  * (mutex should be acquired by caller).
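
Note the rte_eth_rss_hf_refine() call at the top of the new function: it
normalizes the case where an application sets both the SRC_ONLY and DST_ONLY
flags of the same layer, which is equivalent to hashing on both fields. A
sketch of its behavior (paraphrased from the ethdev header of this era; treat
it as illustrative, not the authoritative definition):

	static inline uint64_t
	rte_eth_rss_hf_refine(uint64_t rss_hf)
	{
		/* SRC_ONLY together with DST_ONLY means "both fields",
		 * i.e. the default behavior, so both flags are cleared.
		 */
		if ((rss_hf & ETH_RSS_L3_SRC_ONLY) && (rss_hf & ETH_RSS_L3_DST_ONLY))
			rss_hf &= ~(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY);
		if ((rss_hf & ETH_RSS_L4_SRC_ONLY) && (rss_hf & ETH_RSS_L4_DST_ONLY))
			rss_hf &= ~(ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY);
		return rss_hf;
	}

With both flags cleared, flow_dv_hashfields_set() falls through to its else
branches and selects the full MLX5_*_IBV_RX_HASH field sets.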
@@ -7086,11 +7155,6 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 						    items, tunnel,
 						    dev_flow->group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
-			dev_flow->hash_fields |=
-				mlx5_flow_hashfields_adjust
-					(dev_flow, tunnel,
-					 MLX5_IPV4_LAYER_TYPES,
-					 MLX5_IPV4_IBV_RX_HASH);
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
 			if (items->mask != NULL &&
@@ -7114,11 +7178,6 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 						    items, tunnel,
 						    dev_flow->group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
-			dev_flow->hash_fields |=
-				mlx5_flow_hashfields_adjust
-					(dev_flow, tunnel,
-					 MLX5_IPV6_LAYER_TYPES,
-					 MLX5_IPV6_IBV_RX_HASH);
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
 			if (items->mask != NULL &&
@@ -7139,11 +7198,6 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 			flow_dv_translate_item_tcp(match_mask, match_value,
 						   items, tunnel);
 			matcher.priority = MLX5_PRIORITY_MAP_L4;
-			dev_flow->hash_fields |=
-				mlx5_flow_hashfields_adjust
-					(dev_flow, tunnel, ETH_RSS_TCP,
-					 IBV_RX_HASH_SRC_PORT_TCP |
-					 IBV_RX_HASH_DST_PORT_TCP);
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
 					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
 			break;
@@ -7151,11 +7205,6 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 			flow_dv_translate_item_udp(match_mask, match_value,
 						   items, tunnel);
 			matcher.priority = MLX5_PRIORITY_MAP_L4;
-			dev_flow->hash_fields |=
-				mlx5_flow_hashfields_adjust
-					(dev_flow, tunnel, ETH_RSS_UDP,
-					 IBV_RX_HASH_SRC_PORT_UDP |
-					 IBV_RX_HASH_DST_PORT_UDP);
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
 			break;
@@ -7251,6 +7300,8 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 	assert(!flow_dv_check_valid_spec(matcher.mask.buf,
 					 dev_flow->dv.value.buf));
 	dev_flow->layers = item_flags;
+	if (action_flags & MLX5_FLOW_ACTION_RSS)
+		flow_dv_hashfields_set(dev_flow);
 	/* Register matcher. */
 	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
 				    matcher.mask.size);
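
With the per-item mlx5_flow_hashfields_adjust() calls removed above, hash
fields are now derived once per flow, after all items are translated and
dev_flow->layers is final, and only when an RSS action is actually present.
This keeps the partial-field flags consistent across layers. A hypothetical
trace through flow_dv_hashfields_set():

	/* Input (hypothetical):
	 *   dev_flow->layers = MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
	 *                      MLX5_FLOW_LAYER_OUTER_L4_UDP
	 *   flow->rss.types  = ETH_RSS_IP | ETH_RSS_L3_DST_ONLY
	 * L3 branch: IPv4 layer present, ETH_RSS_L3_DST_ONLY set
	 *   -> hash_fields |= IBV_RX_HASH_DST_IPV4
	 * L4 branch: ETH_RSS_UDP not requested -> no port fields added
	 * Result: hash_fields == IBV_RX_HASH_DST_IPV4
	 */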