net/mlx5: support IP-in-IP tunnel

Enabled IP-in-IP tunnel type support on the DV/DR flow engine.
This includes the following combinations:
 - IPv4 over IPv4
 - IPv4 over IPv6
 - IPv6 over IPv4
 - IPv6 over IPv6

The MLX5 NIC supports IP-in-IP tunnels via the FLEX Parser, so make
sure the firmware is using FLEX Parser profile 0:

  mlxconfig -d <mst device> -y set FLEX_PARSER_PROFILE_ENABLE=0
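
The new value takes effect after a firmware reset or reboot; it can then
be verified with a query (matching the documentation change below):

  mlxconfig -d <mst device> query | grep FLEX_PARSER_PROFILE_ENABLE
  FLEX_PARSER_PROFILE_ENABLE 0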

Example testpmd commands:

- Match on IPv4 over IPv4 packets and do inner RSS:

  testpmd> flow create 0 ingress pattern eth / ipv4 proto is 0x04 /
           ipv4 / udp / end actions rss level 2 queues 0 1 2 3 end / end

- Match on IPv6 over IPv4 packets and do inner RSS:

  testpmd> flow create 0 ingress pattern eth / ipv4 proto is 0x29 /
           ipv6 / udp / end actions rss level 2 queues 0 1 2 3 end / end
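
- Match on IPv4 over IPv6 packets and do inner RSS (an analogous command
  not present in the original message, assuming the same testpmd syntax;
  the outer IPv6 proto field carries the same IP protocol numbers):

  testpmd> flow create 0 ingress pattern eth / ipv6 proto is 0x04 /
           ipv4 / udp / end actions rss level 2 queues 0 1 2 3 end / end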

Signed-off-by: Xiaoyu Min <jackmin@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Author: Xiaoyu Min <jackmin@mellanox.com>, 2019-07-10 22:59:45 +08:00
Committed by: Ferruh Yigit
Commit: 5e33bebdd8 (parent: 35c2968b23)
5 changed files with 92 additions and 2 deletions

doc/guides/nics/mlx5.rst

@@ -76,7 +76,7 @@ Features
 - RX interrupts.
 - Statistics query including Basic, Extended and per queue.
 - Rx HW timestamp.
-- Tunnel types: VXLAN, L3 VXLAN, VXLAN-GPE, GRE, MPLSoGRE, MPLSoUDP.
+- Tunnel types: VXLAN, L3 VXLAN, VXLAN-GPE, GRE, MPLSoGRE, MPLSoUDP, IP-in-IP.
 - Tunnel HW offloads: packet type, inner/outer RSS, IP and UDP checksum verification.
 - NIC HW offloads: encapsulation (vxlan, gre, mplsoudp, mplsogre), NAT, routing, TTL
   increment/decrement, count, drop, mark. For details please see :ref:`Supported hardware offloads using rte_flow API`.
@@ -540,6 +540,19 @@ Firmware configuration
      mlxconfig -d <mst device> query | grep FLEX_PARSER_PROFILE_ENABLE
      FLEX_PARSER_PROFILE_ENABLE    2
 
+- IP-in-IP tunnel enable
+
+   .. code-block:: console
+
+      mlxconfig -d <mst device> set FLEX_PARSER_PROFILE_ENABLE=0
+
+   Verify configurations are set:
+
+   .. code-block:: console
+
+      mlxconfig -d <mst device> query | grep FLEX_PARSER_PROFILE_ENABLE
+      FLEX_PARSER_PROFILE_ENABLE    0
+
 Prerequisites
 -------------

doc/guides/rel_notes/release_19_08.rst

@@ -113,6 +113,7 @@ New Features
     sequence number and acknowledgment number modification.
   * Added support for match on ICMP/ICMP6 code and type.
   * Added support for matching on GRE's key and C,K,S present bits.
+  * Added support for IP-in-IP tunnel.
 
 * **Updated Solarflare network PMD.**

drivers/net/mlx5/mlx5_flow.c

@@ -1282,6 +1282,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 			     struct rte_flow_error *error)
 {
 	const struct rte_flow_item_ipv4 *mask = item->mask;
+	const struct rte_flow_item_ipv4 *spec = item->spec;
 	const struct rte_flow_item_ipv4 nic_mask = {
 		.hdr = {
 			.src_addr = RTE_BE32(0xffffffff),
@@ -1296,7 +1297,24 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
 				      MLX5_FLOW_LAYER_OUTER_L4;
 	int ret;
+	uint8_t next_proto = 0xFF;
+
+	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
+		if (mask && spec)
+			next_proto = mask->hdr.next_proto_id &
+				     spec->hdr.next_proto_id;
+		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
+			return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "multiple tunnel "
+						  "not supported");
+	}
+	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "wrong tunnel type - IPv6 specified "
+					  "but IPv4 item provided");
 	if (item_flags & l3m)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
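
For context, the added check folds mask & spec so that only an exact match
on the protocol byte is treated as a tunnel declaration. A minimal
application-side sketch (illustrative, not part of the patch) of an outer
IPv4 item that this validation accepts for IPv6-in-IPv4:

#include <netinet/in.h>	/* IPPROTO_IPV6 (41, i.e. 0x29) */
#include <rte_flow.h>

/* Outer IPv4 item declaring an IPv6-in-IPv4 tunnel: the spec sets the
 * protocol byte and the mask makes it an exact match, so the driver's
 * "mask & spec" computation resolves to IPPROTO_IPV6. */
static const struct rte_flow_item_ipv4 outer_spec = {
	.hdr = { .next_proto_id = IPPROTO_IPV6 },
};
static const struct rte_flow_item_ipv4 outer_mask = {
	.hdr = { .next_proto_id = 0xff },
};
static const struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4,	/* outer (tunnel) header */
	  .spec = &outer_spec, .mask = &outer_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },	/* inner header */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};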
@@ -1346,6 +1364,7 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 			     struct rte_flow_error *error)
 {
 	const struct rte_flow_item_ipv6 *mask = item->mask;
+	const struct rte_flow_item_ipv6 *spec = item->spec;
 	const struct rte_flow_item_ipv6 nic_mask = {
 		.hdr = {
 			.src_addr =
@@ -1365,7 +1384,23 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
 				      MLX5_FLOW_LAYER_OUTER_L4;
 	int ret;
+	uint8_t next_proto = 0xFF;
+
+	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
+		if (mask && spec)
+			next_proto = mask->hdr.proto & spec->hdr.proto;
+		if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
+			return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  item,
+						  "multiple tunnel "
+						  "not supported");
+	}
+	if (item_flags & MLX5_FLOW_LAYER_IPIP)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "wrong tunnel type - IPv4 specified "
+					  "but IPv6 item provided");
 	if (item_flags & l3m)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,

drivers/net/mlx5/mlx5_flow.h

@@ -55,6 +55,9 @@
 #define MLX5_FLOW_LAYER_ICMP6 (1u << 19)
 #define MLX5_FLOW_LAYER_GRE_KEY (1u << 20)
+#define MLX5_FLOW_LAYER_IPIP (1u << 21)
+#define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 22)
+
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -67,7 +70,8 @@
 /* Tunnel Masks. */
 #define MLX5_FLOW_LAYER_TUNNEL \
 	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
-	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)
+	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS | \
+	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP)
 
 /* Inner Masks. */
 #define MLX5_FLOW_LAYER_INNER_L3 \
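
Since the two new bits are ORed into MLX5_FLOW_LAYER_TUNNEL, every place
that tests the aggregate mask now recognizes IP-in-IP flows as tunnelled.
A one-line illustration (assumed idiom, not a line from this patch):

/* item_flags containing MLX5_FLOW_LAYER_IPIP or _IPV6_ENCAP now makes
 * this aggregate test true, so later items are parsed as inner layers. */
const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);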

drivers/net/mlx5/mlx5_flow_dv.c

@@ -141,6 +141,39 @@ struct field_modify_info modify_tcp[] = {
 	{0, 0, 0},
 };
 
+static void
+mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item, uint64_t *flags)
+{
+	uint8_t next_protocol = 0xFF;
+
+	if (item->mask != NULL) {
+		switch (item->type) {
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			next_protocol =
+				((const struct rte_flow_item_ipv4 *)
+				 (item->spec))->hdr.next_proto_id;
+			next_protocol &=
+				((const struct rte_flow_item_ipv4 *)
+				 (item->mask))->hdr.next_proto_id;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			next_protocol =
+				((const struct rte_flow_item_ipv6 *)
+				 (item->spec))->hdr.proto;
+			next_protocol &=
+				((const struct rte_flow_item_ipv6 *)
+				 (item->mask))->hdr.proto;
+			break;
+		default:
+			break;
+		}
+	}
+	if (next_protocol == IPPROTO_IPIP)
+		*flags |= MLX5_FLOW_LAYER_IPIP;
+	if (next_protocol == IPPROTO_IPV6)
+		*flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
+}
+
 /**
  * Acquire the synchronizing object to protect multithreaded access
  * to shared dv context. Lock occurs only if context is actually
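
A worked trace of the helper (hypothetical values, mirroring the first
testpmd example): for the pattern "eth / ipv4 proto is 0x04 / ipv4", the
outer item carries spec->hdr.next_proto_id = 0x04 and
mask->hdr.next_proto_id = 0xff, so next_protocol = 0x04 & 0xff =
IPPROTO_IPIP:

/* Hypothetical call site: "items" points at the outer IPv4 item. */
uint64_t last_item = MLX5_FLOW_LAYER_OUTER_L3_IPV4;

mlx5_flow_tunnel_ip_check(items, &last_item);
/* last_item now also has MLX5_FLOW_LAYER_IPIP set, so once it is
 * folded into item_flags, the following ipv4/udp items are treated
 * as inner-layer matches. */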
@@ -2356,6 +2389,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				/* Reset for inner layer. */
 				next_protocol = 0xff;
 			}
+			mlx5_flow_tunnel_ip_check(items, &last_item);
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
@@ -2377,6 +2411,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				/* Reset for inner layer. */
 				next_protocol = 0xff;
 			}
+			mlx5_flow_tunnel_ip_check(items, &last_item);
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			ret = mlx5_flow_validate_item_tcp
@@ -4427,6 +4462,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 					    MLX5_IPV4_IBV_RX_HASH);
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+			mlx5_flow_tunnel_ip_check(items, &last_item);
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			flow_dv_translate_item_ipv6(match_mask, match_value,
@@ -4439,6 +4475,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 					      MLX5_IPV6_IBV_RX_HASH);
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+			mlx5_flow_tunnel_ip_check(items, &last_item);
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			flow_dv_translate_item_tcp(match_mask, match_value,