net/mlx5: improve flow item IP validation
Currently the PMD doesn't check whether the Ethernet type specified
by the user conflicts with the IPv4/IPv6 item that follows, which
leads the HW to refuse to create the rule, for example:

... pattern eth type is 0x86dd / ipv4 / end ...

The Ethernet type is IPv6 but an IPv4 item follows; this should fail
validation and report the corresponding error in detail.
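For illustration, a minimal sketch of how such a conflicting pattern is
rejected up front through the generic rte_flow API (the helper name and
the drop action are illustrative, not part of this patch):

#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_flow.h>

/* eth type 0x86dd (IPv6) followed by an ipv4 item: a conflict. */
static int
validate_conflicting_rule(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.type = RTE_BE16(RTE_ETHER_TYPE_IPV6),
	};
	struct rte_flow_item_eth eth_mask = {
		.type = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* With this patch the PMD fails here with EINVAL and a detailed
	 * message, instead of the HW rejecting the rule at creation time.
	 */
	return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}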
Fixes: 23c1d42c71 ("net/mlx5: split flow validation to dedicated function")
Cc: stable@dpdk.org
Signed-off-by: Xiaoyu Min <jackmin@mellanox.com>
Acked-by: Ori Kam <orika@mellanox.com>
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1403,6 +1403,8 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
 int
 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 			     uint64_t item_flags,
+			     uint64_t last_item,
+			     uint16_t ether_type,
 			     const struct rte_flow_item_ipv4 *acc_mask,
 			     struct rte_flow_error *error)
 {
@@ -1423,7 +1425,16 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 				      MLX5_FLOW_LAYER_OUTER_L4;
 	int ret;
 	uint8_t next_proto = 0xFF;
+	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
+				  MLX5_FLOW_LAYER_OUTER_VLAN |
+				  MLX5_FLOW_LAYER_INNER_VLAN);
 
+	if ((last_item & l2_vlan) && ether_type &&
+	    ether_type != RTE_ETHER_TYPE_IPV4)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "IPv4 cannot follow L2/VLAN layer "
+					  "which ether type is not IPv4");
 	if (item_flags & MLX5_FLOW_LAYER_IPIP) {
 		if (mask && spec)
 			next_proto = mask->hdr.next_proto_id &
@@ -1494,6 +1505,8 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 int
 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 			     uint64_t item_flags,
+			     uint64_t last_item,
+			     uint16_t ether_type,
 			     const struct rte_flow_item_ipv6 *acc_mask,
 			     struct rte_flow_error *error)
 {
@@ -1519,7 +1532,16 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 				      MLX5_FLOW_LAYER_OUTER_L4;
 	int ret;
 	uint8_t next_proto = 0xFF;
+	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
+				  MLX5_FLOW_LAYER_OUTER_VLAN |
+				  MLX5_FLOW_LAYER_INNER_VLAN);
 
+	if ((last_item & l2_vlan) && ether_type &&
+	    ether_type != RTE_ETHER_TYPE_IPV6)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "IPv6 cannot follow L2/VLAN layer "
+					  "which ether type is not IPv6");
 	if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
 		if (mask && spec)
 			next_proto = mask->hdr.proto & spec->hdr.proto;
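The guard fires only when the immediately preceding item is an L2/VLAN
layer and the user actually constrained the Ethernet type: a spec-and-mask
product of 0 means "type not matched on", so no conflict can be inferred.
A standalone sketch of the same predicate (hypothetical helper, assuming
host-order inputs; not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical mirror of the new check: may an IP item with EtherType
 * 'expected' follow, given the previous layer bits and the effective
 * (spec & mask) Ethernet type? 0 means the type is unconstrained.
 */
static bool
ip_may_follow(uint64_t last_item, uint64_t l2_vlan_mask,
	      uint16_t ether_type, uint16_t expected)
{
	if (!(last_item & l2_vlan_mask))
		return true;	/* previous item is not L2/VLAN */
	if (ether_type == 0)
		return true;	/* type not matched on: nothing to conflict */
	return ether_type == expected;
}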
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -600,10 +600,14 @@ int mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
 				    struct rte_flow_error *error);
 int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 				 uint64_t item_flags,
+				 uint64_t last_item,
+				 uint16_t ether_type,
 				 const struct rte_flow_item_ipv4 *acc_mask,
 				 struct rte_flow_error *error);
 int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 				 uint64_t item_flags,
+				 uint64_t last_item,
+				 uint16_t ether_type,
 				 const struct rte_flow_item_ipv6 *acc_mask,
 				 struct rte_flow_error *error);
 int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -3396,6 +3396,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
 	uint8_t next_protocol = 0xff;
+	uint16_t ether_type = 0;
 	int actions_n = 0;
 	const struct rte_flow_item *gre_item = NULL;
 	struct rte_flow_item_tcp nic_tcp_mask = {
@@ -3432,6 +3433,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
 					     MLX5_FLOW_LAYER_OUTER_L2;
+			if (items->mask != NULL && items->spec != NULL) {
+				ether_type =
+					((const struct rte_flow_item_eth *)
+					 items->spec)->type;
+				ether_type &=
+					((const struct rte_flow_item_eth *)
+					 items->mask)->type;
+				ether_type = rte_be_to_cpu_16(ether_type);
+			} else {
+				ether_type = 0;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			ret = mlx5_flow_validate_item_vlan(items, item_flags,
@@ -3440,12 +3452,25 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
 					     MLX5_FLOW_LAYER_OUTER_VLAN;
+			if (items->mask != NULL && items->spec != NULL) {
+				ether_type =
+					((const struct rte_flow_item_vlan *)
+					 items->spec)->inner_type;
+				ether_type &=
+					((const struct rte_flow_item_vlan *)
+					 items->mask)->inner_type;
+				ether_type = rte_be_to_cpu_16(ether_type);
+			} else {
+				ether_type = 0;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			mlx5_flow_tunnel_ip_check(items, next_protocol,
 						  &item_flags, &tunnel);
 			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
-							   NULL, error);
+							   last_item,
+							   ether_type, NULL,
+							   error);
 			if (ret < 0)
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
@@ -3468,7 +3493,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			mlx5_flow_tunnel_ip_check(items, next_protocol,
 						  &item_flags, &tunnel);
 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
-							   NULL, error);
+							   last_item,
+							   ether_type, NULL,
+							   error);
 			if (ret < 0)
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
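In both validation paths the EtherType is trusted only when the item
carries both a spec and a mask; the effective value is their bitwise AND
converted to host order, and 0 otherwise, which keeps the new guard silent
for unconstrained types. A minimal standalone illustration (hypothetical
helper, not part of the patch):

#include <stddef.h>
#include <stdint.h>
#include <rte_byteorder.h>

/* Effective EtherType matched by a spec/mask pair, in host byte order;
 * 0 when either half is absent, i.e. the type is not matched on.
 */
static uint16_t
effective_ether_type(const rte_be16_t *spec, const rte_be16_t *mask)
{
	if (spec == NULL || mask == NULL)
		return 0;
	return rte_be_to_cpu_16(*spec & *mask);
}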
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1037,6 +1037,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
 	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
 	uint8_t next_protocol = 0xff;
+	uint16_t ether_type = 0;
 
 	if (items == NULL)
 		return -1;
@@ -1057,6 +1058,17 @@ flow_verbs_validate(struct rte_eth_dev *dev,
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
 					     MLX5_FLOW_LAYER_OUTER_L2;
+			if (items->mask != NULL && items->spec != NULL) {
+				ether_type =
+					((const struct rte_flow_item_eth *)
+					 items->spec)->type;
+				ether_type &=
+					((const struct rte_flow_item_eth *)
+					 items->mask)->type;
+				ether_type = rte_be_to_cpu_16(ether_type);
+			} else {
+				ether_type = 0;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			ret = mlx5_flow_validate_item_vlan(items, item_flags,
@@ -1067,10 +1079,23 @@ flow_verbs_validate(struct rte_eth_dev *dev,
 					       MLX5_FLOW_LAYER_INNER_VLAN) :
 					      (MLX5_FLOW_LAYER_OUTER_L2 |
 					       MLX5_FLOW_LAYER_OUTER_VLAN);
+			if (items->mask != NULL && items->spec != NULL) {
+				ether_type =
+					((const struct rte_flow_item_vlan *)
+					 items->spec)->inner_type;
+				ether_type &=
+					((const struct rte_flow_item_vlan *)
+					 items->mask)->inner_type;
+				ether_type = rte_be_to_cpu_16(ether_type);
+			} else {
+				ether_type = 0;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
-							   NULL, error);
+							   last_item,
+							   ether_type, NULL,
+							   error);
 			if (ret < 0)
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
@@ -1091,7 +1116,9 @@ flow_verbs_validate(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
-							   NULL, error);
+							   last_item,
+							   ether_type, NULL,
+							   error);
 			if (ret < 0)
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
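When either path rejects the pattern, the caller-supplied rte_flow_error
carries the detailed message added above. A hedged usage sketch (the
port/attr/pattern/actions setup from the earlier example is assumed):

#include <stdio.h>
#include <rte_flow.h>

/* Surface the PMD's detailed reason when a pattern fails validation. */
static void
report_validation(uint16_t port_id, const struct rte_flow_attr *attr,
		  const struct rte_flow_item *pattern,
		  const struct rte_flow_action *actions)
{
	struct rte_flow_error err = { 0 };

	if (rte_flow_validate(port_id, attr, pattern, actions, &err) != 0)
		printf("flow not valid: %s\n",
		       err.message ? err.message : "(no message)");
}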