net/mlx5: validate TOS and TTL on E-Switch
This patch adds validation of the type-of-service and time-to-live IP header fields on E-Switch, both for match pattern and VXLAN encapsulation action IP header items. The E-Switch flows will use the common mlx5_flow_validate_item_ipv4/6 routines with an added extra parameter specifying the supported fields mask. Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
This commit is contained in:
parent
363fa2f296
commit
55c61fa714
@ -1141,6 +1141,9 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
|
||||
* Item specification.
|
||||
* @param[in] item_flags
|
||||
* Bit-fields that holds the items detected until now.
|
||||
* @param[in] acc_mask
|
||||
* Acceptable mask, if NULL default internal default mask
|
||||
* will be used to check whether item fields are supported.
|
||||
* @param[out] error
|
||||
* Pointer to error structure.
|
||||
*
|
||||
@ -1150,6 +1153,7 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
|
||||
int
|
||||
mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
|
||||
uint64_t item_flags,
|
||||
const struct rte_flow_item_ipv4 *acc_mask,
|
||||
struct rte_flow_error *error)
|
||||
{
|
||||
const struct rte_flow_item_ipv4 *mask = item->mask;
|
||||
@ -1185,7 +1189,8 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
|
||||
"partial mask is not supported"
|
||||
" for protocol");
|
||||
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
|
||||
(const uint8_t *)&nic_mask,
|
||||
acc_mask ? (const uint8_t *)acc_mask
|
||||
: (const uint8_t *)&nic_mask,
|
||||
sizeof(struct rte_flow_item_ipv4),
|
||||
error);
|
||||
if (ret < 0)
|
||||
@ -1200,6 +1205,9 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
|
||||
* Item specification.
|
||||
* @param[in] item_flags
|
||||
* Bit-fields that holds the items detected until now.
|
||||
* @param[in] acc_mask
|
||||
* Acceptable mask, if NULL default internal default mask
|
||||
* will be used to check whether item fields are supported.
|
||||
* @param[out] error
|
||||
* Pointer to error structure.
|
||||
*
|
||||
@ -1209,6 +1217,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
|
||||
int
|
||||
mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
|
||||
uint64_t item_flags,
|
||||
const struct rte_flow_item_ipv6 *acc_mask,
|
||||
struct rte_flow_error *error)
|
||||
{
|
||||
const struct rte_flow_item_ipv6 *mask = item->mask;
|
||||
@ -1243,7 +1252,8 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
|
||||
if (!mask)
|
||||
mask = &rte_flow_item_ipv6_mask;
|
||||
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
|
||||
(const uint8_t *)&nic_mask,
|
||||
acc_mask ? (const uint8_t *)acc_mask
|
||||
: (const uint8_t *)&nic_mask,
|
||||
sizeof(struct rte_flow_item_ipv6),
|
||||
error);
|
||||
if (ret < 0)
|
||||
|
@ -431,9 +431,11 @@ int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
|
||||
struct rte_flow_error *error);
|
||||
int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
|
||||
uint64_t item_flags,
|
||||
const struct rte_flow_item_ipv4 *acc_mask,
|
||||
struct rte_flow_error *error);
|
||||
int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
|
||||
uint64_t item_flags,
|
||||
const struct rte_flow_item_ipv6 *acc_mask,
|
||||
struct rte_flow_error *error);
|
||||
int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
|
||||
const struct rte_flow_item *item,
|
||||
|
@ -1675,7 +1675,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_IPV4:
|
||||
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
|
||||
error);
|
||||
NULL, error);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
|
||||
@ -1696,7 +1696,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_IPV6:
|
||||
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
|
||||
error);
|
||||
NULL, error);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
|
||||
|
@ -538,11 +538,15 @@ static const struct {
|
||||
},
|
||||
.ipv4.hdr = {
|
||||
.next_proto_id = 0xff,
|
||||
.time_to_live = 0xff,
|
||||
.type_of_service = 0xff,
|
||||
.src_addr = RTE_BE32(0xffffffff),
|
||||
.dst_addr = RTE_BE32(0xffffffff),
|
||||
},
|
||||
.ipv6.hdr = {
|
||||
.proto = 0xff,
|
||||
.vtc_flow = RTE_BE32(0xfful << IPV6_HDR_FL_SHIFT),
|
||||
.hop_limits = 0xff,
|
||||
.src_addr =
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff",
|
||||
@ -1595,8 +1599,9 @@ flow_tcf_validate_vxlan_encap(const struct rte_flow_action *action,
|
||||
break;
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_IPV4:
|
||||
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
|
||||
error);
|
||||
ret = mlx5_flow_validate_item_ipv4
|
||||
(items, item_flags,
|
||||
&flow_tcf_mask_supported.ipv4, error);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ret = flow_tcf_validate_vxlan_encap_ipv4(items, error);
|
||||
@ -1605,8 +1610,9 @@ flow_tcf_validate_vxlan_encap(const struct rte_flow_action *action,
|
||||
item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_IPV6:
|
||||
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
|
||||
error);
|
||||
ret = mlx5_flow_validate_item_ipv6
|
||||
(items, item_flags,
|
||||
&flow_tcf_mask_supported.ipv6, error);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ret = flow_tcf_validate_vxlan_encap_ipv6(items, error);
|
||||
@ -2125,8 +2131,9 @@ flow_tcf_validate(struct rte_eth_dev *dev,
|
||||
vlan_etype = spec.vlan->inner_type;
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_IPV4:
|
||||
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
|
||||
error);
|
||||
ret = mlx5_flow_validate_item_ipv4
|
||||
(items, item_flags,
|
||||
&flow_tcf_mask_supported.ipv4, error);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
|
||||
@ -2185,8 +2192,9 @@ flow_tcf_validate(struct rte_eth_dev *dev,
|
||||
}
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_IPV6:
|
||||
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
|
||||
error);
|
||||
ret = mlx5_flow_validate_item_ipv6
|
||||
(items, item_flags,
|
||||
&flow_tcf_mask_supported.ipv6, error);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
|
||||
|
@ -1053,7 +1053,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_IPV4:
|
||||
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
|
||||
error);
|
||||
NULL, error);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
|
||||
@ -1074,7 +1074,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_IPV6:
|
||||
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
|
||||
error);
|
||||
NULL, error);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
|
||||
|
Loading…
x
Reference in New Issue
Block a user