net/mlx5: support ESP item on Windows
The ESP item is not supported on Windows, yet it is expanded from the expansion graph when trying to create a default flow to RSS all packets. Add support for ESP item matching on Windows (without the ability to match on the SPI field). Split ESP validation per OS. Signed-off-by: Raja Zidane <rzidane@nvidia.com> Acked-by: Matan Azrad <matan@nvidia.com>
This commit is contained in:
parent
2192599c75
commit
fb96caa56a
@ -9,6 +9,45 @@
|
||||
/* Key of thread specific flow workspace data. */
|
||||
static rte_thread_key key_workspace;
|
||||
|
||||
/**
 * Validate ESP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
			       uint64_t item_flags,
			       uint8_t target_protocol,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item_esp *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	/* ESP sits at L4; select inner vs. outer layer bits by tunnel state. */
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	/* An L3 item must precede any L4 match. */
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	/* Only a single L4 layer can be matched in one flow. */
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	/* The preceding L3 next-protocol, when specified, must be ESP. */
	if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ESP layer");
	/* Fall back to the rte_flow default ESP mask when none is given. */
	if (!mask)
		mask = &rte_flow_item_esp_mask;
	/* Reject mask bits outside the supported default ESP mask. */
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_esp_mask,
		 sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
		 error);
	if (ret < 0)
		return ret;
	return 0;
}
|
||||
|
||||
int
|
||||
mlx5_flow_os_init_workspace_once(void)
|
||||
{
|
||||
|
@ -482,4 +482,26 @@ mlx5_os_flow_dr_sync_domain(void *domain, uint32_t flags)
|
||||
{
|
||||
return mlx5_glue->dr_sync_domain(domain, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate ESP item.
|
||||
*
|
||||
* @param[in] item
|
||||
* Item specification.
|
||||
* @param[in] item_flags
|
||||
* Bit-fields that holds the items detected until now.
|
||||
* @param[in] target_protocol
|
||||
* The next protocol in the previous item.
|
||||
* @param[out] error
|
||||
* Pointer to error structure.
|
||||
*
|
||||
* @return
|
||||
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
||||
*/
|
||||
int
|
||||
mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
|
||||
uint64_t item_flags,
|
||||
uint8_t target_protocol,
|
||||
struct rte_flow_error *error);
|
||||
|
||||
#endif /* RTE_PMD_MLX5_FLOW_OS_H_ */
|
||||
|
@ -2646,60 +2646,6 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate ESP item.
|
||||
*
|
||||
* @param[in] item
|
||||
* Item specification.
|
||||
* @param[in] item_flags
|
||||
* Bit-fields that holds the items detected until now.
|
||||
* @param[in] target_protocol
|
||||
* The next protocol in the previous item.
|
||||
* @param[out] error
|
||||
* Pointer to error structure.
|
||||
*
|
||||
* @return
|
||||
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
||||
*/
|
||||
int
|
||||
mlx5_flow_validate_item_esp(const struct rte_flow_item *item,
|
||||
uint64_t item_flags,
|
||||
uint8_t target_protocol,
|
||||
struct rte_flow_error *error)
|
||||
{
|
||||
const struct rte_flow_item_esp *mask = item->mask;
|
||||
const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
|
||||
const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
|
||||
MLX5_FLOW_LAYER_OUTER_L3;
|
||||
const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
|
||||
MLX5_FLOW_LAYER_OUTER_L4;
|
||||
int ret;
|
||||
|
||||
if (!(item_flags & l3m))
|
||||
return rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM, item,
|
||||
"L3 is mandatory to filter on L4");
|
||||
if (item_flags & l4m)
|
||||
return rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM, item,
|
||||
"multiple L4 layers not supported");
|
||||
if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
|
||||
return rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM, item,
|
||||
"protocol filtering not compatible"
|
||||
" with ESP layer");
|
||||
if (!mask)
|
||||
mask = &rte_flow_item_esp_mask;
|
||||
ret = mlx5_flow_item_acceptable
|
||||
(item, (const uint8_t *)mask,
|
||||
(const uint8_t *)&rte_flow_item_esp_mask,
|
||||
sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
|
||||
error);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate UDP item.
|
||||
*
|
||||
|
@ -1828,10 +1828,6 @@ int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
|
||||
uint8_t target_protocol,
|
||||
const struct rte_flow_item_tcp *flow_mask,
|
||||
struct rte_flow_error *error);
|
||||
int mlx5_flow_validate_item_esp(const struct rte_flow_item *item,
|
||||
uint64_t item_flags,
|
||||
uint8_t target_protocol,
|
||||
struct rte_flow_error *error);
|
||||
int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
|
||||
uint64_t item_flags,
|
||||
uint8_t target_protocol,
|
||||
|
@ -6957,7 +6957,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
|
||||
case RTE_FLOW_ITEM_TYPE_VOID:
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_ESP:
|
||||
ret = mlx5_flow_validate_item_esp(items, item_flags,
|
||||
ret = mlx5_flow_os_validate_item_esp(items, item_flags,
|
||||
next_protocol,
|
||||
error);
|
||||
if (ret < 0)
|
||||
|
@ -416,3 +416,48 @@ mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
|
||||
rte_errno = old_err;
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
 * Validate ESP item (Windows implementation).
 *
 * Matching on the ESP SPI field is not supported on Windows; a request
 * to match non-zero SPI bits is rejected here.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
			       uint64_t item_flags,
			       uint8_t target_protocol,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item_esp *mask = item->mask;
	const struct rte_flow_item_esp *spec = item->spec;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	/* ESP sits at L4; select inner vs. outer layer bits by tunnel state. */
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
				      MLX5_FLOW_LAYER_OUTER_L3;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	/* An L3 item must precede any L4 match. */
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L3 is mandatory to filter on L4");
	/* Only a single L4 layer can be matched in one flow. */
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	/* The preceding L3 next-protocol, when specified, must be ESP. */
	if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ESP layer");
	/* Fall back to the rte_flow default ESP mask when none is given. */
	if (!mask)
		mask = &rte_flow_item_esp_mask;
	/*
	 * SPI matching cannot be offloaded on Windows.
	 * NOTE(review): a spec with hdr.spi == 0 under a non-zero mask (e.g.
	 * the default full mask) passes this check even though it requests an
	 * SPI match the HW cannot perform — confirm this is intentional.
	 */
	if (spec && (spec->hdr.spi & mask->hdr.spi))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "matching on spi field in esp is not"
					  " supported on Windows");
	/* Reject mask bits outside the supported default ESP mask. */
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_esp_mask,
		 sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
		 error);
	if (ret < 0)
		return ret;
	return 0;
}
|
||||
|
@ -46,6 +46,7 @@ mlx5_flow_os_item_supported(int item)
|
||||
case RTE_FLOW_ITEM_TYPE_TCP:
|
||||
case RTE_FLOW_ITEM_TYPE_IPV6:
|
||||
case RTE_FLOW_ITEM_TYPE_VLAN:
|
||||
case RTE_FLOW_ITEM_TYPE_ESP:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
@ -426,4 +427,26 @@ int mlx5_flow_os_create_flow(void *matcher, void *match_value,
|
||||
size_t num_actions,
|
||||
void *actions[], void **flow);
|
||||
int mlx5_flow_os_destroy_flow(void *drv_flow_ptr);
|
||||
|
||||
/**
|
||||
* Validate ESP item.
|
||||
*
|
||||
* @param[in] item
|
||||
* Item specification.
|
||||
* @param[in] item_flags
|
||||
* Bit-fields that holds the items detected until now.
|
||||
* @param[in] target_protocol
|
||||
* The next protocol in the previous item.
|
||||
* @param[out] error
|
||||
* Pointer to error structure.
|
||||
*
|
||||
* @return
|
||||
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
||||
*/
|
||||
int
|
||||
mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
|
||||
uint64_t item_flags,
|
||||
uint8_t target_protocol,
|
||||
struct rte_flow_error *error);
|
||||
|
||||
#endif /* RTE_PMD_MLX5_FLOW_OS_H_ */
|
||||
|
Loading…
Reference in New Issue
Block a user