net/mlx5: add flow UDP item
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
commit 535f686e54
parent 62b2c4d925
@@ -52,6 +52,9 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 #define MLX5_FLOW_FATE_DROP (1u << 0)
 #define MLX5_FLOW_FATE_QUEUE (1u << 1)
 
+/* possible L3 layers protocols filtering. */
+#define MLX5_IP_PROTOCOL_UDP 17
+
 /** Handles information leading to a drop fate. */
 struct mlx5_flow_verbs {
 	unsigned int size; /**< Size of the attribute. */
@@ -68,10 +71,12 @@ struct mlx5_flow_verbs {
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
 	struct rte_flow_attr attributes; /**< User flow attribute. */
+	uint32_t l3_protocol_en:1; /**< Protocol filtering requested. */
 	uint32_t layers;
 	/**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */
 	uint32_t fate;
 	/**< Bit-fields of present fate see MLX5_FLOW_FATE_*. */
+	uint8_t l3_protocol; /**< valid when l3_protocol_en is set. */
 	struct mlx5_flow_verbs verbs; /* Verbs flow. */
 	uint16_t queue; /**< Destination queue to redirect traffic to. */
 };
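
Note: the two fields added above work as a pair. An L3 item that masks its next-protocol field sets l3_protocol_en and latches the value into l3_protocol; a later L4 item checks it for consistency. A minimal stand-alone sketch of that contract (simplified types for illustration, not the driver code itself):

	/* Simplified sketch of the protocol cross-check introduced here. */
	struct flow_state {
		uint32_t l3_protocol_en:1; /* L3 item requested protocol filtering. */
		uint8_t l3_protocol;       /* IANA protocol number from the L3 item. */
	};

	static int
	l4_item_allowed(const struct flow_state *flow, uint8_t l4_proto)
	{
		if (!flow->l3_protocol_en)
			return 1; /* No L3 protocol filter: any L4 item fits. */
		return flow->l3_protocol == l4_proto; /* e.g. 17 for UDP. */
	}
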
@@ -568,8 +573,6 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
 	if (ret < 0)
 		return ret;
 	flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
-	if (size > flow_size)
-		return size;
 	if (spec) {
 		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
 			.src_ip = spec->hdr.src_addr,
@@ -589,7 +592,10 @@ mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
 		ipv4.val.proto &= ipv4.mask.proto;
 		ipv4.val.tos &= ipv4.mask.tos;
 	}
-	mlx5_flow_spec_verbs_add(flow, &ipv4, size);
+	flow->l3_protocol_en = !!ipv4.mask.proto;
+	flow->l3_protocol = ipv4.val.proto;
+	if (size <= flow_size)
+		mlx5_flow_spec_verbs_add(flow, &ipv4, size);
 	return size;
 }
 
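
Note: moving the size check after the spec/mask merge means validation and l3_protocol recording now always run; the Verbs spec is written only when it fits. Callers are expected to follow the two-pass convention described in the function comments, roughly as below (hypothetical caller; grow_flow() is an assumed reallocation helper, not part of the driver):

	static int
	convert_item(const struct rte_flow_item *item, struct rte_flow *flow,
		     size_t flow_size, struct rte_flow_error *error)
	{
		/* Pass 1: always validates, writes only if flow_size suffices. */
		int ret = mlx5_flow_item_ipv4(item, flow, flow_size, error);

		if (ret < 0)
			return ret; /* Item rejected, rte_errno is set. */
		if ((size_t)ret <= flow_size)
			return ret; /* Fully converted in one pass. */
		flow = grow_flow(flow, ret); /* Hypothetical helper. */
		/* Pass 2: retry with the size reported by the first call. */
		return mlx5_flow_item_ipv4(item, flow, ret, error);
	}
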
@@ -660,8 +666,6 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
 	if (ret < 0)
 		return ret;
 	flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
-	if (size > flow_size)
-		return size;
 	if (spec) {
 		unsigned int i;
 		uint32_t vtc_flow_val;
@@ -701,7 +705,85 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
 		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
 		ipv6.val.hop_limit &= ipv6.mask.hop_limit;
 	}
-	mlx5_flow_spec_verbs_add(flow, &ipv6, size);
+	flow->l3_protocol_en = !!ipv6.mask.next_hdr;
+	flow->l3_protocol = ipv6.val.next_hdr;
+	if (size <= flow_size)
+		mlx5_flow_spec_verbs_add(flow, &ipv6, size);
+	return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than the @p flow_size,
+ * nothing is written in @p flow, the validation is still performed.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ * @param[in] flow_size
+ *   Size in bytes of the available space in @p flow, if too small, nothing is
+ *   written.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   On success the number of bytes consumed/necessary, if the returned value
+ *   is lesser or equal to @p flow_size, the @p item has fully been converted,
+ *   otherwise another call with this returned memory size should be done.
+ *   On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
+		   const size_t flow_size, struct rte_flow_error *error)
+{
+	const struct rte_flow_item_udp *spec = item->spec;
+	const struct rte_flow_item_udp *mask = item->mask;
+	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
+	struct ibv_flow_spec_tcp_udp udp = {
+		.type = IBV_FLOW_SPEC_UDP,
+		.size = size,
+	};
+	int ret;
+
+	if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "L3 is mandatory to filter on L4");
+	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "L4 layer is already present");
+	if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "protocol filtering not compatible"
+					  " with UDP layer");
+	if (!mask)
+		mask = &rte_flow_item_udp_mask;
+	ret = mlx5_flow_item_acceptable
+		(item, (const uint8_t *)mask,
+		 (const uint8_t *)&rte_flow_item_udp_mask,
+		 sizeof(struct rte_flow_item_udp), error);
+	if (ret < 0)
+		return ret;
+	flow->layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
+	if (size > flow_size)
+		return size;
+	if (spec) {
+		udp.val.dst_port = spec->hdr.dst_port;
+		udp.val.src_port = spec->hdr.src_port;
+		udp.mask.dst_port = mask->hdr.dst_port;
+		udp.mask.src_port = mask->hdr.src_port;
+		/* Remove unwanted bits from values. */
+		udp.val.src_port &= udp.mask.src_port;
+		udp.val.dst_port &= udp.mask.dst_port;
+	}
+	mlx5_flow_spec_verbs_add(flow, &udp, size);
 	return size;
 }
 
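
Note: an rte_flow rule exercising the new helper must carry an outer L3 item before the UDP item, per the checks above. A sketch using the public rte_flow API (assumptions: port_id is started, queue 0 is configured, VXLAN port 4789 chosen arbitrarily; error handling trimmed):

	#include <rte_byteorder.h>
	#include <rte_flow.h>

	static struct rte_flow *
	create_udp_rule(uint16_t port_id, struct rte_flow_error *error)
	{
		struct rte_flow_attr attr = { .ingress = 1 };
		struct rte_flow_item_udp udp_spec = {
			.hdr = { .dst_port = RTE_BE16(4789) }, /* Match on dst port. */
		};
		struct rte_flow_item_udp udp_mask = {
			.hdr = { .dst_port = RTE_BE16(0xffff) },
		};
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* L3 before L4. */
			{
				.type = RTE_FLOW_ITEM_TYPE_UDP,
				.spec = &udp_spec,
				.mask = &udp_mask,
			},
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action_queue queue = { .index = 0 };
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};

		return rte_flow_create(port_id, &attr, pattern, actions, error);
	}
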
@@ -756,6 +838,9 @@ mlx5_flow_items(const struct rte_flow_item pattern[],
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			ret = mlx5_flow_item_ipv6(pattern, flow, remain, error);
 			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			ret = mlx5_flow_item_udp(pattern, flow, remain, error);
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
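
Note: with the dispatch entry above, the same match can be tried quickly from testpmd's flow syntax (assuming port 0 and a configured queue 0):

	flow create 0 ingress pattern eth / ipv4 / udp dst is 4789 / end actions queue index 0 / end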