net/mlx5: add flow IPv4 item

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
Author: Nelio Laranjeiro
Date: 2018-07-12 11:30:54 +02:00
Committer: Shahaf Shuler
parent 109723ed9b
commit 4899185ff9

@@ -506,6 +506,93 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
	return size;
}

/**
 * Convert the @p item into a Verbs specification after ensuring the NIC
 * will understand and process it correctly.
 * If the necessary size for the conversion is greater than the @p flow_size,
 * nothing is written in @p flow; the validation is still performed.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[in] flow_size
 *   Size in bytes of the available space in @p flow; if too small, nothing is
 *   written.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   On success, the number of bytes consumed/necessary; if the returned value
 *   is less than or equal to @p flow_size, the @p item has been fully
 *   converted, otherwise another call with this returned memory size should
 *   be done.
 *   On error, a negative errno value is returned and rte_errno is set.
 */
static int
mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
		    const size_t flow_size, struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT,
		.size = size,
	};
	int ret;

	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "multiple L3 layers not supported");
	else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "L3 cannot follow an L4 layer.");
	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_ipv4), error);
	if (ret < 0)
		return ret;
	flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	if (size > flow_size)
		return size;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	mlx5_flow_spec_verbs_add(flow, &ipv4, size);
	return size;
}
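
For context only (not part of this patch): a minimal application-side sketch of an rte_flow pattern that this new converter would handle, assuming a hypothetical match on destination network 10.0.0.0/24 carrying UDP (variable names below are illustrative). Only the fields covered by nic_mask above (addresses, TOS, next protocol) may be masked; mlx5_flow_item_acceptable() rejects anything else, and the converter then trims the spec values against the mask before building the Verbs spec.

#include <netinet/in.h>		/* IPPROTO_UDP */
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Illustrative spec/mask pair: destination 10.0.0.0/24, protocol UDP. */
static const struct rte_flow_item_ipv4 ipv4_spec = {
	.hdr = {
		.dst_addr = RTE_BE32(0x0a000000),	/* 10.0.0.0 */
		.next_proto_id = IPPROTO_UDP,
	},
};
static const struct rte_flow_item_ipv4 ipv4_mask = {
	.hdr = {
		.dst_addr = RTE_BE32(0xffffff00),	/* /24 prefix */
		.next_proto_id = 0xff,			/* exact protocol match */
	},
};
static const struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.spec = &ipv4_spec,
		.mask = &ipv4_mask,
	},
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

When mlx5_flow_items() walks such a pattern, the IPv4 entry is dispatched to mlx5_flow_item_ipv4(), which records MLX5_FLOW_LAYER_OUTER_L3_IPV4 in flow->layers and appends the resulting ibv_flow_spec_ipv4_ext only when @p flow_size is large enough; otherwise it returns the size needed so the caller can retry with a larger buffer.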
/**
 * Convert the @p pattern into Verbs specifications after ensuring the NIC
 * will understand and process it correctly.
@@ -551,6 +638,9 @@ mlx5_flow_items(const struct rte_flow_item pattern[],
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_item_vlan(pattern, flow, remain, error);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_item_ipv4(pattern, flow, remain, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,