net/iavf: support raw packet for flow subscription

Add Protocol Agnostic Flow (raw flow) support for flow subscription
in AVF.

For example, testpmd creates a raw-packet flow subscription rule:
rule: eth + ipv4 src is 1.1.1.1 dst is 2.2.2.2

cmd: flow create 0 ingress pattern raw pattern spec \
00000000000000000000000008004500001400000000000000000101010102020202 \
pattern mask \
0000000000000000000000000000000000000000000000000000FFFFFFFFFFFFFFFF \
/ end actions port_representor port_id 0 / end
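
In this spec, the pattern is an ASCII hex string: 34 bytes covering an
Ethernet header (EtherType 0x0800) plus a 20-byte IPv4 header. The mask
wildcards everything except the trailing eight bytes, i.e. the IPv4
source (01010101 = 1.1.1.1) and destination (02020202 = 2.2.2.2)
addresses.

The equivalent rule can also be built from C through the generic
rte_flow API. The sketch below is illustrative only (subscribe_raw_flow()
and the choice of port 0 are assumptions, not part of this patch); it
passes the pattern and mask as the same ASCII hex strings, which is the
form this parser decodes:

#include <string.h>
#include <rte_flow.h>

/* Hex strings exactly as given on the testpmd command line; the iavf
 * parser decodes two ASCII hex digits into each packet byte. */
static const char spec_hex[] =
        "00000000000000000000000008004500001400000000000000000101010102020202";
static const char mask_hex[] =
        "0000000000000000000000000000000000000000000000000000FFFFFFFFFFFFFFFF";

static struct rte_flow *
subscribe_raw_flow(uint16_t port_id, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_raw raw_spec = {
                .length = (uint16_t)strlen(spec_hex),
                .pattern = (const uint8_t *)spec_hex,
        };
        struct rte_flow_item_raw raw_mask = {
                .length = (uint16_t)strlen(mask_hex),
                .pattern = (const uint8_t *)mask_hex,
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_RAW,
                  .spec = &raw_spec, .mask = &raw_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        /* port_representor port_id 0, as in the testpmd command */
        struct rte_flow_action_ethdev representor = { .port_id = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR,
                  .conf = &representor },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
}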

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
@@ -57,6 +57,7 @@ static struct iavf_flow_parser iavf_fsub_parser;
 
 static struct
 iavf_pattern_match_item iavf_fsub_pattern_list[] = {
+        {iavf_pattern_raw, IAVF_INSET_NONE, IAVF_INSET_NONE},
         {iavf_pattern_ethertype, IAVF_SW_INSET_ETHER, IAVF_INSET_NONE},
         {iavf_pattern_eth_ipv4, IAVF_SW_INSET_MAC_IPV4, IAVF_INSET_NONE},
         {iavf_pattern_eth_vlan_ipv4, IAVF_SW_INSET_MAC_VLAN_IPV4, IAVF_INSET_NONE},
@@ -153,6 +154,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 {
         struct virtchnl_proto_hdrs *hdrs = &filter->sub_fltr.proto_hdrs;
         enum rte_flow_item_type item_type;
+        const struct rte_flow_item_raw *raw_spec, *raw_mask;
         const struct rte_flow_item_eth *eth_spec, *eth_mask;
         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
@@ -164,20 +166,83 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
         uint64_t outer_input_set = IAVF_INSET_NONE;
         uint64_t *input = NULL;
         uint16_t input_set_byte = 0;
-        uint16_t j;
+        uint8_t item_num = 0;
         uint32_t layer = 0;
 
-        for (item = pattern; item->type !=
-                        RTE_FLOW_ITEM_TYPE_END; item++) {
+        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                 if (item->last) {
                         rte_flow_error_set(error, EINVAL,
                                            RTE_FLOW_ERROR_TYPE_ITEM,
                                            item, "Not support range");
-                        return false;
+                        return -rte_errno;
                 }
 
                 item_type = item->type;
+                item_num++;
 
                 switch (item_type) {
+                case RTE_FLOW_ITEM_TYPE_RAW: {
+                        raw_spec = item->spec;
+                        raw_mask = item->mask;
+
+                        if (item_num != 1)
+                                return -rte_errno;
+
+                        if (raw_spec->length != raw_mask->length)
+                                return -rte_errno;
+
+                        uint16_t pkt_len = 0;
+                        uint16_t tmp_val = 0;
+                        uint8_t tmp = 0;
+                        int i, j;
+
+                        pkt_len = raw_spec->length;
+
+                        for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
+                                tmp = raw_spec->pattern[i];
+                                if (tmp >= 'a' && tmp <= 'f')
+                                        tmp_val = tmp - 'a' + 10;
+                                if (tmp >= 'A' && tmp <= 'F')
+                                        tmp_val = tmp - 'A' + 10;
+                                if (tmp >= '0' && tmp <= '9')
+                                        tmp_val = tmp - '0';
+
+                                tmp_val *= 16;
+                                tmp = raw_spec->pattern[i + 1];
+                                if (tmp >= 'a' && tmp <= 'f')
+                                        tmp_val += (tmp - 'a' + 10);
+                                if (tmp >= 'A' && tmp <= 'F')
+                                        tmp_val += (tmp - 'A' + 10);
+                                if (tmp >= '0' && tmp <= '9')
+                                        tmp_val += (tmp - '0');
+
+                                hdrs->raw.spec[j] = tmp_val;
+
+                                tmp = raw_mask->pattern[i];
+                                if (tmp >= 'a' && tmp <= 'f')
+                                        tmp_val = tmp - 'a' + 10;
+                                if (tmp >= 'A' && tmp <= 'F')
+                                        tmp_val = tmp - 'A' + 10;
+                                if (tmp >= '0' && tmp <= '9')
+                                        tmp_val = tmp - '0';
+
+                                tmp_val *= 16;
+                                tmp = raw_mask->pattern[i + 1];
+                                if (tmp >= 'a' && tmp <= 'f')
+                                        tmp_val += (tmp - 'a' + 10);
+                                if (tmp >= 'A' && tmp <= 'F')
+                                        tmp_val += (tmp - 'A' + 10);
+                                if (tmp >= '0' && tmp <= '9')
+                                        tmp_val += (tmp - '0');
+
+                                hdrs->raw.mask[j] = tmp_val;
+                        }
+
+                        hdrs->raw.pkt_len = pkt_len / 2;
+                        hdrs->tunnel_level = 0;
+                        hdrs->count = 0;
+                        return 0;
+                }
                 case RTE_FLOW_ITEM_TYPE_ETH:
                         eth_spec = item->spec;
                         eth_mask = item->mask;
@@ -236,7 +301,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
                                 rte_flow_error_set(error, EINVAL,
                                                    RTE_FLOW_ERROR_TYPE_ITEM,
                                                    item, "Invalid IPv4 mask.");
-                                return false;
+                                return -rte_errno;
                         }
 
                         if (ipv4_mask->hdr.src_addr) {
@@ -268,7 +333,9 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
                         hdrs->count = ++layer;
                         break;
-                case RTE_FLOW_ITEM_TYPE_IPV6:
+                case RTE_FLOW_ITEM_TYPE_IPV6: {
+                        int j;
+
                         ipv6_spec = item->spec;
                         ipv6_mask = item->mask;
@@ -283,7 +350,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
                                 rte_flow_error_set(error, EINVAL,
                                                    RTE_FLOW_ERROR_TYPE_ITEM,
                                                    item, "Invalid IPv6 mask");
-                                return false;
+                                return -rte_errno;
                         }
 
                         for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
@@ -329,6 +396,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
                         hdrs->count = ++layer;
                         break;
+                }
                 case RTE_FLOW_ITEM_TYPE_UDP:
                         udp_spec = item->spec;
                         udp_mask = item->mask;
@@ -345,7 +413,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
                                 rte_flow_error_set(error, EINVAL,
                                                    RTE_FLOW_ERROR_TYPE_ITEM,
                                                    item, "Invalid UDP mask");
-                                return false;
+                                return -rte_errno;
                         }
 
                         if (udp_mask->hdr.src_port) {
@@ -386,7 +454,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
                                 rte_flow_error_set(error, EINVAL,
                                                    RTE_FLOW_ERROR_TYPE_ITEM,
                                                    item, "Invalid TCP mask");
-                                return false;
+                                return -rte_errno;
                         }
 
                         if (tcp_mask->hdr.src_port) {
@@ -427,7 +495,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
                                                    RTE_FLOW_ERROR_TYPE_ITEM,
                                                    item,
                                                    "Invalid VLAN input set.");
-                                return false;
+                                return -rte_errno;
                         }
 
                         rte_memcpy(hdr->buffer_spec, &vlan_spec->hdr,
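
For reference, the nibble-decoding loop in the RAW case above is
equivalent to the standalone helper below. This is a sketch for
illustration only; hex_nibble() and hex_str_to_bytes() are hypothetical
names and not part of the patch:

#include <stdint.h>

/* Decode one ASCII hex digit; returns 0-15, or -1 on a bad character.
 * Mirrors the three range checks in the RAW case above. */
static int
hex_nibble(uint8_t c)
{
        if (c >= '0' && c <= '9')
                return c - '0';
        if (c >= 'a' && c <= 'f')
                return c - 'a' + 10;
        if (c >= 'A' && c <= 'F')
                return c - 'A' + 10;
        return -1;
}

/* Convert a string of `len` hex digits into len / 2 bytes, as the
 * parser does for both raw_spec->pattern and raw_mask->pattern. */
static int
hex_str_to_bytes(const uint8_t *hex, uint16_t len, uint8_t *out)
{
        uint16_t i;

        for (i = 0; i + 1 < len; i += 2) {
                int hi = hex_nibble(hex[i]);
                int lo = hex_nibble(hex[i + 1]);

                if (hi < 0 || lo < 0)
                        return -1;
                out[i / 2] = (uint8_t)(hi * 16 + lo);
        }
        return 0;
}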