net/ice: enable protocol agnostic flow offloading in FDIR

Protocol agnostic flow offloading in Flow Director is enabled by this
patch based on the Parser Library, using existing rte_flow raw API.

Note that the raw flow requires:
1. byte string of raw target packet bits.
2. byte string of mask of target packet.

Here is an example:
FDIR matching ipv4 dst addr with 1.2.3.4 and redirect to queue 3:

flow create 0 ingress pattern raw \
pattern spec \
00000000000000000000000008004500001400004000401000000000000001020304 \
pattern mask \
000000000000000000000000000000000000000000000000000000000000ffffffff \
/ end actions queue index 3 / mark id 3 / end

Note that the mask of some key bits (e.g., 0x0800 to indicate the ipv4
proto) is optional in our cases. To avoid redundancy, we just omit the
mask of 0x0800 (with 0xFFFF) in the mask byte string example. The prefix
'0x' for the spec and mask byte (hex) strings is also omitted here.

Also update the ice feature list with rte_flow item raw.

Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
This commit is contained in:
Junfeng Guo 2021-11-03 12:40:03 +08:00 committed by Qi Zhang
parent deaa33c251
commit 25be39cc17
6 changed files with 263 additions and 0 deletions

View File

@ -63,6 +63,7 @@ pfcp = Y
pppoed = Y
pppoes = Y
pppoe_proto_id = Y
raw = Y
sctp = Y
tcp = Y
udp = Y

View File

@ -167,6 +167,7 @@ New Features
* **Updated Intel ice driver.**
* Added protocol agnostic flow offloading support in Flow Director.
* Added 1PPS out support by a devargs.
* Added IPv4 and L4 (TCP/UDP/SCTP) checksum hash support in RSS flow.
* Added DEV_RX_OFFLOAD_TIMESTAMP support.

View File

@ -318,6 +318,11 @@ struct ice_fdir_filter_conf {
uint64_t input_set_o; /* used for non-tunnel or tunnel outer fields */
uint64_t input_set_i; /* only for tunnel inner fields */
uint32_t mark_flag;
struct ice_parser_profile *prof;
bool parser_ena;
u8 *pkt_buf;
u8 pkt_len;
};
#define ICE_MAX_FDIR_FILTER_NUM (1024 * 16)
@ -487,6 +492,14 @@ struct ice_devargs {
uint8_t pps_out_ena;
};
/**
 * Structure to cache an FDIR hardware profile (field vector) together
 * with its reference count, so that identical raw-flow profiles can be
 * shared instead of reprogrammed for every rule.
 */
struct ice_fdir_prof_info {
struct ice_parser_profile prof; /* cached parser profile (fv words) */
u64 fdir_actived_cnt; /* number of active FDIR rules using this profile */
};
/**
* Structure to store private data for each PF/VF instance.
*/
@ -510,6 +523,7 @@ struct ice_adapter {
struct rte_timecounter tx_tstamp_tc;
bool ptp_ena;
uint64_t time_hw;
struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
#ifdef RTE_ARCH_X86
bool rx_use_avx2;
bool rx_use_avx512;

View File

@ -107,6 +107,7 @@
ICE_INSET_NAT_T_ESP_SPI)
static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
{pattern_raw, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_ethertype, ICE_FDIR_INSET_ETH, ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
@ -1188,6 +1189,24 @@ ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
return 0;
}
/**
 * Program or remove a raw (protocol agnostic) FDIR rule in hardware.
 *
 * @param pf PF instance owning the FDIR programming packet buffer
 * @param filter raw filter; pkt_buf/pkt_len hold the training packet
 * @param add true to add the rule, false to delete it
 *
 * @return 0 on success, negative errno-style code from
 *         ice_fdir_programming() on failure.
 */
static int
ice_fdir_add_del_raw(struct ice_pf *pf,
struct ice_fdir_filter_conf *filter,
bool add)
{
struct ice_hw *hw = ICE_PF_TO_HW(pf);
unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
struct ice_fltr_desc desc;

/* Stage the raw training packet in the FDIR programming buffer.
 * NOTE(review): pkt_len is u8 (<= 255); assumes prg_pkt is at least
 * that large — confirm against the prg_pkt allocation.
 */
rte_memcpy(pkt, filter->pkt_buf, filter->pkt_len);

memset(&desc, 0, sizeof(desc));
/* Request SW completion report so programming status is visible. */
filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

return ice_fdir_programming(pf, &desc);
}
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
struct ice_fdir_filter_conf *filter,
@ -1303,6 +1322,68 @@ ice_fdir_create_filter(struct ice_adapter *ad,
struct ice_fdir_fltr_pattern key;
bool is_tun;
int ret;
int i;
if (filter->parser_ena) {
struct ice_hw *hw = ICE_PF_TO_HW(pf);
int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
int ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
u16 main_vsi = pf->main_vsi->idx;
bool fv_found = false;
struct ice_fdir_prof_info *pi = &ad->fdir_prof_info[ptg];
if (pi->fdir_actived_cnt != 0) {
for (i = 0; i < ICE_MAX_FV_WORDS; i++)
if (pi->prof.fv[i].proto_id !=
filter->prof->fv[i].proto_id ||
pi->prof.fv[i].offset !=
filter->prof->fv[i].offset ||
pi->prof.fv[i].msk !=
filter->prof->fv[i].msk)
break;
if (i == ICE_MAX_FV_WORDS) {
fv_found = true;
pi->fdir_actived_cnt++;
}
}
if (!fv_found) {
ret = ice_flow_set_hw_prof(hw, main_vsi, ctrl_vsi,
filter->prof, ICE_BLK_FD);
if (ret)
goto error;
}
ret = ice_fdir_add_del_raw(pf, filter, true);
if (ret)
goto error;
if (!fv_found) {
for (i = 0; i < filter->prof->fv_num; i++) {
pi->prof.fv[i].proto_id =
filter->prof->fv[i].proto_id;
pi->prof.fv[i].offset =
filter->prof->fv[i].offset;
pi->prof.fv[i].msk = filter->prof->fv[i].msk;
}
pi->fdir_actived_cnt = 1;
}
if (filter->mark_flag == 1)
ice_fdir_rx_parsing_enable(ad, 1);
entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
if (!entry)
goto error;
rte_memcpy(entry, filter, sizeof(*filter));
flow->rule = entry;
return 0;
}
ice_fdir_extract_fltr_key(&key, filter);
node = ice_fdir_entry_lookup(fdir_info, &key);
@ -1381,6 +1462,11 @@ ice_fdir_create_filter(struct ice_adapter *ad,
free_entry:
rte_free(entry);
return -rte_errno;
error:
rte_free(filter->prof);
rte_free(filter->pkt_buf);
return -rte_errno;
}
static int
@ -1397,6 +1483,44 @@ ice_fdir_destroy_filter(struct ice_adapter *ad,
filter = (struct ice_fdir_filter_conf *)flow->rule;
if (filter->parser_ena) {
struct ice_hw *hw = ICE_PF_TO_HW(pf);
int id = ice_find_first_bit(filter->prof->ptypes, UINT16_MAX);
int ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
u16 ctrl_vsi = pf->fdir.fdir_vsi->idx;
u16 main_vsi = pf->main_vsi->idx;
enum ice_block blk = ICE_BLK_FD;
u16 vsi_num;
ret = ice_fdir_add_del_raw(pf, filter, false);
if (ret)
return -rte_errno;
struct ice_fdir_prof_info *pi = &ad->fdir_prof_info[ptg];
if (pi->fdir_actived_cnt != 0) {
pi->fdir_actived_cnt--;
if (!pi->fdir_actived_cnt) {
vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi);
ice_rem_prof_id_flow(hw, blk, vsi_num, id);
vsi_num = ice_get_hw_vsi_num(hw, main_vsi);
ice_rem_prof_id_flow(hw, blk, vsi_num, id);
}
}
if (filter->mark_flag == 1)
ice_fdir_rx_parsing_enable(ad, 0);
flow->rule = NULL;
rte_free(filter->prof);
rte_free(filter->pkt_buf);
rte_free(filter);
return 0;
}
is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
if (filter->counter) {
@ -1675,6 +1799,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
enum rte_flow_item_type l4 = RTE_FLOW_ITEM_TYPE_END;
enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
const struct rte_flow_item_raw *raw_spec, *raw_mask;
const struct rte_flow_item_eth *eth_spec, *eth_mask;
const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
@ -1702,6 +1827,9 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
struct ice_fdir_extra *p_ext_data;
struct ice_fdir_v4 *p_v4 = NULL;
struct ice_fdir_v6 *p_v6 = NULL;
struct ice_parser_result rslt;
struct ice_parser *psr;
uint8_t item_num = 0;
for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
@ -1713,6 +1841,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
is_outer = false;
}
item_num++;
}
/* This loop parse flow pattern and distinguish Non-tunnel and tunnel
@ -1733,6 +1862,102 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
&input_set_i : &input_set_o;
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_RAW: {
raw_spec = item->spec;
raw_mask = item->mask;
if (item_num != 1)
break;
/* convert raw spec & mask from byte string to int */
unsigned char *tmp_spec =
(uint8_t *)(uintptr_t)raw_spec->pattern;
unsigned char *tmp_mask =
(uint8_t *)(uintptr_t)raw_mask->pattern;
uint16_t udp_port = 0;
uint16_t tmp_val = 0;
uint8_t pkt_len = 0;
uint8_t tmp = 0;
int i, j;
pkt_len = strlen((char *)(uintptr_t)raw_spec->pattern);
if (strlen((char *)(uintptr_t)raw_mask->pattern) !=
pkt_len)
return -rte_errno;
for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
tmp = tmp_spec[i];
if (tmp >= 'a' && tmp <= 'f')
tmp_val = tmp - 'a' + 10;
if (tmp >= 'A' && tmp <= 'F')
tmp_val = tmp - 'A' + 10;
if (tmp >= '0' && tmp <= '9')
tmp_val = tmp - '0';
tmp_val *= 16;
tmp = tmp_spec[i + 1];
if (tmp >= 'a' && tmp <= 'f')
tmp_spec[j] = tmp_val + tmp - 'a' + 10;
if (tmp >= 'A' && tmp <= 'F')
tmp_spec[j] = tmp_val + tmp - 'A' + 10;
if (tmp >= '0' && tmp <= '9')
tmp_spec[j] = tmp_val + tmp - '0';
tmp = tmp_mask[i];
if (tmp >= 'a' && tmp <= 'f')
tmp_val = tmp - 'a' + 10;
if (tmp >= 'A' && tmp <= 'F')
tmp_val = tmp - 'A' + 10;
if (tmp >= '0' && tmp <= '9')
tmp_val = tmp - '0';
tmp_val *= 16;
tmp = tmp_mask[i + 1];
if (tmp >= 'a' && tmp <= 'f')
tmp_mask[j] = tmp_val + tmp - 'a' + 10;
if (tmp >= 'A' && tmp <= 'F')
tmp_mask[j] = tmp_val + tmp - 'A' + 10;
if (tmp >= '0' && tmp <= '9')
tmp_mask[j] = tmp_val + tmp - '0';
}
pkt_len /= 2;
if (ice_parser_create(&ad->hw, &psr))
return -rte_errno;
if (ice_get_open_tunnel_port(&ad->hw, TNL_VXLAN,
&udp_port))
ice_parser_vxlan_tunnel_set(psr, udp_port,
true);
if (ice_parser_run(psr, tmp_spec, pkt_len, &rslt))
return -rte_errno;
ice_parser_destroy(psr);
if (!tmp_mask)
return -rte_errno;
filter->prof = (struct ice_parser_profile *)
ice_malloc(&ad->hw, sizeof(*filter->prof));
if (!filter->prof)
return -ENOMEM;
if (ice_parser_profile_init(&rslt, tmp_spec, tmp_mask,
pkt_len, ICE_BLK_FD, true, filter->prof))
return -rte_errno;
u8 *pkt_buf = (u8 *)ice_malloc(&ad->hw, pkt_len + 1);
if (!pkt_buf)
return -ENOMEM;
rte_memcpy(pkt_buf, tmp_spec, pkt_len);
filter->pkt_buf = pkt_buf;
filter->pkt_len = pkt_len;
filter->parser_ena = true;
break;
}
case RTE_FLOW_ITEM_TYPE_ETH:
flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
eth_spec = item->spec;
@ -2198,6 +2423,7 @@ ice_fdir_parse(struct ice_adapter *ad,
struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
struct ice_pattern_match_item *item = NULL;
uint64_t input_set;
bool raw = false;
int ret;
memset(filter, 0, sizeof(*filter));
@ -2213,7 +2439,13 @@ ice_fdir_parse(struct ice_adapter *ad,
ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
if (ret)
goto error;
if (item->pattern_list[0] == RTE_FLOW_ITEM_TYPE_RAW)
raw = true;
input_set = filter->input_set_o | filter->input_set_i;
input_set = raw ? ~input_set : input_set;
if (!input_set || filter->input_set_o &
~(item->input_set_mask_o | ICE_INSET_ETHERTYPE) ||
filter->input_set_i & ~item->input_set_mask_i) {
@ -2231,7 +2463,12 @@ ice_fdir_parse(struct ice_adapter *ad,
if (meta)
*meta = filter;
rte_free(item);
return ret;
error:
rte_free(filter->prof);
rte_free(filter->pkt_buf);
rte_free(item);
return ret;
}

View File

@ -65,6 +65,12 @@ enum rte_flow_item_type pattern_empty[] = {
RTE_FLOW_ITEM_TYPE_END,
};
/* raw — protocol agnostic pattern: a single RAW item carrying the
 * spec/mask byte strings of the target packet.
 */
enum rte_flow_item_type pattern_raw[] = {
RTE_FLOW_ITEM_TYPE_RAW,
RTE_FLOW_ITEM_TYPE_END,
};
/* L2 */
enum rte_flow_item_type pattern_ethertype[] = {
RTE_FLOW_ITEM_TYPE_ETH,
@ -2081,6 +2087,7 @@ struct ice_ptype_match {
};
static struct ice_ptype_match ice_ptype_map[] = {
{pattern_raw, ICE_PTYPE_IPV4_PAY},
{pattern_eth_ipv4, ICE_PTYPE_IPV4_PAY},
{pattern_eth_ipv4_udp, ICE_PTYPE_IPV4_UDP_PAY},
{pattern_eth_ipv4_tcp, ICE_PTYPE_IPV4_TCP_PAY},

View File

@ -124,6 +124,9 @@
/* empty pattern */
extern enum rte_flow_item_type pattern_empty[];
/* raw pattern */
extern enum rte_flow_item_type pattern_raw[];
/* L2 */
extern enum rte_flow_item_type pattern_ethertype[];
extern enum rte_flow_item_type pattern_ethertype_vlan[];