net/i40e: fix flow director flex configuration
The FDIR flex mask and flex pit configuration should not be applied
during flow validation; it should be applied when the flow is created.
Fixes: 6ced3dd72f
("net/i40e: support flexible payload parsing for FDIR")
Cc: stable@dpdk.org
Signed-off-by: Chenxu Di <chenxux.di@intel.com>
Tested-by: Jun W Zhou <junx.w.zhou@intel.com>
Acked-by: Jeff Guo <jia.guo@intel.com>
This commit is contained in:
parent
333e1bde9d
commit
fe5d0e85b7
@ -610,11 +610,22 @@ enum i40e_fdir_ip_type {
|
||||
I40E_FDIR_IPTYPE_IPV6,
|
||||
};
|
||||
|
||||
/**
 * Flexible payload extraction descriptor for the flow director.
 *
 * All offsets and sizes are expressed in 16-bit words.
 */
struct i40e_fdir_flex_pit {
	uint8_t src_offset; /* word offset from the beginning of the payload */
	uint8_t size;       /* extraction length in words */
	uint8_t dst_offset; /* word offset inside the flexible payload */
};
|
||||
|
||||
/* A structure used to contain extend input of flow */
|
||||
struct i40e_fdir_flow_ext {
|
||||
uint16_t vlan_tci;
|
||||
uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
|
||||
/* It is filled by the flexible payload to match. */
|
||||
uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
|
||||
uint8_t raw_id;
|
||||
uint8_t is_vf; /* 1 for VF, 0 for port dev */
|
||||
uint16_t dst_id; /* VF ID, available when is_vf is 1*/
|
||||
bool inner_ip; /* If there is inner ip */
|
||||
@ -623,6 +634,8 @@ struct i40e_fdir_flow_ext {
|
||||
bool customized_pctype; /* If customized pctype is used */
|
||||
bool pkt_template; /* If raw packet template is used */
|
||||
bool is_udp; /* ipv4|ipv6 udp flow */
|
||||
enum i40e_flxpld_layer_idx layer_idx;
|
||||
struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
|
||||
};
|
||||
|
||||
/* A structure used to define the input for a flow director filter entry */
|
||||
@ -673,15 +686,6 @@ struct i40e_fdir_filter_conf {
|
||||
struct i40e_fdir_action action; /* Action taken when match */
|
||||
};
|
||||
|
||||
/*
|
||||
* Structure to store flex pit for flow diretor.
|
||||
*/
|
||||
struct i40e_fdir_flex_pit {
|
||||
uint8_t src_offset; /* offset in words from the beginning of payload */
|
||||
uint8_t size; /* size in words */
|
||||
uint8_t dst_offset; /* offset in words of flexible payload */
|
||||
};
|
||||
|
||||
struct i40e_fdir_flex_mask {
|
||||
uint8_t word_mask; /**< Bit i enables word i of flexible payload */
|
||||
uint8_t nb_bitmask;
|
||||
|
@ -1442,6 +1442,153 @@ i40e_fdir_entry_pool_put(struct i40e_fdir_info *fdir_info,
|
||||
rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, f->idx);
|
||||
}
|
||||
|
||||
static int
|
||||
i40e_flow_store_flex_pit(struct i40e_pf *pf,
|
||||
struct i40e_fdir_flex_pit *flex_pit,
|
||||
enum i40e_flxpld_layer_idx layer_idx,
|
||||
uint8_t raw_id)
|
||||
{
|
||||
uint8_t field_idx;
|
||||
|
||||
field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
|
||||
/* Check if the configuration is conflicted */
|
||||
if (pf->fdir.flex_pit_flag[layer_idx] &&
|
||||
(pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
|
||||
pf->fdir.flex_set[field_idx].size != flex_pit->size ||
|
||||
pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
|
||||
return -1;
|
||||
|
||||
/* Check if the configuration exists. */
|
||||
if (pf->fdir.flex_pit_flag[layer_idx] &&
|
||||
(pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
|
||||
pf->fdir.flex_set[field_idx].size == flex_pit->size &&
|
||||
pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
|
||||
return 1;
|
||||
|
||||
pf->fdir.flex_set[field_idx].src_offset =
|
||||
flex_pit->src_offset;
|
||||
pf->fdir.flex_set[field_idx].size =
|
||||
flex_pit->size;
|
||||
pf->fdir.flex_set[field_idx].dst_offset =
|
||||
flex_pit->dst_offset;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
|
||||
enum i40e_flxpld_layer_idx layer_idx,
|
||||
uint8_t raw_id)
|
||||
{
|
||||
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
|
||||
uint32_t flx_pit, flx_ort;
|
||||
uint16_t min_next_off = 0;
|
||||
uint8_t field_idx;
|
||||
uint8_t i;
|
||||
|
||||
if (raw_id) {
|
||||
flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
|
||||
(raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
|
||||
(layer_idx * I40E_MAX_FLXPLD_FIED);
|
||||
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
|
||||
}
|
||||
|
||||
/* Set flex pit */
|
||||
for (i = 0; i < raw_id; i++) {
|
||||
field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
|
||||
flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
|
||||
pf->fdir.flex_set[field_idx].size,
|
||||
pf->fdir.flex_set[field_idx].dst_offset);
|
||||
|
||||
I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
|
||||
min_next_off = pf->fdir.flex_set[field_idx].src_offset +
|
||||
pf->fdir.flex_set[field_idx].size;
|
||||
}
|
||||
|
||||
for (; i < I40E_MAX_FLXPLD_FIED; i++) {
|
||||
/* set the non-used register obeying register's constrain */
|
||||
field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
|
||||
flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
|
||||
NONUSE_FLX_PIT_DEST_OFF);
|
||||
I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
|
||||
min_next_off++;
|
||||
}
|
||||
|
||||
pf->fdir.flex_pit_flag[layer_idx] = 1;
|
||||
}
|
||||
|
||||
static int
|
||||
i40e_flow_store_flex_mask(struct i40e_pf *pf,
|
||||
enum i40e_filter_pctype pctype,
|
||||
uint8_t *mask)
|
||||
{
|
||||
struct i40e_fdir_flex_mask flex_mask;
|
||||
uint8_t nb_bitmask = 0;
|
||||
uint16_t mask_tmp;
|
||||
uint8_t i;
|
||||
|
||||
memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
|
||||
for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
|
||||
mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
|
||||
if (mask_tmp) {
|
||||
flex_mask.word_mask |=
|
||||
I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
|
||||
if (mask_tmp != UINT16_MAX) {
|
||||
flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
|
||||
flex_mask.bitmask[nb_bitmask].offset =
|
||||
i / sizeof(uint16_t);
|
||||
nb_bitmask++;
|
||||
if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
flex_mask.nb_bitmask = nb_bitmask;
|
||||
|
||||
if (pf->fdir.flex_mask_flag[pctype] &&
|
||||
(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
|
||||
sizeof(struct i40e_fdir_flex_mask))))
|
||||
return -2;
|
||||
else if (pf->fdir.flex_mask_flag[pctype] &&
|
||||
!(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
|
||||
sizeof(struct i40e_fdir_flex_mask))))
|
||||
return 1;
|
||||
|
||||
memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
|
||||
sizeof(struct i40e_fdir_flex_mask));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
|
||||
enum i40e_filter_pctype pctype)
|
||||
{
|
||||
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
|
||||
struct i40e_fdir_flex_mask *flex_mask;
|
||||
uint32_t flxinset, fd_mask;
|
||||
uint8_t i;
|
||||
|
||||
/* Set flex mask */
|
||||
flex_mask = &pf->fdir.flex_mask[pctype];
|
||||
flxinset = (flex_mask->word_mask <<
|
||||
I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
|
||||
I40E_PRTQF_FD_FLXINSET_INSET_MASK;
|
||||
i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
|
||||
|
||||
for (i = 0; i < flex_mask->nb_bitmask; i++) {
|
||||
fd_mask = (flex_mask->bitmask[i].mask <<
|
||||
I40E_PRTQF_FD_MSK_MASK_SHIFT) &
|
||||
I40E_PRTQF_FD_MSK_MASK_MASK;
|
||||
fd_mask |= ((flex_mask->bitmask[i].offset +
|
||||
I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
|
||||
I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
|
||||
I40E_PRTQF_FD_MSK_OFFSET_MASK;
|
||||
i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
|
||||
}
|
||||
|
||||
pf->fdir.flex_mask_flag[pctype] = 1;
|
||||
}
|
||||
|
||||
static inline unsigned char *
|
||||
i40e_find_available_buffer(struct rte_eth_dev *dev)
|
||||
{
|
||||
@ -1494,13 +1641,19 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
|
||||
{
|
||||
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
||||
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
|
||||
unsigned char *pkt = NULL;
|
||||
enum i40e_filter_pctype pctype;
|
||||
enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
|
||||
struct i40e_fdir_info *fdir_info = &pf->fdir;
|
||||
struct i40e_fdir_filter *node;
|
||||
uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
|
||||
struct i40e_fdir_filter check_filter; /* Check if the filter exists */
|
||||
struct i40e_fdir_flex_pit flex_pit;
|
||||
enum i40e_filter_pctype pctype;
|
||||
struct i40e_fdir_filter *node;
|
||||
unsigned char *pkt = NULL;
|
||||
bool cfg_flex_pit = true;
|
||||
bool wait_status = true;
|
||||
uint8_t field_idx;
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
if (pf->fdir.fdir_vsi == NULL) {
|
||||
PMD_DRV_LOG(ERR, "FDIR is not enabled");
|
||||
@ -1533,6 +1686,47 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
|
||||
i40e_fdir_filter_convert(filter, &check_filter);
|
||||
|
||||
if (add) {
|
||||
if (!filter->input.flow_ext.customized_pctype) {
|
||||
for (i = 0; i < filter->input.flow_ext.raw_id; i++) {
|
||||
layer_idx = filter->input.flow_ext.layer_idx;
|
||||
field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
|
||||
flex_pit = filter->input.flow_ext.flex_pit[field_idx];
|
||||
|
||||
/* Store flex pit to SW */
|
||||
ret = i40e_flow_store_flex_pit(pf, &flex_pit,
|
||||
layer_idx, i);
|
||||
if (ret < 0) {
|
||||
PMD_DRV_LOG(ERR, "Conflict with the"
|
||||
" first flexible rule.");
|
||||
return -EINVAL;
|
||||
} else if (ret > 0) {
|
||||
cfg_flex_pit = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (cfg_flex_pit)
|
||||
i40e_flow_set_fdir_flex_pit(pf, layer_idx,
|
||||
filter->input.flow_ext.raw_id);
|
||||
|
||||
/* Store flex mask to SW */
|
||||
for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++)
|
||||
flex_mask[i] =
|
||||
filter->input.flow_ext.flex_mask[i];
|
||||
|
||||
ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
|
||||
if (ret == -1) {
|
||||
PMD_DRV_LOG(ERR, "Exceed maximal"
|
||||
" number of bitmasks");
|
||||
return -EINVAL;
|
||||
} else if (ret == -2) {
|
||||
PMD_DRV_LOG(ERR, "Conflict with the"
|
||||
" first flexible rule");
|
||||
return -EINVAL;
|
||||
} else if (ret == 0) {
|
||||
i40e_flow_set_fdir_flex_msk(pf, pctype);
|
||||
}
|
||||
}
|
||||
|
||||
ret = i40e_sw_fdir_filter_insert(pf, &check_filter);
|
||||
if (ret < 0) {
|
||||
PMD_DRV_LOG(ERR,
|
||||
|
@ -2243,152 +2243,6 @@ i40e_flow_check_raw_item(const struct rte_flow_item *item,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
i40e_flow_store_flex_pit(struct i40e_pf *pf,
|
||||
struct i40e_fdir_flex_pit *flex_pit,
|
||||
enum i40e_flxpld_layer_idx layer_idx,
|
||||
uint8_t raw_id)
|
||||
{
|
||||
uint8_t field_idx;
|
||||
|
||||
field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
|
||||
/* Check if the configuration is conflicted */
|
||||
if (pf->fdir.flex_pit_flag[layer_idx] &&
|
||||
(pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
|
||||
pf->fdir.flex_set[field_idx].size != flex_pit->size ||
|
||||
pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
|
||||
return -1;
|
||||
|
||||
/* Check if the configuration exists. */
|
||||
if (pf->fdir.flex_pit_flag[layer_idx] &&
|
||||
(pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
|
||||
pf->fdir.flex_set[field_idx].size == flex_pit->size &&
|
||||
pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
|
||||
return 1;
|
||||
|
||||
pf->fdir.flex_set[field_idx].src_offset =
|
||||
flex_pit->src_offset;
|
||||
pf->fdir.flex_set[field_idx].size =
|
||||
flex_pit->size;
|
||||
pf->fdir.flex_set[field_idx].dst_offset =
|
||||
flex_pit->dst_offset;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
i40e_flow_store_flex_mask(struct i40e_pf *pf,
|
||||
enum i40e_filter_pctype pctype,
|
||||
uint8_t *mask)
|
||||
{
|
||||
struct i40e_fdir_flex_mask flex_mask;
|
||||
uint16_t mask_tmp;
|
||||
uint8_t i, nb_bitmask = 0;
|
||||
|
||||
memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
|
||||
for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
|
||||
mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
|
||||
if (mask_tmp) {
|
||||
flex_mask.word_mask |=
|
||||
I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
|
||||
if (mask_tmp != UINT16_MAX) {
|
||||
flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
|
||||
flex_mask.bitmask[nb_bitmask].offset =
|
||||
i / sizeof(uint16_t);
|
||||
nb_bitmask++;
|
||||
if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
flex_mask.nb_bitmask = nb_bitmask;
|
||||
|
||||
if (pf->fdir.flex_mask_flag[pctype] &&
|
||||
(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
|
||||
sizeof(struct i40e_fdir_flex_mask))))
|
||||
return -2;
|
||||
else if (pf->fdir.flex_mask_flag[pctype] &&
|
||||
!(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
|
||||
sizeof(struct i40e_fdir_flex_mask))))
|
||||
return 1;
|
||||
|
||||
memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
|
||||
sizeof(struct i40e_fdir_flex_mask));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
|
||||
enum i40e_flxpld_layer_idx layer_idx,
|
||||
uint8_t raw_id)
|
||||
{
|
||||
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
|
||||
uint32_t flx_pit, flx_ort;
|
||||
uint8_t field_idx;
|
||||
uint16_t min_next_off = 0; /* in words */
|
||||
uint8_t i;
|
||||
|
||||
if (raw_id) {
|
||||
flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
|
||||
(raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
|
||||
(layer_idx * I40E_MAX_FLXPLD_FIED);
|
||||
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
|
||||
}
|
||||
|
||||
/* Set flex pit */
|
||||
for (i = 0; i < raw_id; i++) {
|
||||
field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
|
||||
flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
|
||||
pf->fdir.flex_set[field_idx].size,
|
||||
pf->fdir.flex_set[field_idx].dst_offset);
|
||||
|
||||
I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
|
||||
min_next_off = pf->fdir.flex_set[field_idx].src_offset +
|
||||
pf->fdir.flex_set[field_idx].size;
|
||||
}
|
||||
|
||||
for (; i < I40E_MAX_FLXPLD_FIED; i++) {
|
||||
/* set the non-used register obeying register's constrain */
|
||||
field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
|
||||
flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
|
||||
NONUSE_FLX_PIT_DEST_OFF);
|
||||
I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
|
||||
min_next_off++;
|
||||
}
|
||||
|
||||
pf->fdir.flex_pit_flag[layer_idx] = 1;
|
||||
}
|
||||
|
||||
static void
|
||||
i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
|
||||
enum i40e_filter_pctype pctype)
|
||||
{
|
||||
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
|
||||
struct i40e_fdir_flex_mask *flex_mask;
|
||||
uint32_t flxinset, fd_mask;
|
||||
uint8_t i;
|
||||
|
||||
/* Set flex mask */
|
||||
flex_mask = &pf->fdir.flex_mask[pctype];
|
||||
flxinset = (flex_mask->word_mask <<
|
||||
I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
|
||||
I40E_PRTQF_FD_FLXINSET_INSET_MASK;
|
||||
i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
|
||||
|
||||
for (i = 0; i < flex_mask->nb_bitmask; i++) {
|
||||
fd_mask = (flex_mask->bitmask[i].mask <<
|
||||
I40E_PRTQF_FD_MSK_MASK_SHIFT) &
|
||||
I40E_PRTQF_FD_MSK_MASK_MASK;
|
||||
fd_mask |= ((flex_mask->bitmask[i].offset +
|
||||
I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
|
||||
I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
|
||||
I40E_PRTQF_FD_MSK_OFFSET_MASK;
|
||||
i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
|
||||
}
|
||||
|
||||
pf->fdir.flex_mask_flag[pctype] = 1;
|
||||
}
|
||||
|
||||
static int
|
||||
i40e_flow_set_fdir_inset(struct i40e_pf *pf,
|
||||
enum i40e_filter_pctype pctype,
|
||||
@ -2607,18 +2461,15 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
|
||||
uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
|
||||
struct i40e_fdir_flex_pit flex_pit;
|
||||
uint8_t next_dst_off = 0;
|
||||
uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
|
||||
uint16_t flex_size;
|
||||
bool cfg_flex_pit = true;
|
||||
bool cfg_flex_msk = true;
|
||||
uint16_t ether_type;
|
||||
uint32_t vtc_flow_cpu;
|
||||
bool outer_ip = true;
|
||||
uint8_t field_idx;
|
||||
int ret;
|
||||
|
||||
memset(off_arr, 0, sizeof(off_arr));
|
||||
memset(len_arr, 0, sizeof(len_arr));
|
||||
memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
|
||||
filter->input.flow_ext.customized_pctype = false;
|
||||
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
|
||||
if (item->last) {
|
||||
@ -3176,6 +3027,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
|
||||
|
||||
flex_size = 0;
|
||||
memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
|
||||
field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
|
||||
flex_pit.size =
|
||||
raw_spec->length / sizeof(uint16_t);
|
||||
flex_pit.dst_offset =
|
||||
@ -3202,27 +3054,21 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
|
||||
return -rte_errno;
|
||||
}
|
||||
|
||||
/* Store flex pit to SW */
|
||||
ret = i40e_flow_store_flex_pit(pf, &flex_pit,
|
||||
layer_idx, raw_id);
|
||||
if (ret < 0) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM,
|
||||
item,
|
||||
"Conflict with the first flexible rule.");
|
||||
return -rte_errno;
|
||||
} else if (ret > 0)
|
||||
cfg_flex_pit = false;
|
||||
|
||||
for (i = 0; i < raw_spec->length; i++) {
|
||||
j = i + next_dst_off;
|
||||
filter->input.flow_ext.flexbytes[j] =
|
||||
raw_spec->pattern[i];
|
||||
flex_mask[j] = raw_mask->pattern[i];
|
||||
filter->input.flow_ext.flex_mask[j] =
|
||||
raw_mask->pattern[i];
|
||||
}
|
||||
|
||||
next_dst_off += raw_spec->length;
|
||||
raw_id++;
|
||||
|
||||
memcpy(&filter->input.flow_ext.flex_pit[field_idx],
|
||||
&flex_pit, sizeof(struct i40e_fdir_flex_pit));
|
||||
filter->input.flow_ext.layer_idx = layer_idx;
|
||||
filter->input.flow_ext.raw_id = raw_id;
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_VF:
|
||||
vf_spec = item->spec;
|
||||
@ -3308,29 +3154,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
|
||||
"Invalid pattern mask.");
|
||||
return -rte_errno;
|
||||
}
|
||||
|
||||
/* Store flex mask to SW */
|
||||
ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
|
||||
if (ret == -1) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM,
|
||||
item,
|
||||
"Exceed maximal number of bitmasks");
|
||||
return -rte_errno;
|
||||
} else if (ret == -2) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM,
|
||||
item,
|
||||
"Conflict with the first flexible rule");
|
||||
return -rte_errno;
|
||||
} else if (ret > 0)
|
||||
cfg_flex_msk = false;
|
||||
|
||||
if (cfg_flex_pit)
|
||||
i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
|
||||
|
||||
if (cfg_flex_msk)
|
||||
i40e_flow_set_fdir_flex_msk(pf, pctype);
|
||||
}
|
||||
|
||||
filter->input.pctype = pctype;
|
||||
|
Loading…
Reference in New Issue
Block a user