net/ixgbe: parse flow director filter

Check if the rule is a flow director rule, and get the flow director info.
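
With this split, the rte_flow code (in the file whose diff is suppressed below) can parse a flow into a struct ixgbe_fdir_rule and hand it directly to the hardware programming routine, bypassing the legacy struct rte_eth_fdir_filter. A minimal sketch of that caller, assuming the usual rte_flow create arguments and a parser of roughly the shape this series introduces:

	struct ixgbe_fdir_rule rule;
	int ret;

	memset(&rule, 0, sizeof(rule));
	/* hypothetical call; the real parser lives in the suppressed file */
	ret = ixgbe_parse_fdir_filter(attr, pattern, actions, &rule, error);
	if (!ret && rule.b_spec)
		ret = ixgbe_fdir_filter_program(dev, &rule, FALSE, FALSE);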

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Acked-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Wei Dai <wei.dai@intel.com>
Wei Zhao, 2017-01-13 16:13:09 +08:00; committed by Ferruh Yigit
parent 99e7003831
commit 11777435c7
4 changed files with 1388 additions and 83 deletions


@@ -1487,6 +1487,8 @@ static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
"Failed to allocate memory for fdir hash map!");
return -ENOMEM;
}
fdir_info->mask_added = FALSE;
return 0;
}


@@ -167,6 +167,17 @@ struct ixgbe_fdir_filter {
/* list of fdir filters */
TAILQ_HEAD(ixgbe_fdir_filter_list, ixgbe_fdir_filter);
struct ixgbe_fdir_rule {
struct ixgbe_hw_fdir_mask mask;
union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter */
bool b_spec; /* If TRUE, ixgbe_fdir, fdirflags, queue have meaning. */
bool b_mask; /* If TRUE, mask has meaning. */
enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
uint32_t fdirflags; /* drop or forward */
uint32_t soft_id; /* a unique value for this rule */
uint8_t queue; /* assigned rx queue */
};
struct ixgbe_hw_fdir_info {
struct ixgbe_hw_fdir_mask mask;
uint8_t flex_bytes_offset;
@@ -182,6 +193,7 @@ struct ixgbe_hw_fdir_info {
/* store the pointers of the filters, index is the hash value. */
struct ixgbe_fdir_filter **hash_map;
struct rte_hash *hash_handle; /* cuckoo hash handler */
bool mask_added; /* TRUE if the mask has already been set via the consistent filter API */
};
/* structure for interrupt relative data */
@@ -520,6 +532,10 @@ bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type);
* Flow director function prototypes
*/
int ixgbe_fdir_configure(struct rte_eth_dev *dev);
int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
struct ixgbe_fdir_rule *rule,
bool del, bool update);
void ixgbe_configure_dcb(struct rte_eth_dev *dev);
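
For illustration only (hypothetical values, not part of the patch), a perfect-mode drop rule would be described to ixgbe_fdir_filter_program() roughly as:

	struct ixgbe_fdir_rule rule = {
		.mode      = RTE_FDIR_MODE_PERFECT,
		.fdirflags = IXGBE_FDIRCMD_DROP, /* drop rather than forward */
		.soft_id   = 1,                  /* caller-chosen unique id */
		.queue     = 0,                  /* ignored for a drop rule */
		.b_spec    = TRUE,               /* ixgbe_fdir/fdirflags/queue are valid */
		.b_mask    = TRUE,               /* mask is valid */
	};
	/* rule.ixgbe_fdir and rule.mask are then filled from the parsed flow. */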


@@ -112,10 +112,8 @@
static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
static int fdir_set_input_mask(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask);
static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask);
static int fdir_set_input_mask_x550(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask);
static int fdir_set_input_mask_82599(struct rte_eth_dev *dev);
static int fdir_set_input_mask_x550(struct rte_eth_dev *dev);
static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
@@ -295,8 +293,7 @@ reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
* but makes use of the rte_fdir_masks structure to see which bits to set.
*/
static int
fdir_set_input_mask_82599(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask)
fdir_set_input_mask_82599(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
@@ -308,8 +305,6 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
uint32_t fdirtcpm; /* TCP source and destination port masks. */
uint32_t fdiripv6m; /* IPv6 source and destination masks. */
uint16_t dst_ipv6m = 0;
uint16_t src_ipv6m = 0;
volatile uint32_t *reg;
PMD_INIT_FUNC_TRACE();
@@ -320,31 +315,30 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
* a VLAN of 0 is unspecified, so mask that out as well. L4type
* cannot be masked out in this implementation.
*/
if (input_mask->dst_port_mask == 0 && input_mask->src_port_mask == 0)
if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0)
/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
fdirm |= IXGBE_FDIRM_L4P;
if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
/* mask VLAN Priority */
fdirm |= IXGBE_FDIRM_VLANP;
else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
/* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
else if (input_mask->vlan_tci_mask == 0)
else if (info->mask.vlan_tci_mask == 0)
/* mask VLAN ID and Priority */
fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
return -EINVAL;
}
info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
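
(For reference: the 16-bit TCI carries priority in bits 15:13, CFI in bit 12, and VLAN ID in bits 11:0, so a mask of 0x0FFF matches on VLAN ID only, 0xE000 on priority only, and 0xEFFF on both; the CFI bit is never compared.)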
/* store the TCP/UDP port masks, bit reversed from port layout */
fdirtcpm = reverse_fdir_bitmasks(
rte_be_to_cpu_16(input_mask->dst_port_mask),
rte_be_to_cpu_16(input_mask->src_port_mask));
rte_be_to_cpu_16(info->mask.dst_port_mask),
rte_be_to_cpu_16(info->mask.src_port_mask));
/* write all the same so that UDP, TCP and SCTP use the same mask
* (little-endian)
@@ -352,30 +346,23 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
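
(Worked example: with full src and dst port masks of 0xFFFF each, reverse_fdir_bitmasks() yields fdirtcpm = 0xFFFFFFFF, so zero is written to all three registers and no port bits are masked out by the hardware.)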
info->mask.src_port_mask = input_mask->src_port_mask;
info->mask.dst_port_mask = input_mask->dst_port_mask;
/* Store source and destination IPv4 masks (big-endian),
* cannot use IXGBE_WRITE_REG.
*/
reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
*reg = ~(input_mask->ipv4_mask.src_ip);
*reg = ~(info->mask.src_ipv4_mask);
reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
*reg = ~(input_mask->ipv4_mask.dst_ip);
info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
*reg = ~(info->mask.dst_ipv4_mask);
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
/*
* Store source and destination IPv6 masks (bit reversed)
*/
IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
fdiripv6m = (dst_ipv6m << 16) | src_ipv6m;
fdiripv6m = (info->mask.dst_ipv6_mask << 16) |
info->mask.src_ipv6_mask;
IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
info->mask.src_ipv6_mask = src_ipv6m;
info->mask.dst_ipv6_mask = dst_ipv6m;
}
return IXGBE_SUCCESS;
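
(Worked example, values hypothetical: src_ipv6_mask = 0xFFFF and dst_ipv6_mask = 0x00FF give fdiripv6m = (0x00FF << 16) | 0xFFFF = 0x00FFFFFF, so 0xFF000000 is written to IXGBE_FDIRIP6M — following the convention above that a set mask-register bit tells the hardware to ignore that field.)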
@@ -386,8 +373,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
* but makes use of the rte_fdir_masks structure to see which bits to set.
*/
static int
fdir_set_input_mask_x550(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask)
fdir_set_input_mask_x550(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
@@ -410,20 +396,19 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
/* some bits must be set for mac vlan or tunnel mode */
fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
/* mask VLAN Priority */
fdirm |= IXGBE_FDIRM_VLANP;
else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
/* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
else if (input_mask->vlan_tci_mask == 0)
else if (info->mask.vlan_tci_mask == 0)
/* mask VLAN ID and Priority */
fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
return -EINVAL;
}
info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
@@ -434,12 +419,11 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
IXGBE_FDIRIP6M_TNI_VNI;
if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
mac_mask = input_mask->mac_addr_byte_mask;
mac_mask = info->mask.mac_addr_byte_mask;
fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
& IXGBE_FDIRIP6M_INNER_MAC;
info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
switch (input_mask->tunnel_type_mask) {
switch (info->mask.tunnel_type_mask) {
case 0:
/* Mask tunnel type */
fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
@@ -450,10 +434,8 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
return -EINVAL;
}
info->mask.tunnel_type_mask =
input_mask->tunnel_type_mask;
switch (rte_be_to_cpu_32(input_mask->tunnel_id_mask)) {
switch (rte_be_to_cpu_32(info->mask.tunnel_id_mask)) {
case 0x0:
/* Mask vxlan id */
fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
@@ -467,8 +449,6 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
return -EINVAL;
}
info->mask.tunnel_id_mask =
input_mask->tunnel_id_mask;
}
IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
@@ -482,22 +462,90 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
}
static int
fdir_set_input_mask(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask)
ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask)
{
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
uint16_t dst_ipv6m = 0;
uint16_t src_ipv6m = 0;
memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
info->mask.src_port_mask = input_mask->src_port_mask;
info->mask.dst_port_mask = input_mask->dst_port_mask;
info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
info->mask.src_ipv6_mask = src_ipv6m;
info->mask.dst_ipv6_mask = dst_ipv6m;
return IXGBE_SUCCESS;
}
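
IPV6_ADDR_TO_MASK compresses each 128-bit IPv6 byte mask into 16 bits, assuming its existing convention that bit i is set when byte i of the mask is 0xFF. A /64 prefix mask (first eight bytes 0xFF, the rest zero), for example, compresses to 0x00FF.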
static int
ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask)
{
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
info->mask.tunnel_type_mask = input_mask->tunnel_type_mask;
info->mask.tunnel_id_mask = input_mask->tunnel_id_mask;
return IXGBE_SUCCESS;
}
static int
ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask)
{
enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
if (mode >= RTE_FDIR_MODE_SIGNATURE &&
mode <= RTE_FDIR_MODE_PERFECT)
return fdir_set_input_mask_82599(dev, input_mask);
return ixgbe_fdir_store_input_mask_82599(dev, input_mask);
else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
return fdir_set_input_mask_x550(dev, input_mask);
return ixgbe_fdir_store_input_mask_x550(dev, input_mask);
PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
return -ENOTSUP;
}
int
ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
{
enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
if (mode >= RTE_FDIR_MODE_SIGNATURE &&
mode <= RTE_FDIR_MODE_PERFECT)
return fdir_set_input_mask_82599(dev);
else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
return fdir_set_input_mask_x550(dev);
PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
return -ENOTSUP;
}
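
(Both dispatchers rely on the ordering of enum rte_fdir_mode — NONE, SIGNATURE, PERFECT, PERFECT_MAC_VLAN, PERFECT_TUNNEL — so the two range checks between them cover every mode except NONE.)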
static int
fdir_set_input_mask(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask)
{
int ret;
ret = ixgbe_fdir_store_input_mask(dev, input_mask);
if (ret)
return ret;
return ixgbe_fdir_set_input_mask(dev);
}
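
The store/apply split exists so the consistent filter (rte_flow) path can take its mask from the first rule it sees and program it once. A sketch of that expected caller — hedged, since the real code is in the suppressed file:

	if (rule->b_mask && !fdir_info->mask_added) {
		/* first rule: adopt its mask and write it to hardware */
		rte_memcpy(&fdir_info->mask, &rule->mask,
			   sizeof(struct ixgbe_hw_fdir_mask));
		ret = ixgbe_fdir_set_input_mask(dev);
		if (ret)
			return ret;
		fdir_info->mask_added = TRUE;
	}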
/*
* ixgbe_check_fdir_flex_conf - check if the flex payload and mask configuration
* arguments are valid
@@ -1135,23 +1183,40 @@ ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
return 0;
}
/*
* ixgbe_add_del_fdir_filter - add or remove a flow director filter.
* @dev: pointer to the structure rte_eth_dev
* @fdir_filter: fdir filter entry
* @del: 1 - delete, 0 - add
* @update: 1 - update
*/
static int
ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter,
ixgbe_interpret_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter,
struct ixgbe_fdir_rule *rule)
{
enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
int err;
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
err = ixgbe_fdir_filter_to_atr_input(fdir_filter,
&rule->ixgbe_fdir,
fdir_mode);
if (err)
return err;
rule->mode = fdir_mode;
if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT)
rule->fdirflags = IXGBE_FDIRCMD_DROP;
rule->queue = fdir_filter->action.rx_queue;
rule->soft_id = fdir_filter->soft_id;
return 0;
}
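
Note that rule->fdirflags stays 0 (forward) unless the action is RTE_ETH_FDIR_REJECT; ixgbe_fdir_filter_program() below keys off the IXGBE_FDIRCMD_DROP bit rather than the rte_eth action enum.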
int
ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
struct ixgbe_fdir_rule *rule,
bool del,
bool update)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t fdircmd_flags;
uint32_t fdirhash;
union ixgbe_atr_input input;
uint8_t queue;
bool is_perfect = FALSE;
int err;
@@ -1161,7 +1226,8 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
struct ixgbe_fdir_filter *node;
bool add_node = FALSE;
if (fdir_mode == RTE_FDIR_MODE_NONE)
if (fdir_mode == RTE_FDIR_MODE_NONE ||
fdir_mode != rule->mode)
return -ENOTSUP;
/*
@@ -1174,7 +1240,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
(hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a) &&
(fdir_filter->input.flow_type ==
(rule->ixgbe_fdir.formatted.flow_type ==
RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
(info->mask.src_port_mask != 0 ||
info->mask.dst_port_mask != 0)) {
@@ -1188,29 +1254,23 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
is_perfect = TRUE;
memset(&input, 0, sizeof(input));
err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
fdir_mode);
if (err)
return err;
if (is_perfect) {
if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
if (rule->ixgbe_fdir.formatted.flow_type &
IXGBE_ATR_L4TYPE_IPV6_MASK) {
PMD_DRV_LOG(ERR, "IPv6 is not supported in"
" perfect mode!");
return -ENOTSUP;
}
fdirhash = atr_compute_perfect_hash_82599(&input,
fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir,
dev->data->dev_conf.fdir_conf.pballoc);
fdirhash |= fdir_filter->soft_id <<
fdirhash |= rule->soft_id <<
IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
} else
fdirhash = atr_compute_sig_hash_82599(&input,
fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir,
dev->data->dev_conf.fdir_conf.pballoc);
if (del) {
err = ixgbe_remove_fdir_filter(info, &input);
err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
if (err < 0)
return err;
@@ -1223,7 +1283,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
}
/* add or update an fdir filter*/
fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) {
if (rule->fdirflags & IXGBE_FDIRCMD_DROP) {
if (is_perfect) {
queue = dev->data->dev_conf.fdir_conf.drop_queue;
fdircmd_flags |= IXGBE_FDIRCMD_DROP;
@@ -1232,13 +1292,12 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
" signature mode.");
return -EINVAL;
}
} else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
queue = (uint8_t)fdir_filter->action.rx_queue;
} else if (rule->queue < IXGBE_MAX_RX_QUEUE_NUM)
queue = (uint8_t)rule->queue;
else
return -EINVAL;
node = ixgbe_fdir_filter_lookup(info, &input);
node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir);
if (node) {
if (update) {
node->fdirflags = fdircmd_flags;
@@ -1256,7 +1315,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
if (!node)
return -ENOMEM;
(void)rte_memcpy(&node->ixgbe_fdir,
&input,
&rule->ixgbe_fdir,
sizeof(union ixgbe_atr_input));
node->fdirflags = fdircmd_flags;
node->fdirhash = fdirhash;
@@ -1270,18 +1329,19 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
}
if (is_perfect) {
err = fdir_write_perfect_filter_82599(hw, &input, queue,
fdircmd_flags, fdirhash,
fdir_mode);
err = fdir_write_perfect_filter_82599(hw, &rule->ixgbe_fdir,
queue, fdircmd_flags,
fdirhash, fdir_mode);
} else {
err = fdir_add_signature_filter_82599(hw, &input, queue,
fdircmd_flags, fdirhash);
err = fdir_add_signature_filter_82599(hw, &rule->ixgbe_fdir,
queue, fdircmd_flags,
fdirhash);
}
if (err < 0) {
PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
if (add_node)
(void)ixgbe_remove_fdir_filter(info, &input);
(void)ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
} else {
PMD_DRV_LOG(DEBUG, "FDIR filter added successfully");
}
@@ -1289,6 +1349,29 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
return err;
}
/* ixgbe_add_del_fdir_filter - add or remove a flow director filter.
* @dev: pointer to the structure rte_eth_dev
* @fdir_filter: fdir filter entry
* @del: 1 - delete, 0 - add
* @update: 1 - update
*/
static int
ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter,
bool del,
bool update)
{
struct ixgbe_fdir_rule rule;
int err;
err = ixgbe_interpret_fdir_filter(dev, fdir_filter, &rule);
if (err)
return err;
return ixgbe_fdir_filter_program(dev, &rule, del, update);
}
static int
ixgbe_fdir_flush(struct rte_eth_dev *dev)
{
@@ -1522,19 +1605,23 @@ ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
struct ixgbe_hw_fdir_info *fdir_info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
struct ixgbe_fdir_filter *fdir_filter;
struct ixgbe_fdir_filter *filter_flag;
int ret = 0;
/* flush flow director */
rte_hash_reset(fdir_info->hash_handle);
memset(fdir_info->hash_map, 0,
sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM);
filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
TAILQ_REMOVE(&fdir_info->fdir_list,
fdir_filter,
entries);
rte_free(fdir_filter);
}
ret = ixgbe_fdir_flush(dev);
if (filter_flag != NULL)
ret = ixgbe_fdir_flush(dev);
return ret;
}

File diff suppressed because it is too large.