net/ixgbe: create consistent filter

This patch adds a function to create a flow rule. Depending on the pattern, the rule is programmed as an n-tuple, ethertype, TCP SYN, flow director or L2 tunnel filter.
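
For context, the new ixgbe_flow_create() is not called by applications directly; it is reached through the generic rte_flow API via the ixgbe_flow_ops table updated below. A minimal usage sketch follows (illustrative only and not part of this patch: the port id, address and queue index are made-up values, and the call matches the rte_flow_create() signature of this DPDK generation, which still took a uint8_t port id):

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Illustrative sketch: steer TCP/IPv4 packets for one destination address
 * to RX queue 3. With this patch, ixgbe_flow_create() tries the n-tuple
 * parser first for such a pattern.
 */
struct rte_flow *
example_create_flow(uint8_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr.dst_addr = rte_cpu_to_be_32(0xc0a80001), /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr.dst_addr = rte_cpu_to_be_32(0xffffffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	/* Dispatches to ixgbe_flow_ops.create, i.e. ixgbe_flow_create(). */
	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}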

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Acked-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Wei Dai <wei.dai@intel.com>
Wei Zhao 2017-01-13 16:13:10 +08:00 committed by Ferruh Yigit
parent 11777435c7
commit 72c135a89f
3 changed files with 266 additions and 14 deletions

drivers/net/ixgbe/ixgbe_ethdev.c

@@ -309,9 +309,6 @@ static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter,
bool add);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
@@ -321,17 +318,11 @@ static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter);
static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *filter,
bool add);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *filter);
static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter *filter,
bool add);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -1343,6 +1334,14 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* initialize l2 tunnel filter list & hash */
ixgbe_l2_tn_filter_init(eth_dev);
TAILQ_INIT(&filter_ntuple_list);
TAILQ_INIT(&filter_ethertype_list);
TAILQ_INIT(&filter_syn_list);
TAILQ_INIT(&filter_fdir_list);
TAILQ_INIT(&filter_l2_tunnel_list);
TAILQ_INIT(&ixgbe_flow_list);
return 0;
}
@@ -5956,7 +5955,7 @@ ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
return -ENOTSUP;\
} while (0)
static int
int
ixgbe_syn_filter_set(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter,
bool add)
@@ -6335,7 +6334,7 @@ ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
* - On success, zero.
* - On failure, a negative value.
*/
static int
int
ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter,
bool add)
@@ -6480,7 +6479,7 @@ ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
return ret;
}
static int
int
ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter *filter,
bool add)
@@ -7559,7 +7558,7 @@ ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
}
/* Add l2 tunnel filter */
static int
int
ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
struct rte_eth_l2_tunnel_conf *l2_tunnel,
bool restore)

drivers/net/ixgbe/ixgbe_ethdev.h

@@ -334,6 +334,54 @@ struct ixgbe_l2_tn_info {
bool e_tag_ether_type; /* ether type for e-tag */
};
struct rte_flow {
enum rte_filter_type filter_type;
void *rule;
};
/* ntuple filter list structure */
struct ixgbe_ntuple_filter_ele {
TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
struct rte_eth_ntuple_filter filter_info;
};
/* ethertype filter list structure */
struct ixgbe_ethertype_filter_ele {
TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
struct rte_eth_ethertype_filter filter_info;
};
/* syn filter list structure */
struct ixgbe_eth_syn_filter_ele {
TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
struct rte_eth_syn_filter filter_info;
};
/* fdir filter list structure */
struct ixgbe_fdir_rule_ele {
TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
struct ixgbe_fdir_rule filter_info;
};
/* l2_tunnel filter list structure */
struct ixgbe_eth_l2_tunnel_conf_ele {
TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
struct rte_eth_l2_tunnel_conf filter_info;
};
/* ixgbe_flow memory list structure */
struct ixgbe_flow_mem {
TAILQ_ENTRY(ixgbe_flow_mem) entries;
struct rte_flow *flow;
};
TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
struct ixgbe_ntuple_filter_list filter_ntuple_list;
TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
struct ixgbe_ethertype_filter_list filter_ethertype_list;
TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
struct ixgbe_syn_filter_list filter_syn_list;
TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
struct ixgbe_fdir_rule_filter_list filter_fdir_list;
TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
struct ixgbe_flow_mem_list ixgbe_flow_list;
/*
* Statistics counters collected by the MACsec
*/
@@ -528,6 +576,19 @@ uint32_t ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i);
bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type);
int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *filter,
bool add);
int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter *filter,
bool add);
int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter,
bool add);
int
ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
struct rte_eth_l2_tunnel_conf *l2_tunnel,
bool restore);
/*
* Flow director function prototypes
*/

drivers/net/ixgbe/ixgbe_flow.c

@@ -157,10 +157,15 @@ ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
static struct rte_flow *ixgbe_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
const struct rte_flow_ops ixgbe_flow_ops = {
ixgbe_flow_validate,
NULL,
ixgbe_flow_create,
NULL,
ixgbe_flow_flush,
NULL,
@@ -2436,6 +2441,193 @@ ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
return ret;
}
/**
* Create a flow rule.
* Theoretically one rule can match more than one kind of filter.
* We will let it use the filter which it hits first.
* So, the sequence matters.
*/
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
int ret;
struct rte_eth_ntuple_filter ntuple_filter;
struct rte_eth_ethertype_filter ethertype_filter;
struct rte_eth_syn_filter syn_filter;
struct ixgbe_fdir_rule fdir_rule;
struct rte_eth_l2_tunnel_conf l2_tn_filter;
struct ixgbe_hw_fdir_info *fdir_info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
struct rte_flow *flow = NULL;
struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
if (!flow) {
PMD_DRV_LOG(ERR, "failed to allocate memory");
return (struct rte_flow *)flow;
}
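/* Keep track of the rte_flow allocation on the global flow list. */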
ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
sizeof(struct ixgbe_flow_mem), 0);
if (!ixgbe_flow_mem_ptr) {
PMD_DRV_LOG(ERR, "failed to allocate memory");
rte_free(flow);
return NULL;
}
ixgbe_flow_mem_ptr->flow = flow;
TAILQ_INSERT_TAIL(&ixgbe_flow_list,
ixgbe_flow_mem_ptr, entries);
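/* Try the parsers in order; the n-tuple (5-tuple) filter comes first. */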
memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
ret = ixgbe_parse_ntuple_filter(attr, pattern,
actions, &ntuple_filter, error);
if (!ret) {
ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
if (!ret) {
ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
sizeof(struct ixgbe_ntuple_filter_ele), 0);
(void)rte_memcpy(&ntuple_filter_ptr->filter_info,
&ntuple_filter,
sizeof(struct rte_eth_ntuple_filter));
TAILQ_INSERT_TAIL(&filter_ntuple_list,
ntuple_filter_ptr, entries);
flow->rule = ntuple_filter_ptr;
flow->filter_type = RTE_ETH_FILTER_NTUPLE;
return flow;
}
goto out;
}
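/* Not an n-tuple rule; try the ethertype parser next. */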
memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
ret = ixgbe_parse_ethertype_filter(attr, pattern,
actions, &ethertype_filter, error);
if (!ret) {
ret = ixgbe_add_del_ethertype_filter(dev,
&ethertype_filter, TRUE);
if (!ret) {
ethertype_filter_ptr = rte_zmalloc(
"ixgbe_ethertype_filter",
sizeof(struct ixgbe_ethertype_filter_ele), 0);
(void)rte_memcpy(&ethertype_filter_ptr->filter_info,
&ethertype_filter,
sizeof(struct rte_eth_ethertype_filter));
TAILQ_INSERT_TAIL(&filter_ethertype_list,
ethertype_filter_ptr, entries);
flow->rule = ethertype_filter_ptr;
flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
return flow;
}
goto out;
}
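/* Not an ethertype rule either; try the TCP SYN filter parser. */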
memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
ret = cons_parse_syn_filter(attr, pattern, actions, &syn_filter, error);
if (!ret) {
ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
if (!ret) {
syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
sizeof(struct ixgbe_eth_syn_filter_ele), 0);
(void)rte_memcpy(&syn_filter_ptr->filter_info,
&syn_filter,
sizeof(struct rte_eth_syn_filter));
TAILQ_INSERT_TAIL(&filter_syn_list,
syn_filter_ptr,
entries);
flow->rule = syn_filter_ptr;
flow->filter_type = RTE_ETH_FILTER_SYN;
return flow;
}
goto out;
}
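/* Next candidate: the flow director (fdir) parser. */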
memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
ret = ixgbe_parse_fdir_filter(attr, pattern,
actions, &fdir_rule, error);
if (!ret) {
/* A mask cannot be deleted. */
if (fdir_rule.b_mask) {
if (!fdir_info->mask_added) {
/* It's the first time the mask is set. */
rte_memcpy(&fdir_info->mask,
&fdir_rule.mask,
sizeof(struct ixgbe_hw_fdir_mask));
ret = ixgbe_fdir_set_input_mask(dev);
if (ret)
goto out;
fdir_info->mask_added = TRUE;
} else {
/**
* Only support one global mask,
* all the masks should be the same.
*/
ret = memcmp(&fdir_info->mask,
&fdir_rule.mask,
sizeof(struct ixgbe_hw_fdir_mask));
if (ret)
goto out;
}
}
if (fdir_rule.b_spec) {
ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
FALSE, FALSE);
if (!ret) {
fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
sizeof(struct ixgbe_fdir_rule_ele), 0);
(void)rte_memcpy(&fdir_rule_ptr->filter_info,
&fdir_rule,
sizeof(struct ixgbe_fdir_rule));
TAILQ_INSERT_TAIL(&filter_fdir_list,
fdir_rule_ptr, entries);
flow->rule = fdir_rule_ptr;
flow->filter_type = RTE_ETH_FILTER_FDIR;
return flow;
}
if (ret)
goto out;
}
goto out;
}
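/* Last candidate: the L2 tunnel filter parser. */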
memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
ret = cons_parse_l2_tn_filter(attr, pattern,
actions, &l2_tn_filter, error);
if (!ret) {
ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
if (!ret) {
l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
(void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
&l2_tn_filter,
sizeof(struct rte_eth_l2_tunnel_conf));
TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
l2_tn_filter_ptr, entries);
flow->rule = l2_tn_filter_ptr;
flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
return flow;
}
}
out:
TAILQ_REMOVE(&ixgbe_flow_list,
ixgbe_flow_mem_ptr, entries);
rte_free(ixgbe_flow_mem_ptr);
rte_free(flow);
return NULL;
}
/**
* Check if the flow rule is supported by ixgbe.
* It only checks the format. It doesn't guarantee the rule can be programmed into