net/igb: create consistent filter
This patch adds a function to create the flow rule: the rule passed through the generic flow API is parsed and programmed as the matching n-tuple, ethertype, SYN, or flex filter, and the created rule is tracked on per-type filter lists.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
commit 22bb13410c
parent 7cd77faf71
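For context, a minimal application-side sketch (not part of the patch) of how this path is exercised through the generic rte_flow API: a hypothetical helper steer_tcp_dport_to_queue() builds an ETH/IPV4/TCP pattern with a QUEUE action and calls rte_flow_validate()/rte_flow_create(), which the ethdev layer dispatches to the igb PMD's igb_flow_ops (igb_flow_validate and, with this patch, igb_flow_create). The port id, queue index, and header values below are illustrative only, and whether a given pattern is accepted depends on the igb parsers added earlier in this series.

#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Hypothetical helper: steer TCP traffic with a given destination port to
 * an Rx queue. Values are illustrative only. */
static struct rte_flow *
steer_tcp_dport_to_queue(uint16_t port_id, uint16_t tcp_dport, uint16_t queue)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_ipv4 ip_spec = { .hdr.next_proto_id = IPPROTO_TCP };
        struct rte_flow_item_ipv4 ip_mask = { .hdr.next_proto_id = 0xff };
        struct rte_flow_item_tcp tcp_spec = {
                .hdr.dst_port = rte_cpu_to_be_16(tcp_dport),
        };
        struct rte_flow_item_tcp tcp_mask = {
                .hdr.dst_port = rte_cpu_to_be_16(0xffff),
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue_conf = { .index = queue };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        /* Validate only checks the format; create programs the filter. */
        if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
                return NULL;
        return rte_flow_create(port_id, &attr, pattern, actions, &err);
}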
@@ -312,6 +312,53 @@ struct e1000_adapter {
 #define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
 	(&((struct e1000_adapter *)adapter)->filter)
 
+struct rte_flow {
+	enum rte_filter_type filter_type;
+	void *rule;
+};
+
+/* ntuple filter list structure */
+struct igb_ntuple_filter_ele {
+	TAILQ_ENTRY(igb_ntuple_filter_ele) entries;
+	struct rte_eth_ntuple_filter filter_info;
+};
+
+/* ethertype filter list structure */
+struct igb_ethertype_filter_ele {
+	TAILQ_ENTRY(igb_ethertype_filter_ele) entries;
+	struct rte_eth_ethertype_filter filter_info;
+};
+
+/* syn filter list structure */
+struct igb_eth_syn_filter_ele {
+	TAILQ_ENTRY(igb_eth_syn_filter_ele) entries;
+	struct rte_eth_syn_filter filter_info;
+};
+
+/* flex filter list structure */
+struct igb_flex_filter_ele {
+	TAILQ_ENTRY(igb_flex_filter_ele) entries;
+	struct rte_eth_flex_filter filter_info;
+};
+
+/* igb_flow memory list structure */
+struct igb_flow_mem {
+	TAILQ_ENTRY(igb_flow_mem) entries;
+	struct rte_flow *flow;
+	struct rte_eth_dev *dev;
+};
+
+TAILQ_HEAD(igb_ntuple_filter_list, igb_ntuple_filter_ele);
+struct igb_ntuple_filter_list igb_filter_ntuple_list;
+TAILQ_HEAD(igb_ethertype_filter_list, igb_ethertype_filter_ele);
+struct igb_ethertype_filter_list igb_filter_ethertype_list;
+TAILQ_HEAD(igb_syn_filter_list, igb_eth_syn_filter_ele);
+struct igb_syn_filter_list igb_filter_syn_list;
+TAILQ_HEAD(igb_flex_filter_list, igb_flex_filter_ele);
+struct igb_flex_filter_list igb_filter_flex_list;
+TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem);
+struct igb_flow_mem_list igb_flow_list;
+
 extern const struct rte_flow_ops igb_flow_ops;
 
 /*
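The structures above give each created rte_flow a filter_type tag and a rule pointer into one of the per-type lists, while igb_flow_mem ties every flow back to its port. As a hedged illustration of that bookkeeping (not part of the patch), a hypothetical helper that walks the global flow list:

#include <sys/queue.h>

/* Hypothetical helper (not in the patch): count the rte_flow rules that
 * were created on a given port, using only the igb_flow_mem list above. */
static unsigned int
igb_count_flows_on_port(struct rte_eth_dev *dev)
{
        struct igb_flow_mem *mem;
        unsigned int n = 0;

        TAILQ_FOREACH(mem, &igb_flow_list, entries)
                if (mem->dev == dev)
                        n++;
        return n;
}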
@@ -432,4 +479,15 @@ void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 
 void igb_pf_host_uninit(struct rte_eth_dev *dev);
 
+int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
+		struct rte_eth_ntuple_filter *ntuple_filter, bool add);
+int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
+		struct rte_eth_ethertype_filter *filter,
+		bool add);
+int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
+		struct rte_eth_syn_filter *filter,
+		bool add);
+int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
+		struct rte_eth_flex_filter *filter,
+		bool add);
 #endif /* _E1000_ETHDEV_H_ */
@@ -213,9 +213,6 @@ static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
 		struct rte_eth_rss_reta_entry64 *reta_conf,
 		uint16_t reta_size);
 
-static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
-		struct rte_eth_syn_filter *filter,
-		bool add);
 static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
 		struct rte_eth_syn_filter *filter);
 static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
@@ -225,9 +222,6 @@ static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
 		struct rte_eth_ntuple_filter *ntuple_filter);
 static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
 		struct rte_eth_ntuple_filter *ntuple_filter);
-static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
-		struct rte_eth_flex_filter *filter,
-		bool add);
 static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
 		struct rte_eth_flex_filter *filter);
 static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
@@ -237,17 +231,11 @@ static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
 		struct rte_eth_ntuple_filter *ntuple_filter);
 static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
 		struct rte_eth_ntuple_filter *ntuple_filter);
-static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
-		struct rte_eth_ntuple_filter *filter,
-		bool add);
 static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
 		struct rte_eth_ntuple_filter *filter);
 static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
 		enum rte_filter_op filter_op,
 		void *arg);
-static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
-		struct rte_eth_ethertype_filter *filter,
-		bool add);
 static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
 		enum rte_filter_op filter_op,
 		void *arg);
@@ -955,6 +943,12 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
 	TAILQ_INIT(&filter_info->twotuple_list);
 	TAILQ_INIT(&filter_info->fivetuple_list);
 
+	TAILQ_INIT(&igb_filter_ntuple_list);
+	TAILQ_INIT(&igb_filter_ethertype_list);
+	TAILQ_INIT(&igb_filter_syn_list);
+	TAILQ_INIT(&igb_filter_flex_list);
+	TAILQ_INIT(&igb_flow_list);
+
 	return 0;
 
 err_late:
@@ -3580,7 +3574,7 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
 	return 0;
 }
 
-static int
+int
 eth_igb_syn_filter_set(struct rte_eth_dev *dev,
 		struct rte_eth_syn_filter *filter,
 		bool add)
@@ -4492,7 +4486,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
  * - On success, zero.
  * - On failure, a negative value.
  */
-static int
+int
 igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
 		struct rte_eth_ntuple_filter *ntuple_filter,
 		bool add)
@@ -1269,6 +1269,141 @@ igb_parse_flex_filter(struct rte_eth_dev *dev,
 	return 0;
 }
 
+/**
+ * Create a flow rule.
+ * Theoretically one rule can match more than one filter.
+ * We will let it use the filter which it hits first.
+ * So, the sequence matters.
+ */
+static struct rte_flow *
+igb_flow_create(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error)
+{
+	int ret;
+	struct rte_eth_ntuple_filter ntuple_filter;
+	struct rte_eth_ethertype_filter ethertype_filter;
+	struct rte_eth_syn_filter syn_filter;
+	struct rte_eth_flex_filter flex_filter;
+	struct rte_flow *flow = NULL;
+	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
+	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
+	struct igb_eth_syn_filter_ele *syn_filter_ptr;
+	struct igb_flex_filter_ele *flex_filter_ptr;
+	struct igb_flow_mem *igb_flow_mem_ptr;
+
+	flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
+		return (struct rte_flow *)flow;
+	}
+	igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
+			sizeof(struct igb_flow_mem), 0);
+	if (!igb_flow_mem_ptr) {
+		PMD_DRV_LOG(ERR, "failed to allocate memory");
+		rte_free(flow);
+		return NULL;
+	}
+	igb_flow_mem_ptr->flow = flow;
+	igb_flow_mem_ptr->dev = dev;
+	TAILQ_INSERT_TAIL(&igb_flow_list,
+				igb_flow_mem_ptr, entries);
+
+	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+	ret = igb_parse_ntuple_filter(dev, attr, pattern,
+			actions, &ntuple_filter, error);
+	if (!ret) {
+		ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+		if (!ret) {
+			ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
+				sizeof(struct igb_ntuple_filter_ele), 0);
+			(void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+				&ntuple_filter,
+				sizeof(struct rte_eth_ntuple_filter));
+			TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
+				ntuple_filter_ptr, entries);
+			flow->rule = ntuple_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+			return flow;
+		}
+		goto out;
+	}
+
+	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+	ret = igb_parse_ethertype_filter(dev, attr, pattern,
+				actions, &ethertype_filter, error);
+	if (!ret) {
+		ret = igb_add_del_ethertype_filter(dev,
+				&ethertype_filter, TRUE);
+		if (!ret) {
+			ethertype_filter_ptr = rte_zmalloc(
+				"igb_ethertype_filter",
+				sizeof(struct igb_ethertype_filter_ele), 0);
+			(void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+				&ethertype_filter,
+				sizeof(struct rte_eth_ethertype_filter));
+			TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
+				ethertype_filter_ptr, entries);
+			flow->rule = ethertype_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+			return flow;
+		}
+		goto out;
+	}
+
+	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+	ret = igb_parse_syn_filter(dev, attr, pattern,
+				actions, &syn_filter, error);
+	if (!ret) {
+		ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
+		if (!ret) {
+			syn_filter_ptr = rte_zmalloc("igb_syn_filter",
+				sizeof(struct igb_eth_syn_filter_ele), 0);
+			(void)rte_memcpy(&syn_filter_ptr->filter_info,
+				&syn_filter,
+				sizeof(struct rte_eth_syn_filter));
+			TAILQ_INSERT_TAIL(&igb_filter_syn_list,
+				syn_filter_ptr,
+				entries);
+			flow->rule = syn_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_SYN;
+			return flow;
+		}
+		goto out;
+	}
+
+	memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
+	ret = igb_parse_flex_filter(dev, attr, pattern,
+				actions, &flex_filter, error);
+	if (!ret) {
+		ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
+		if (!ret) {
+			flex_filter_ptr = rte_zmalloc("igb_flex_filter",
+				sizeof(struct igb_flex_filter_ele), 0);
+			(void)rte_memcpy(&flex_filter_ptr->filter_info,
+				&flex_filter,
+				sizeof(struct rte_eth_flex_filter));
+			TAILQ_INSERT_TAIL(&igb_filter_flex_list,
+				flex_filter_ptr, entries);
+			flow->rule = flex_filter_ptr;
+			flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
+			return flow;
+		}
+	}
+
+out:
+	TAILQ_REMOVE(&igb_flow_list,
+		igb_flow_mem_ptr, entries);
+	rte_flow_error_set(error, -ret,
+		RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+		"Failed to create flow.");
+	rte_free(igb_flow_mem_ptr);
+	rte_free(flow);
+	return NULL;
+}
+
 /**
  * Check if the flow rule is supported by igb.
  * It only checks the format. It doesn't guarantee the rule can be programmed into
@@ -1314,7 +1449,7 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
 
 const struct rte_flow_ops igb_flow_ops = {
 	igb_flow_validate,
-	NULL,
+	igb_flow_create,
 	NULL,
 	NULL,
 	NULL,
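Since flow->rule is stored as a void pointer, recovering the programmed filter means casting according to filter_type. A brief sketch for the n-tuple case; igb_flow_to_ntuple() is a hypothetical helper, not part of the patch:

/* Hypothetical helper (not in the patch): recover the n-tuple filter
 * backing a flow created by igb_flow_create(). */
static struct rte_eth_ntuple_filter *
igb_flow_to_ntuple(struct rte_flow *flow)
{
        struct igb_ntuple_filter_ele *ele;

        if (flow == NULL || flow->filter_type != RTE_ETH_FILTER_NTUPLE)
                return NULL;
        ele = flow->rule;
        return &ele->filter_info;
}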