net/ice: rework for generic flow enabling

The patch reworks the generic flow API (rte_flow) implementation. It
introduces an abstract layer which provides a unified interface for the
low-level filter engines (switch, fdir, hash) to register supported
patterns and actions and to implement the flow validate/create/destroy/
flush/query operations. The patch also removes the existing switch
filter implementation to avoid a compile error; the switch filter
implementation for the new framework will be added in a following patch.

Signed-off-by: Ying A Wang <ying.a.wang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Reviewed-by: Xiaolong Ye <xiaolong.ye@intel.com>
parent 0998c89a85, commit 7615a68950
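Background: the flows handled by this layer arrive through the standard rte_flow calls. The sketch below is a minimal, illustrative application-side sequence exercising the validate/create/destroy path; the port id, queue index, and pattern are made-up assumptions, not part of this patch.

```c
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Illustrative only: steer IPv4 traffic on a port to Rx queue 4, going
 * through the generic flow layer introduced by this patch.
 */
static int
example_ipv4_to_queue(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 4 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
		return -1;

	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	if (flow == NULL)
		return -1;

	/* ... traffic runs ... */

	return rte_flow_destroy(port_id, flow, &err);
}
```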
@@ -15,7 +15,7 @@
#include "base/ice_dcb.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_switch_filter.h"
#include "ice_generic_flow.h"

/* devargs */
#define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
@@ -2009,7 +2009,11 @@ ice_dev_init(struct rte_eth_dev *dev)
	/* get base queue pairs index in the device */
	ice_base_queue_get(pf);

	TAILQ_INIT(&pf->flow_list);
	ret = ice_flow_init(ad);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize flow");
		return ret;
	}

	return 0;

@@ -2131,7 +2135,8 @@ ice_dev_close(struct rte_eth_dev *dev)
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct rte_flow *p_flow;
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	/* Since stop will make link down, then the link event will be
	 * triggered, disable the irq firstly to avoid the port_infoe etc
@@ -2142,6 +2147,8 @@ ice_dev_close(struct rte_eth_dev *dev)

	ice_dev_stop(dev);

	ice_flow_uninit(ad);

	/* release all queue resource */
	ice_free_queues(dev);

@@ -2167,13 +2174,6 @@ ice_dev_close(struct rte_eth_dev *dev)
	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     ice_interrupt_handler, dev);

	/* Remove all flows */
	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
		ice_free_switch_filter_rule(p_flow->rule);
		rte_free(p_flow);
	}
}

static int
@@ -241,16 +241,12 @@ struct ice_vsi {
	bool offset_loaded;
};

extern const struct rte_flow_ops ice_flow_ops;

/* Struct to store flow created. */
struct rte_flow {
	TAILQ_ENTRY(rte_flow) node;
	void *rule;
};

struct rte_flow;
TAILQ_HEAD(ice_flow_list, rte_flow);

struct ice_flow_parser_node;
TAILQ_HEAD(ice_parser_list, ice_flow_parser_node);

struct ice_pf {
	struct ice_adapter *adapter; /* The adapter this PF associate to */
	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -279,6 +275,9 @@ struct ice_pf {
	bool offset_loaded;
	bool adapter_stopped;
	struct ice_flow_list flow_list;
	struct ice_parser_list rss_parser_list;
	struct ice_parser_list perm_parser_list;
	struct ice_parser_list dist_parser_list;
};

#define ICE_MAX_QUEUE_NUM  2048
@@ -17,7 +17,19 @@

#include "ice_ethdev.h"
#include "ice_generic_flow.h"
#include "ice_switch_filter.h"

/**
 * Non-pipeline mode, fdir and switch both used as distributor,
 * fdir used first, switch used as fdir's backup.
 */
#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY 0
/* Pipeline mode, switch used at permission stage */
#define ICE_FLOW_CLASSIFY_STAGE_PERMISSION 1
/* Pipeline mode, fdir used at distributor stage */
#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR 2

static struct ice_engine_list engine_list =
		TAILQ_HEAD_INITIALIZER(engine_list);

static int ice_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
@@ -34,16 +46,175 @@ static int ice_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow_error *error);
static int ice_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error);
static int ice_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error);

const struct rte_flow_ops ice_flow_ops = {
	.validate = ice_flow_validate,
	.create = ice_flow_create,
	.destroy = ice_flow_destroy,
	.flush = ice_flow_flush,
	.query = ice_flow_query,
};

void
ice_register_flow_engine(struct ice_flow_engine *engine)
{
	TAILQ_INSERT_TAIL(&engine_list, engine, node);
}

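To make the registration path concrete, here is a minimal sketch of a hypothetical engine handing itself to ice_register_flow_engine(); the ice_dummy_* names are invented for illustration, the use of RTE_INIT as the hook is an assumption (the real switch/FDIR/hash engines arrive in later patches), and the usual ice PMD headers are assumed to be in scope.

```c
/* Hypothetical engine; names are made up for illustration only. */
static int
ice_dummy_engine_init(struct ice_adapter *ad __rte_unused)
{
	/* A real engine would set up state and register its parser(s) here. */
	return 0;
}

static int
ice_dummy_engine_create(struct ice_adapter *ad __rte_unused,
		struct rte_flow *flow __rte_unused,
		void *meta __rte_unused,
		struct rte_flow_error *error)
{
	/* A real engine programs hardware and stores its handle in flow->rule. */
	return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "not implemented");
}

static struct ice_flow_engine ice_dummy_engine = {
	.init = ice_dummy_engine_init,
	.create = ice_dummy_engine_create,
	.type = ICE_FLOW_ENGINE_NONE,
};

RTE_INIT(ice_dummy_engine_register)
{
	ice_register_flow_engine(&ice_dummy_engine);
}
```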
int
ice_flow_init(struct ice_adapter *ad)
{
	int ret;
	struct ice_pf *pf = &ad->pf;
	void *temp;
	struct ice_flow_engine *engine;

	TAILQ_INIT(&pf->flow_list);
	TAILQ_INIT(&pf->rss_parser_list);
	TAILQ_INIT(&pf->perm_parser_list);
	TAILQ_INIT(&pf->dist_parser_list);

	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
		if (engine->init == NULL) {
			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
					engine->type);
			return -ENOTSUP;
		}

		ret = engine->init(ad);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
					engine->type);
			return ret;
		}
	}
	return 0;
}

void
ice_flow_uninit(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_engine *engine;
	struct rte_flow *p_flow;
	struct ice_flow_parser_node *p_parser;
	void *temp;

	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
		if (engine->uninit)
			engine->uninit(ad);
	}

	/* Remove all flows */
	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
		if (p_flow->engine->free)
			p_flow->engine->free(p_flow);
		rte_free(p_flow);
	}

	/* Cleanup parser list */
	while ((p_parser = TAILQ_FIRST(&pf->rss_parser_list))) {
		TAILQ_REMOVE(&pf->rss_parser_list, p_parser, node);
		rte_free(p_parser);
	}

	while ((p_parser = TAILQ_FIRST(&pf->perm_parser_list))) {
		TAILQ_REMOVE(&pf->perm_parser_list, p_parser, node);
		rte_free(p_parser);
	}

	while ((p_parser = TAILQ_FIRST(&pf->dist_parser_list))) {
		TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node);
		rte_free(p_parser);
	}
}

static struct ice_parser_list *
ice_get_parser_list(struct ice_flow_parser *parser,
		struct ice_adapter *ad)
{
	struct ice_parser_list *list;
	struct ice_pf *pf = &ad->pf;

	switch (parser->stage) {
	case ICE_FLOW_STAGE_RSS:
		list = &pf->rss_parser_list;
		break;
	case ICE_FLOW_STAGE_PERMISSION:
		list = &pf->perm_parser_list;
		break;
	case ICE_FLOW_STAGE_DISTRIBUTOR:
		list = &pf->dist_parser_list;
		break;
	default:
		return NULL;
	}

	return list;
}

int
ice_register_parser(struct ice_flow_parser *parser,
		struct ice_adapter *ad)
{
	struct ice_parser_list *list;
	struct ice_flow_parser_node *parser_node;

	parser_node = rte_zmalloc("ice_parser", sizeof(*parser_node), 0);
	if (parser_node == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory.");
		return -ENOMEM;
	}
	parser_node->parser = parser;

	list = ice_get_parser_list(parser, ad);
	if (list == NULL)
		return -EINVAL;

	if (ad->devargs.pipe_mode_support) {
		TAILQ_INSERT_TAIL(list, parser_node, node);
	} else {
		if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH ||
				parser->engine->type == ICE_FLOW_ENGINE_HASH)
			TAILQ_INSERT_TAIL(list, parser_node, node);
		else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
			TAILQ_INSERT_HEAD(list, parser_node, node);
		else
			return -EINVAL;
	}
	return 0;
}

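In non-pipeline mode the insert order above makes FDIR the first parser tried and switch the fallback. For illustration, a parser for the toy engine sketched earlier could be populated and attached per adapter as below; the pattern table contents and the ice_dummy_* names are assumptions, and the parse callback is only declared here (it is sketched later, next to the callback typedefs).

```c
/* Hypothetical pattern table and parser; illustrative only. */
static enum rte_flow_item_type ice_dummy_pattern_eth[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static struct ice_pattern_match_item ice_dummy_pattern_list[] = {
	{ice_dummy_pattern_eth, ICE_INSET_DMAC | ICE_INSET_ETHERTYPE, NULL},
};

/* Parse callback declared here, sketched further down. */
static int ice_dummy_parse_pattern_action(struct ice_adapter *ad,
		struct ice_pattern_match_item *array, uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta, struct rte_flow_error *error);

static struct ice_flow_parser ice_dummy_parser = {
	.engine = &ice_dummy_engine,
	.array = ice_dummy_pattern_list,
	.array_len = RTE_DIM(ice_dummy_pattern_list),
	.parse_pattern_action = ice_dummy_parse_pattern_action,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static int
ice_dummy_parser_setup(struct ice_adapter *ad)
{
	/* Typically called from the engine's init(); in non-pipeline mode a
	 * distributor-stage parser lands in pf->dist_parser_list.
	 */
	return ice_register_parser(&ice_dummy_parser, ad);
}
```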
void
ice_unregister_parser(struct ice_flow_parser *parser,
		struct ice_adapter *ad)
{
	struct ice_parser_list *list;
	struct ice_flow_parser_node *p_parser;
	void *temp;

	list = ice_get_parser_list(parser, ad);
	if (list == NULL)
		return;

	TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
		if (p_parser->parser->engine->type == parser->engine->type) {
			TAILQ_REMOVE(list, p_parser, node);
			rte_free(p_parser);
		}
	}
}

static int
ice_flow_valid_attr(const struct rte_flow_attr *attr,
ice_flow_valid_attr(struct ice_adapter *ad,
		const struct rte_flow_attr *attr,
		int *ice_pipeline_stage,
		struct rte_flow_error *error)
{
	/* Must be input direction */
@@ -62,12 +233,24 @@ ice_flow_valid_attr(const struct rte_flow_attr *attr,
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Not support priority.");
		return -rte_errno;
	/* Check pipeline mode support to set classification stage */
	if (ad->devargs.pipe_mode_support) {
		if (attr->priority == 0)
			*ice_pipeline_stage =
				ICE_FLOW_CLASSIFY_STAGE_PERMISSION;
		else
			*ice_pipeline_stage =
				ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR;
	} else {
		*ice_pipeline_stage =
			ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
		/* Not supported */
		if (attr->priority) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Not support priority.");
			return -rte_errno;
		}
	}

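In other words, with ad->devargs.pipe_mode_support set, attr->priority selects the stage (0 selects the permission stage, anything else the distributor stage); without it, every rule takes the distributor-only path and a non-zero priority is rejected. A tiny illustrative fragment (not from this patch):

```c
#include <rte_flow.h>

/* Illustrative only: attribute values an application would pass when the
 * pipeline-mode devarg is enabled.
 */
struct rte_flow_attr perm_attr = { .ingress = 1, .priority = 0 }; /* permission stage */
struct rte_flow_attr dist_attr = { .ingress = 1, .priority = 1 }; /* distributor stage */
```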
	/* Not supported */
@@ -150,11 +333,15 @@ ice_match_pattern(enum rte_flow_item_type *item_array,
			item->type == RTE_FLOW_ITEM_TYPE_END);
}

static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
struct ice_pattern_match_item *
ice_search_pattern_match_item(const struct rte_flow_item pattern[],
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		struct rte_flow_error *error)
{
	uint16_t i = 0;
	uint64_t inset;
	struct ice_pattern_match_item *pattern_match_item;
	/* need free by each filter */
	struct rte_flow_item *items; /* used for pattern without VOID items */
	uint32_t item_num = 0; /* non-void item number */

@@ -171,401 +358,76 @@ static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
	if (!items) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No memory for PMD internal items.");
		return -ENOMEM;
		return NULL;
	}
	pattern_match_item = rte_zmalloc("ice_pattern_match_item",
			sizeof(struct ice_pattern_match_item), 0);
	if (!pattern_match_item) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Failed to allocate memory.");
		return NULL;
	}

	ice_pattern_skip_void_item(items, pattern);

	for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
		if (ice_match_pattern(ice_supported_patterns[i].items,
				      items)) {
			inset = ice_supported_patterns[i].sw_fields;
	for (i = 0; i < array_len; i++)
		if (ice_match_pattern(array[i].pattern_list,
				      items)) {
			pattern_match_item->input_set_mask =
				array[i].input_set_mask;
			pattern_match_item->pattern_list =
				array[i].pattern_list;
			pattern_match_item->meta = array[i].meta;
			rte_free(items);
			return inset;
			return pattern_match_item;
		}
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			   pattern, "Unsupported pattern");

	rte_free(items);
	return 0;
	rte_free(pattern_match_item);
	return NULL;
}

static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
|
||||
struct rte_flow_error *error)
|
||||
static struct ice_flow_engine *
|
||||
ice_parse_engine(struct ice_adapter *ad,
|
||||
struct ice_parser_list *parser_list,
|
||||
const struct rte_flow_item pattern[],
|
||||
const struct rte_flow_action actions[],
|
||||
void **meta,
|
||||
struct rte_flow_error *error)
|
||||
{
|
||||
const struct rte_flow_item *item = pattern;
|
||||
const struct rte_flow_item_eth *eth_spec, *eth_mask;
|
||||
const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
|
||||
const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
|
||||
const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
|
||||
const struct rte_flow_item_udp *udp_spec, *udp_mask;
|
||||
const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
|
||||
const struct rte_flow_item_icmp *icmp_mask;
|
||||
const struct rte_flow_item_icmp6 *icmp6_mask;
|
||||
const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
|
||||
const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
|
||||
enum rte_flow_item_type item_type;
|
||||
uint8_t ipv6_addr_mask[16] = {
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
|
||||
uint64_t input_set = ICE_INSET_NONE;
|
||||
bool is_tunnel = false;
|
||||
struct ice_flow_engine *engine = NULL;
|
||||
struct ice_flow_parser_node *parser_node;
|
||||
void *temp;
|
||||
|
||||
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
|
||||
if (item->last) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM,
|
||||
item,
|
||||
"Not support range");
|
||||
return 0;
|
||||
}
|
||||
item_type = item->type;
|
||||
switch (item_type) {
|
||||
case RTE_FLOW_ITEM_TYPE_ETH:
|
||||
eth_spec = item->spec;
|
||||
eth_mask = item->mask;
|
||||
TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
|
||||
if (parser_node->parser->parse_pattern_action(ad,
|
||||
parser_node->parser->array,
|
||||
parser_node->parser->array_len,
|
||||
pattern, actions, meta, error) < 0)
|
||||
continue;
|
||||
|
||||
if (eth_spec && eth_mask) {
|
||||
if (rte_is_broadcast_ether_addr(ð_mask->src))
|
||||
input_set |= ICE_INSET_SMAC;
|
||||
if (rte_is_broadcast_ether_addr(ð_mask->dst))
|
||||
input_set |= ICE_INSET_DMAC;
|
||||
if (eth_mask->type == RTE_BE16(0xffff))
|
||||
input_set |= ICE_INSET_ETHERTYPE;
|
||||
}
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_IPV4:
|
||||
ipv4_spec = item->spec;
|
||||
ipv4_mask = item->mask;
|
||||
|
||||
if (!(ipv4_spec && ipv4_mask))
|
||||
break;
|
||||
|
||||
/* Check IPv4 mask and update input set */
|
||||
if (ipv4_mask->hdr.version_ihl ||
|
||||
ipv4_mask->hdr.total_length ||
|
||||
ipv4_mask->hdr.packet_id ||
|
||||
ipv4_mask->hdr.hdr_checksum) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM,
|
||||
item,
|
||||
"Invalid IPv4 mask.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (is_tunnel) {
|
||||
if (ipv4_mask->hdr.src_addr == UINT32_MAX)
|
||||
input_set |= ICE_INSET_TUN_IPV4_SRC;
|
||||
if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
|
||||
input_set |= ICE_INSET_TUN_IPV4_DST;
|
||||
if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
|
||||
input_set |= ICE_INSET_TUN_IPV4_TTL;
|
||||
if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
|
||||
input_set |= ICE_INSET_TUN_IPV4_PROTO;
|
||||
} else {
|
||||
if (ipv4_mask->hdr.src_addr == UINT32_MAX)
|
||||
input_set |= ICE_INSET_IPV4_SRC;
|
||||
if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
|
||||
input_set |= ICE_INSET_IPV4_DST;
|
||||
if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
|
||||
input_set |= ICE_INSET_IPV4_TTL;
|
||||
if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
|
||||
input_set |= ICE_INSET_IPV4_PROTO;
|
||||
if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
|
||||
input_set |= ICE_INSET_IPV4_TOS;
|
||||
}
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_IPV6:
|
||||
ipv6_spec = item->spec;
|
||||
ipv6_mask = item->mask;
|
||||
|
||||
if (!(ipv6_spec && ipv6_mask))
|
||||
break;
|
||||
|
||||
if (ipv6_mask->hdr.payload_len) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM,
|
||||
item,
|
||||
"Invalid IPv6 mask");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (is_tunnel) {
|
||||
if (!memcmp(ipv6_mask->hdr.src_addr,
|
||||
ipv6_addr_mask,
|
||||
RTE_DIM(ipv6_mask->hdr.src_addr)))
|
||||
input_set |= ICE_INSET_TUN_IPV6_SRC;
|
||||
if (!memcmp(ipv6_mask->hdr.dst_addr,
|
||||
ipv6_addr_mask,
|
||||
RTE_DIM(ipv6_mask->hdr.dst_addr)))
|
||||
input_set |= ICE_INSET_TUN_IPV6_DST;
|
||||
if (ipv6_mask->hdr.proto == UINT8_MAX)
|
||||
input_set |= ICE_INSET_TUN_IPV6_PROTO;
|
||||
if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
|
||||
input_set |= ICE_INSET_TUN_IPV6_TTL;
|
||||
} else {
|
||||
if (!memcmp(ipv6_mask->hdr.src_addr,
|
||||
ipv6_addr_mask,
|
||||
RTE_DIM(ipv6_mask->hdr.src_addr)))
|
||||
input_set |= ICE_INSET_IPV6_SRC;
|
||||
if (!memcmp(ipv6_mask->hdr.dst_addr,
|
||||
ipv6_addr_mask,
|
||||
RTE_DIM(ipv6_mask->hdr.dst_addr)))
|
||||
input_set |= ICE_INSET_IPV6_DST;
|
||||
if (ipv6_mask->hdr.proto == UINT8_MAX)
|
||||
input_set |= ICE_INSET_IPV6_PROTO;
|
||||
if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
|
||||
input_set |= ICE_INSET_IPV6_HOP_LIMIT;
|
||||
if ((ipv6_mask->hdr.vtc_flow &
|
||||
rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
|
||||
== rte_cpu_to_be_32
|
||||
(RTE_IPV6_HDR_TC_MASK))
|
||||
input_set |= ICE_INSET_IPV6_TOS;
|
||||
}
|
||||
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_UDP:
|
||||
udp_spec = item->spec;
|
||||
udp_mask = item->mask;
|
||||
|
||||
if (!(udp_spec && udp_mask))
|
||||
break;
|
||||
|
||||
/* Check UDP mask and update input set*/
|
||||
if (udp_mask->hdr.dgram_len ||
|
||||
udp_mask->hdr.dgram_cksum) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM,
|
||||
item,
|
||||
"Invalid UDP mask");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (is_tunnel) {
|
||||
if (udp_mask->hdr.src_port == UINT16_MAX)
|
||||
input_set |= ICE_INSET_TUN_SRC_PORT;
|
||||
if (udp_mask->hdr.dst_port == UINT16_MAX)
|
||||
input_set |= ICE_INSET_TUN_DST_PORT;
|
||||
} else {
|
||||
if (udp_mask->hdr.src_port == UINT16_MAX)
|
||||
input_set |= ICE_INSET_SRC_PORT;
|
||||
if (udp_mask->hdr.dst_port == UINT16_MAX)
|
||||
input_set |= ICE_INSET_DST_PORT;
|
||||
}
|
||||
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_TCP:
|
||||
tcp_spec = item->spec;
|
||||
tcp_mask = item->mask;
|
||||
|
||||
if (!(tcp_spec && tcp_mask))
|
||||
break;
|
||||
|
||||
/* Check TCP mask and update input set */
|
||||
if (tcp_mask->hdr.sent_seq ||
|
||||
tcp_mask->hdr.recv_ack ||
|
||||
tcp_mask->hdr.data_off ||
|
||||
tcp_mask->hdr.tcp_flags ||
|
||||
tcp_mask->hdr.rx_win ||
|
||||
tcp_mask->hdr.cksum ||
|
||||
tcp_mask->hdr.tcp_urp) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM,
|
||||
item,
|
||||
"Invalid TCP mask");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (is_tunnel) {
|
||||
if (tcp_mask->hdr.src_port == UINT16_MAX)
|
||||
input_set |= ICE_INSET_TUN_SRC_PORT;
|
||||
if (tcp_mask->hdr.dst_port == UINT16_MAX)
|
||||
input_set |= ICE_INSET_TUN_DST_PORT;
|
||||
} else {
|
||||
if (tcp_mask->hdr.src_port == UINT16_MAX)
|
||||
input_set |= ICE_INSET_SRC_PORT;
|
||||
if (tcp_mask->hdr.dst_port == UINT16_MAX)
|
||||
input_set |= ICE_INSET_DST_PORT;
|
||||
}
|
||||
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_SCTP:
|
||||
sctp_spec = item->spec;
|
||||
sctp_mask = item->mask;
|
||||
|
||||
if (!(sctp_spec && sctp_mask))
|
||||
break;
|
||||
|
||||
/* Check SCTP mask and update input set */
|
||||
if (sctp_mask->hdr.cksum) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM,
|
||||
item,
|
||||
"Invalid SCTP mask");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (is_tunnel) {
|
||||
if (sctp_mask->hdr.src_port == UINT16_MAX)
|
||||
input_set |= ICE_INSET_TUN_SRC_PORT;
|
||||
if (sctp_mask->hdr.dst_port == UINT16_MAX)
|
||||
input_set |= ICE_INSET_TUN_DST_PORT;
|
||||
} else {
|
||||
if (sctp_mask->hdr.src_port == UINT16_MAX)
|
||||
input_set |= ICE_INSET_SRC_PORT;
|
||||
if (sctp_mask->hdr.dst_port == UINT16_MAX)
|
||||
input_set |= ICE_INSET_DST_PORT;
|
||||
}
|
||||
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_ICMP:
|
||||
icmp_mask = item->mask;
|
||||
if (icmp_mask->hdr.icmp_code ||
|
||||
icmp_mask->hdr.icmp_cksum ||
|
||||
icmp_mask->hdr.icmp_ident ||
|
||||
icmp_mask->hdr.icmp_seq_nb) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM,
|
||||
item,
|
||||
"Invalid ICMP mask");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (icmp_mask->hdr.icmp_type == UINT8_MAX)
|
||||
input_set |= ICE_INSET_ICMP;
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_ICMP6:
|
||||
icmp6_mask = item->mask;
|
||||
if (icmp6_mask->code ||
|
||||
icmp6_mask->checksum) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM,
|
||||
item,
|
||||
"Invalid ICMP6 mask");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (icmp6_mask->type == UINT8_MAX)
|
||||
input_set |= ICE_INSET_ICMP6;
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_VXLAN:
|
||||
vxlan_spec = item->spec;
|
||||
vxlan_mask = item->mask;
|
||||
/* Check if VXLAN item is used to describe protocol.
|
||||
* If yes, both spec and mask should be NULL.
|
||||
* If no, both spec and mask shouldn't be NULL.
|
||||
*/
|
||||
if ((!vxlan_spec && vxlan_mask) ||
|
||||
(vxlan_spec && !vxlan_mask)) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM,
|
||||
item,
|
||||
"Invalid VXLAN item");
|
||||
return 0;
|
||||
}
|
||||
if (vxlan_mask && vxlan_mask->vni[0] == UINT8_MAX &&
|
||||
vxlan_mask->vni[1] == UINT8_MAX &&
|
||||
vxlan_mask->vni[2] == UINT8_MAX)
|
||||
input_set |= ICE_INSET_TUN_ID;
|
||||
is_tunnel = 1;
|
||||
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_NVGRE:
|
||||
nvgre_spec = item->spec;
|
||||
nvgre_mask = item->mask;
|
||||
/* Check if NVGRE item is used to describe protocol.
|
||||
* If yes, both spec and mask should be NULL.
|
||||
* If no, both spec and mask shouldn't be NULL.
|
||||
*/
|
||||
if ((!nvgre_spec && nvgre_mask) ||
|
||||
(nvgre_spec && !nvgre_mask)) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM,
|
||||
item,
|
||||
"Invalid NVGRE item");
|
||||
return 0;
|
||||
}
|
||||
if (nvgre_mask && nvgre_mask->tni[0] == UINT8_MAX &&
|
||||
nvgre_mask->tni[1] == UINT8_MAX &&
|
||||
nvgre_mask->tni[2] == UINT8_MAX)
|
||||
input_set |= ICE_INSET_TUN_ID;
|
||||
is_tunnel = 1;
|
||||
|
||||
break;
|
||||
case RTE_FLOW_ITEM_TYPE_VOID:
|
||||
break;
|
||||
default:
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM,
|
||||
item,
|
||||
"Invalid pattern");
|
||||
break;
|
||||
}
|
||||
engine = parser_node->parser->engine;
|
||||
break;
|
||||
}
|
||||
return input_set;
|
||||
}
|
||||
|
||||
static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
|
||||
uint64_t inset, struct rte_flow_error *error)
|
||||
{
|
||||
uint64_t fields;
|
||||
|
||||
/* get valid field */
|
||||
fields = ice_get_flow_field(pattern, error);
|
||||
if (!fields || fields & (~inset)) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
|
||||
pattern,
|
||||
"Invalid input set");
|
||||
return -rte_errno;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ice_flow_valid_action(struct rte_eth_dev *dev,
|
||||
const struct rte_flow_action *actions,
|
||||
struct rte_flow_error *error)
|
||||
{
|
||||
const struct rte_flow_action_queue *act_q;
|
||||
uint16_t queue;
|
||||
const struct rte_flow_action *action;
|
||||
for (action = actions; action->type !=
|
||||
RTE_FLOW_ACTION_TYPE_END; action++) {
|
||||
switch (action->type) {
|
||||
case RTE_FLOW_ACTION_TYPE_QUEUE:
|
||||
act_q = action->conf;
|
||||
queue = act_q->index;
|
||||
if (queue >= dev->data->nb_rx_queues) {
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ACTION,
|
||||
actions, "Invalid queue ID for"
|
||||
" switch filter.");
|
||||
return -rte_errno;
|
||||
}
|
||||
break;
|
||||
case RTE_FLOW_ACTION_TYPE_DROP:
|
||||
case RTE_FLOW_ACTION_TYPE_VOID:
|
||||
break;
|
||||
default:
|
||||
rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_ACTION, actions,
|
||||
"Invalid action.");
|
||||
return -rte_errno;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
return engine;
|
||||
}
|
||||
|
||||
static int
ice_flow_validate(struct rte_eth_dev *dev,
ice_flow_validate_filter(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct ice_flow_engine **engine,
		void **meta,
		struct rte_flow_error *error)
{
	uint64_t inset = 0;
	int ret = ICE_ERR_NOT_SUPPORTED;
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ice_pipeline_stage = 0;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
@@ -587,25 +449,49 @@ ice_flow_validate(struct rte_eth_dev *dev,
		return -rte_errno;
	}

	ret = ice_flow_valid_attr(attr, error);
	ret = ice_flow_valid_attr(ad, attr, &ice_pipeline_stage, error);
	if (ret)
		return ret;

	inset = ice_flow_valid_pattern(pattern, error);
	if (!inset)
		return -rte_errno;
	*engine = ice_parse_engine(ad, &pf->rss_parser_list, pattern, actions,
			meta, error);
	if (*engine != NULL)
		return 0;

	ret = ice_flow_valid_inset(pattern, inset, error);
	if (ret)
		return ret;
	switch (ice_pipeline_stage) {
	case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY:
	case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR:
		*engine = ice_parse_engine(ad, &pf->dist_parser_list, pattern,
				actions, meta, error);
		break;
	case ICE_FLOW_CLASSIFY_STAGE_PERMISSION:
		*engine = ice_parse_engine(ad, &pf->perm_parser_list, pattern,
				actions, meta, error);
		break;
	default:
		return -EINVAL;
	}

	ret = ice_flow_valid_action(dev, actions, error);
	if (ret)
		return ret;
	if (*engine == NULL)
		return -EINVAL;

	return 0;
}

static int
ice_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	void *meta;
	struct ice_flow_engine *engine;

	return ice_flow_validate_filter(dev, attr, pattern, actions,
			&engine, &meta, error);
}

static struct rte_flow *
ice_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
@@ -616,6 +502,10 @@ ice_flow_create(struct rte_eth_dev *dev,
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	int ret;
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct ice_flow_engine *engine = NULL;
	void *meta;

	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
@@ -625,21 +515,28 @@ ice_flow_create(struct rte_eth_dev *dev,
		return flow;
	}

	ret = ice_flow_validate(dev, attr, pattern, actions, error);
	ret = ice_flow_validate_filter(dev, attr, pattern, actions,
			&engine, &meta, error);
	if (ret < 0)
		goto free_flow;

	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
	if (engine->create == NULL) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Invalid engine");
		goto free_flow;
	}

	ret = engine->create(ad, flow, meta, error);
	if (ret)
		goto free_flow;

	flow->engine = engine;
	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
	return flow;

free_flow:
	rte_flow_error_set(error, -ret,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"Failed to create flow.");
	PMD_DRV_LOG(ERR, "Failed to create flow");
	rte_free(flow);
	return NULL;
}
@@ -650,17 +547,24 @@ ice_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	int ret = 0;

	ret = ice_destroy_switch_filter(pf, flow, error);
	if (!flow || !flow->engine || !flow->engine->destroy) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Invalid flow");
		return -rte_errno;
	}

	ret = flow->engine->destroy(ad, flow, error);

	if (!ret) {
		TAILQ_REMOVE(&pf->flow_list, flow, node);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to destroy flow.");
		PMD_DRV_LOG(ERR, "Failed to destroy flow");
	}

	return ret;
@@ -678,12 +582,46 @@ ice_flow_flush(struct rte_eth_dev *dev,
	TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
		ret = ice_flow_destroy(dev, p_flow, error);
		if (ret) {
			rte_flow_error_set(error, -ret,
					RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					"Failed to flush SW flows.");
			return -rte_errno;
			PMD_DRV_LOG(ERR, "Failed to flush flows");
			return -EINVAL;
		}
	}

	return ret;
}

static int
ice_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error)
{
	int ret = -EINVAL;
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_flow_query_count *count = data;

	if (!flow || !flow->engine || !flow->engine->query_count) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Invalid flow");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow->engine->query_count(ad, flow, count, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION,
					actions,
					"action not supported");
		}
	}
	return ret;
}

@ -7,615 +7,278 @@
|
||||
|
||||
#include <rte_flow_driver.h>
|
||||
|
||||
struct ice_flow_pattern {
|
||||
enum rte_flow_item_type *items;
|
||||
uint64_t sw_fields;
|
||||
/* protocol */
|
||||
|
||||
#define ICE_PROT_MAC_INNER (1ULL << 1)
|
||||
#define ICE_PROT_MAC_OUTER (1ULL << 2)
|
||||
#define ICE_PROT_VLAN_INNER (1ULL << 3)
|
||||
#define ICE_PROT_VLAN_OUTER (1ULL << 4)
|
||||
#define ICE_PROT_IPV4_INNER (1ULL << 5)
|
||||
#define ICE_PROT_IPV4_OUTER (1ULL << 6)
|
||||
#define ICE_PROT_IPV6_INNER (1ULL << 7)
|
||||
#define ICE_PROT_IPV6_OUTER (1ULL << 8)
|
||||
#define ICE_PROT_TCP_INNER (1ULL << 9)
|
||||
#define ICE_PROT_TCP_OUTER (1ULL << 10)
|
||||
#define ICE_PROT_UDP_INNER (1ULL << 11)
|
||||
#define ICE_PROT_UDP_OUTER (1ULL << 12)
|
||||
#define ICE_PROT_SCTP_INNER (1ULL << 13)
|
||||
#define ICE_PROT_SCTP_OUTER (1ULL << 14)
|
||||
#define ICE_PROT_ICMP4_INNER (1ULL << 15)
|
||||
#define ICE_PROT_ICMP4_OUTER (1ULL << 16)
|
||||
#define ICE_PROT_ICMP6_INNER (1ULL << 17)
|
||||
#define ICE_PROT_ICMP6_OUTER (1ULL << 18)
|
||||
#define ICE_PROT_VXLAN (1ULL << 19)
|
||||
#define ICE_PROT_NVGRE (1ULL << 20)
|
||||
#define ICE_PROT_GTPU (1ULL << 21)
|
||||
|
||||
/* field */
|
||||
|
||||
#define ICE_SMAC (1ULL << 63)
|
||||
#define ICE_DMAC (1ULL << 62)
|
||||
#define ICE_ETHERTYPE (1ULL << 61)
|
||||
#define ICE_IP_SRC (1ULL << 60)
|
||||
#define ICE_IP_DST (1ULL << 59)
|
||||
#define ICE_IP_PROTO (1ULL << 58)
|
||||
#define ICE_IP_TTL (1ULL << 57)
|
||||
#define ICE_IP_TOS (1ULL << 56)
|
||||
#define ICE_SPORT (1ULL << 55)
|
||||
#define ICE_DPORT (1ULL << 54)
|
||||
#define ICE_ICMP_TYPE (1ULL << 53)
|
||||
#define ICE_ICMP_CODE (1ULL << 52)
|
||||
#define ICE_VXLAN_VNI (1ULL << 51)
|
||||
#define ICE_NVGRE_TNI (1ULL << 50)
|
||||
#define ICE_GTPU_TEID (1ULL << 49)
|
||||
#define ICE_GTPU_QFI (1ULL << 48)
|
||||
|
||||
/* input set */
|
||||
|
||||
#define ICE_INSET_NONE 0ULL
|
||||
|
||||
/* non-tunnel */
|
||||
|
||||
#define ICE_INSET_SMAC (ICE_PROT_MAC_OUTER | ICE_SMAC)
|
||||
#define ICE_INSET_DMAC (ICE_PROT_MAC_OUTER | ICE_DMAC)
|
||||
#define ICE_INSET_VLAN_INNER (ICE_PROT_VLAN_INNER)
|
||||
#define ICE_INSET_VLAN_OUTER (ICE_PROT_VLAN_OUTER)
|
||||
#define ICE_INSET_ETHERTYPE (ICE_ETHERTYPE)
|
||||
|
||||
#define ICE_INSET_IPV4_SRC \
|
||||
(ICE_PROT_IPV4_OUTER | ICE_IP_SRC)
|
||||
#define ICE_INSET_IPV4_DST \
|
||||
(ICE_PROT_IPV4_OUTER | ICE_IP_DST)
|
||||
#define ICE_INSET_IPV4_TOS \
|
||||
(ICE_PROT_IPV4_OUTER | ICE_IP_TOS)
|
||||
#define ICE_INSET_IPV4_PROTO \
|
||||
(ICE_PROT_IPV4_OUTER | ICE_IP_PROTO)
|
||||
#define ICE_INSET_IPV4_TTL \
|
||||
(ICE_PROT_IPV4_OUTER | ICE_IP_TTL)
|
||||
#define ICE_INSET_IPV6_SRC \
|
||||
(ICE_PROT_IPV6_OUTER | ICE_IP_SRC)
|
||||
#define ICE_INSET_IPV6_DST \
|
||||
(ICE_PROT_IPV6_OUTER | ICE_IP_DST)
|
||||
#define ICE_INSET_IPV6_NEXT_HDR \
|
||||
(ICE_PROT_IPV6_OUTER | ICE_IP_PROTO)
|
||||
#define ICE_INSET_IPV6_HOP_LIMIT \
|
||||
(ICE_PROT_IPV6_OUTER | ICE_IP_TTL)
|
||||
#define ICE_INSET_IPV6_TC \
|
||||
(ICE_PROT_IPV6_OUTER | ICE_IP_TOS)
|
||||
|
||||
#define ICE_INSET_TCP_SRC_PORT \
|
||||
(ICE_PROT_TCP_OUTER | ICE_SPORT)
|
||||
#define ICE_INSET_TCP_DST_PORT \
|
||||
(ICE_PROT_TCP_OUTER | ICE_DPORT)
|
||||
#define ICE_INSET_UDP_SRC_PORT \
|
||||
(ICE_PROT_UDP_OUTER | ICE_SPORT)
|
||||
#define ICE_INSET_UDP_DST_PORT \
|
||||
(ICE_PROT_UDP_OUTER | ICE_DPORT)
|
||||
#define ICE_INSET_SCTP_SRC_PORT \
|
||||
(ICE_PROT_SCTP_OUTER | ICE_SPORT)
|
||||
#define ICE_INSET_SCTP_DST_PORT \
|
||||
(ICE_PROT_SCTP_OUTER | ICE_DPORT)
|
||||
#define ICE_INSET_ICMP4_SRC_PORT \
|
||||
(ICE_PROT_ICMP4_OUTER | ICE_SPORT)
|
||||
#define ICE_INSET_ICMP4_DST_PORT \
|
||||
(ICE_PROT_ICMP4_OUTER | ICE_DPORT)
|
||||
#define ICE_INSET_ICMP6_SRC_PORT \
|
||||
(ICE_PROT_ICMP6_OUTER | ICE_SPORT)
|
||||
#define ICE_INSET_ICMP6_DST_PORT \
|
||||
(ICE_PROT_ICMP6_OUTER | ICE_DPORT)
|
||||
#define ICE_INSET_ICMP4_TYPE \
|
||||
(ICE_PROT_ICMP4_OUTER | ICE_ICMP_TYPE)
|
||||
#define ICE_INSET_ICMP4_CODE \
|
||||
(ICE_PROT_ICMP4_OUTER | ICE_ICMP_CODE)
|
||||
#define ICE_INSET_ICMP6_TYPE \
|
||||
(ICE_PROT_ICMP6_OUTER | ICE_ICMP_TYPE)
|
||||
#define ICE_INSET_ICMP6_CODE \
|
||||
(ICE_PROT_ICMP6_OUTER | ICE_ICMP_CODE)
|
||||
|
||||
/* tunnel */
|
||||
|
||||
#define ICE_INSET_TUN_SMAC \
|
||||
(ICE_PROT_MAC_INNER | ICE_SMAC)
|
||||
#define ICE_INSET_TUN_DMAC \
|
||||
(ICE_PROT_MAC_INNER | ICE_DMAC)
|
||||
|
||||
#define ICE_INSET_TUN_IPV4_SRC \
|
||||
(ICE_PROT_IPV4_INNER | ICE_IP_SRC)
|
||||
#define ICE_INSET_TUN_IPV4_DST \
|
||||
(ICE_PROT_IPV4_INNER | ICE_IP_DST)
|
||||
#define ICE_INSET_TUN_IPV4_TTL \
|
||||
(ICE_PROT_IPV4_INNER | ICE_IP_TTL)
|
||||
#define ICE_INSET_TUN_IPV4_PROTO \
|
||||
(ICE_PROT_IPV4_INNER | ICE_IP_PROTO)
|
||||
#define ICE_INSET_TUN_IPV4_TOS \
|
||||
(ICE_PROT_IPV4_INNER | ICE_IP_TOS)
|
||||
#define ICE_INSET_TUN_IPV6_SRC \
|
||||
(ICE_PROT_IPV6_INNER | ICE_IP_SRC)
|
||||
#define ICE_INSET_TUN_IPV6_DST \
|
||||
(ICE_PROT_IPV6_INNER | ICE_IP_DST)
|
||||
#define ICE_INSET_TUN_IPV6_HOP_LIMIT \
|
||||
(ICE_PROT_IPV6_INNER | ICE_IP_TTL)
|
||||
#define ICE_INSET_TUN_IPV6_NEXT_HDR \
|
||||
(ICE_PROT_IPV6_INNER | ICE_IP_PROTO)
|
||||
#define ICE_INSET_TUN_IPV6_TC \
|
||||
(ICE_PROT_IPV6_INNER | ICE_IP_TOS)
|
||||
|
||||
#define ICE_INSET_TUN_TCP_SRC_PORT \
|
||||
(ICE_PROT_TCP_INNER | ICE_SPORT)
|
||||
#define ICE_INSET_TUN_TCP_DST_PORT \
|
||||
(ICE_PROT_TCP_INNER | ICE_DPORT)
|
||||
#define ICE_INSET_TUN_UDP_SRC_PORT \
|
||||
(ICE_PROT_UDP_INNER | ICE_SPORT)
|
||||
#define ICE_INSET_TUN_UDP_DST_PORT \
|
||||
(ICE_PROT_UDP_INNER | ICE_DPORT)
|
||||
#define ICE_INSET_TUN_SCTP_SRC_PORT \
|
||||
(ICE_PROT_SCTP_INNER | ICE_SPORT)
|
||||
#define ICE_INSET_TUN_SCTP_DST_PORT \
|
||||
(ICE_PROT_SCTP_INNER | ICE_DPORT)
|
||||
#define ICE_INSET_TUN_ICMP4_SRC_PORT \
|
||||
(ICE_PROT_ICMP4_INNER | ICE_SPORT)
|
||||
#define ICE_INSET_TUN_ICMP4_DST_PORT \
|
||||
(ICE_PROT_ICMP4_INNER | ICE_DPORT)
|
||||
#define ICE_INSET_TUN_ICMP6_SRC_PORT \
|
||||
(ICE_PROT_ICMP6_INNER | ICE_SPORT)
|
||||
#define ICE_INSET_TUN_ICMP6_DST_PORT \
|
||||
(ICE_PROT_ICMP6_INNER | ICE_DPORT)
|
||||
#define ICE_INSET_TUN_ICMP4_TYPE \
|
||||
(ICE_PROT_ICMP4_INNER | ICE_ICMP_TYPE)
|
||||
#define ICE_INSET_TUN_ICMP4_CODE \
|
||||
(ICE_PROT_ICMP4_INNER | ICE_ICMP_CODE)
|
||||
#define ICE_INSET_TUN_ICMP6_TYPE \
|
||||
(ICE_PROT_ICMP6_INNER | ICE_ICMP_TYPE)
|
||||
#define ICE_INSET_TUN_ICMP6_CODE \
|
||||
(ICE_PROT_ICMP6_INNER | ICE_ICMP_CODE)
|
||||
|
||||
#define ICE_INSET_TUN_VXLAN_VNI \
|
||||
(ICE_PROT_VXLAN | ICE_VXLAN_VNI)
|
||||
#define ICE_INSET_TUN_NVGRE_TNI \
|
||||
(ICE_PROT_NVGRE | ICE_NVGRE_TNI)
|
||||
#define ICE_INSET_GTPU_TEID \
|
||||
(ICE_PROT_GTPU | ICE_GTPU_TEID)
|
||||
#define ICE_INSET_GTPU_QFI \
|
||||
(ICE_PROT_GTPU | ICE_GTPU_QFI)
|
||||
|
||||
struct ice_adapter;

extern const struct rte_flow_ops ice_flow_ops;

/* engine types. */
enum ice_flow_engine_type {
	ICE_FLOW_ENGINE_NONE = 0,
	ICE_FLOW_ENGINE_FDIR,
	ICE_FLOW_ENGINE_SWITCH,
	ICE_FLOW_ENGINE_HASH,
	ICE_FLOW_ENGINE_ACL,
	ICE_FLOW_ENGINE_MAX,
};

#define ICE_INSET_NONE 0x00000000000000000ULL
|
||||
|
||||
/* bit0 ~ bit 7 */
|
||||
#define ICE_INSET_SMAC 0x0000000000000001ULL
|
||||
#define ICE_INSET_DMAC 0x0000000000000002ULL
|
||||
#define ICE_INSET_ETHERTYPE 0x0000000000000020ULL
|
||||
|
||||
/* bit 8 ~ bit 15 */
|
||||
#define ICE_INSET_IPV4_SRC 0x0000000000000100ULL
|
||||
#define ICE_INSET_IPV4_DST 0x0000000000000200ULL
|
||||
#define ICE_INSET_IPV6_SRC 0x0000000000000400ULL
|
||||
#define ICE_INSET_IPV6_DST 0x0000000000000800ULL
|
||||
#define ICE_INSET_SRC_PORT 0x0000000000001000ULL
|
||||
#define ICE_INSET_DST_PORT 0x0000000000002000ULL
|
||||
#define ICE_INSET_ARP 0x0000000000004000ULL
|
||||
|
||||
/* bit 16 ~ bit 31 */
|
||||
#define ICE_INSET_IPV4_TOS 0x0000000000010000ULL
|
||||
#define ICE_INSET_IPV4_PROTO 0x0000000000020000ULL
|
||||
#define ICE_INSET_IPV4_TTL 0x0000000000040000ULL
|
||||
#define ICE_INSET_IPV6_TOS 0x0000000000100000ULL
|
||||
#define ICE_INSET_IPV6_PROTO 0x0000000000200000ULL
|
||||
#define ICE_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
|
||||
#define ICE_INSET_ICMP 0x0000000001000000ULL
|
||||
#define ICE_INSET_ICMP6 0x0000000002000000ULL
|
||||
|
||||
/* bit 32 ~ bit 47, tunnel fields */
|
||||
#define ICE_INSET_TUN_SMAC 0x0000000100000000ULL
|
||||
#define ICE_INSET_TUN_DMAC 0x0000000200000000ULL
|
||||
#define ICE_INSET_TUN_IPV4_SRC 0x0000000400000000ULL
|
||||
#define ICE_INSET_TUN_IPV4_DST 0x0000000800000000ULL
|
||||
#define ICE_INSET_TUN_IPV4_TTL 0x0000001000000000ULL
|
||||
#define ICE_INSET_TUN_IPV4_PROTO 0x0000002000000000ULL
|
||||
#define ICE_INSET_TUN_IPV6_SRC 0x0000004000000000ULL
|
||||
#define ICE_INSET_TUN_IPV6_DST 0x0000008000000000ULL
|
||||
#define ICE_INSET_TUN_IPV6_TTL 0x0000010000000000ULL
|
||||
#define ICE_INSET_TUN_IPV6_PROTO 0x0000020000000000ULL
|
||||
#define ICE_INSET_TUN_SRC_PORT 0x0000040000000000ULL
|
||||
#define ICE_INSET_TUN_DST_PORT 0x0000080000000000ULL
|
||||
#define ICE_INSET_TUN_ID 0x0000100000000000ULL
|
||||
|
||||
/* bit 48 ~ bit 55 */
|
||||
#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
|
||||
|
||||
#define ICE_FLAG_VLAN_INNER 0x00000001ULL
|
||||
#define ICE_FLAG_VLAN_OUTER 0x00000002ULL
|
||||
|
||||
#define INSET_ETHER ( \
|
||||
ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
|
||||
#define INSET_MAC_IPV4 ( \
|
||||
ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
|
||||
ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
|
||||
#define INSET_MAC_IPV4_L4 ( \
|
||||
ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
|
||||
ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \
|
||||
ICE_INSET_SRC_PORT)
|
||||
#define INSET_MAC_IPV4_ICMP ( \
|
||||
ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
|
||||
ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | ICE_INSET_ICMP)
|
||||
#define INSET_MAC_IPV6 ( \
|
||||
ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
|
||||
ICE_INSET_IPV6_TOS | ICE_INSET_IPV6_HOP_LIMIT)
|
||||
#define INSET_MAC_IPV6_L4 ( \
|
||||
ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
|
||||
ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TOS | \
|
||||
ICE_INSET_DST_PORT | ICE_INSET_SRC_PORT)
|
||||
#define INSET_MAC_IPV6_ICMP ( \
|
||||
ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
|
||||
ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TOS | ICE_INSET_ICMP6)
|
||||
#define INSET_TUNNEL_IPV4_TYPE1 ( \
|
||||
ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
|
||||
ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
|
||||
ICE_INSET_TUN_ID)
|
||||
#define INSET_TUNNEL_IPV4_TYPE2 ( \
|
||||
ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
|
||||
ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
|
||||
ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT | \
|
||||
ICE_INSET_TUN_ID)
|
||||
#define INSET_TUNNEL_IPV4_TYPE3 ( \
|
||||
ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
|
||||
ICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP | \
|
||||
ICE_INSET_TUN_ID)
|
||||
#define INSET_TUNNEL_IPV6_TYPE1 ( \
|
||||
ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
|
||||
ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
|
||||
ICE_INSET_TUN_ID)
|
||||
#define INSET_TUNNEL_IPV6_TYPE2 ( \
|
||||
ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
|
||||
ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
|
||||
ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT | \
|
||||
ICE_INSET_TUN_ID)
|
||||
#define INSET_TUNNEL_IPV6_TYPE3 ( \
|
||||
ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
|
||||
ICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6 | \
|
||||
ICE_INSET_TUN_ID)
|
||||
|
||||
/* L2 */
|
||||
static enum rte_flow_item_type pattern_ethertype[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
/**
 * classification stages.
 * for non-pipeline mode, we have two classification stages: Distributor/RSS
 * for pipeline-mode we have three classification stages:
 * Permission/Distributor/RSS
 */
enum ice_flow_classification_stage {
	ICE_FLOW_STAGE_NONE = 0,
	ICE_FLOW_STAGE_RSS,
	ICE_FLOW_STAGE_PERMISSION,
	ICE_FLOW_STAGE_DISTRIBUTOR,
	ICE_FLOW_STAGE_MAX,
};
/* pattern structure */
struct ice_pattern_match_item {
	enum rte_flow_item_type *pattern_list;
	/* pattern_list must end with RTE_FLOW_ITEM_TYPE_END */
	uint64_t input_set_mask;
	void *meta;
};

/* non-tunnel IPv4 */
|
||||
static enum rte_flow_item_type pattern_ipv4[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
typedef int (*engine_init_t)(struct ice_adapter *ad);
typedef void (*engine_uninit_t)(struct ice_adapter *ad);
typedef int (*engine_create_t)(struct ice_adapter *ad,
		struct rte_flow *flow,
		void *meta,
		struct rte_flow_error *error);
typedef int (*engine_destroy_t)(struct ice_adapter *ad,
		struct rte_flow *flow,
		struct rte_flow_error *error);
typedef int (*engine_query_t)(struct ice_adapter *ad,
		struct rte_flow *flow,
		struct rte_flow_query_count *count,
		struct rte_flow_error *error);
typedef void (*engine_free_t) (struct rte_flow *flow);
typedef int (*parse_pattern_action_t)(struct ice_adapter *ad,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		void **meta,
		struct rte_flow_error *error);

/* Struct to store engine created. */
struct ice_flow_engine {
	TAILQ_ENTRY(ice_flow_engine) node;
	engine_init_t init;
	engine_uninit_t uninit;
	engine_create_t create;
	engine_destroy_t destroy;
	engine_query_t query_count;
	engine_free_t free;
	enum ice_flow_engine_type type;
};
TAILQ_HEAD(ice_engine_list, ice_flow_engine);

/* Struct to store flow created. */
struct rte_flow {
	TAILQ_ENTRY(rte_flow) node;
	struct ice_flow_engine *engine;
	void *rule;
};

static enum rte_flow_item_type pattern_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
struct ice_flow_parser {
	struct ice_flow_engine *engine;
	struct ice_pattern_match_item *array;
	uint32_t array_len;
	parse_pattern_action_t parse_pattern_action;
	enum ice_flow_classification_stage stage;
};

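To show how the pieces above fit together, a minimal parse_pattern_action_t implementation for the toy parser sketched earlier could look like the following. It is an illustration under the assumption that ice_search_pattern_match_item() is visible to the engine; a real parser would also validate the spec fields against input_set_mask and walk the actions to build *meta for create().

```c
/* Hypothetical parse callback for the toy parser; illustrative only. */
static int
ice_dummy_parse_pattern_action(struct ice_adapter *ad __rte_unused,
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[] __rte_unused,
		void **meta,
		struct rte_flow_error *error)
{
	struct ice_pattern_match_item *item;

	item = ice_search_pattern_match_item(pattern, array, array_len, error);
	if (item == NULL)
		return -rte_errno;

	*meta = NULL;	/* this toy parser carries no per-rule state */
	rte_free(item);	/* the matched item copy is owned by the caller */
	return 0;
}
```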
static enum rte_flow_item_type pattern_ipv4_tcp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_TCP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv4_sctp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_SCTP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv4_icmp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_ICMP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
/* non-tunnel IPv6 */
|
||||
static enum rte_flow_item_type pattern_ipv6[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV6,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv6_udp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV6,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv6_tcp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV6,
|
||||
RTE_FLOW_ITEM_TYPE_TCP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv6_sctp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV6,
|
||||
RTE_FLOW_ITEM_TYPE_SCTP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv6_icmp6[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV6,
|
||||
RTE_FLOW_ITEM_TYPE_ICMP6,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
/* IPv4 VXLAN IPv4 */
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_udp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_tcp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_TCP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_sctp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_SCTP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_icmp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_ICMP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
/* IPv4 VXLAN MAC IPv4 */
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_udp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_tcp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_TCP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_sctp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_SCTP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_icmp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_ICMP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
/* IPv4 VXLAN IPv6 */
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_IPV6,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_udp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_IPV6,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_tcp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_IPV6,
|
||||
RTE_FLOW_ITEM_TYPE_TCP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_sctp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_IPV6,
|
||||
RTE_FLOW_ITEM_TYPE_SCTP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_icmp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_IPV6,
|
||||
RTE_FLOW_ITEM_TYPE_ICMP,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
/* IPv4 VXLAN MAC IPv6 */
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV6,
|
||||
RTE_FLOW_ITEM_TYPE_END,
|
||||
};
|
||||
|
||||
static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_udp[] = {
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV4,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
||||
RTE_FLOW_ITEM_TYPE_VXLAN,
|
||||
RTE_FLOW_ITEM_TYPE_ETH,
|
||||
RTE_FLOW_ITEM_TYPE_IPV6,
|
||||
RTE_FLOW_ITEM_TYPE_UDP,
|
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv4 NVGRE IPv4 */
static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv4 NVGRE MAC IPv4 */
static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv4 NVGRE IPv6 */
static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv4 NVGRE MAC IPv6 */
static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static struct ice_flow_pattern ice_supported_patterns[] = {
	{pattern_ethertype, INSET_ETHER},
	{pattern_ipv4, INSET_MAC_IPV4},
	{pattern_ipv4_udp, INSET_MAC_IPV4_L4},
	{pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
	{pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
	{pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
	{pattern_ipv6, INSET_MAC_IPV6},
	{pattern_ipv6_udp, INSET_MAC_IPV6_L4},
	{pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
	{pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
	{pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
	{pattern_ipv4_vxlan_ipv4, INSET_TUNNEL_IPV4_TYPE1},
	{pattern_ipv4_vxlan_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
	{pattern_ipv4_vxlan_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
	{pattern_ipv4_vxlan_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
	{pattern_ipv4_vxlan_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
	{pattern_ipv4_vxlan_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
	{pattern_ipv4_vxlan_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
	{pattern_ipv4_vxlan_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
	{pattern_ipv4_vxlan_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
	{pattern_ipv4_vxlan_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
	{pattern_ipv4_vxlan_ipv6, INSET_TUNNEL_IPV6_TYPE1},
	{pattern_ipv4_vxlan_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
	{pattern_ipv4_vxlan_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
	{pattern_ipv4_vxlan_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
	{pattern_ipv4_vxlan_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
	{pattern_ipv4_vxlan_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
	{pattern_ipv4_vxlan_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
	{pattern_ipv4_vxlan_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
	{pattern_ipv4_vxlan_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
	{pattern_ipv4_vxlan_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
	{pattern_ipv4_nvgre_ipv4, INSET_TUNNEL_IPV4_TYPE1},
	{pattern_ipv4_nvgre_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
	{pattern_ipv4_nvgre_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
	{pattern_ipv4_nvgre_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
	{pattern_ipv4_nvgre_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
	{pattern_ipv4_nvgre_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
	{pattern_ipv4_nvgre_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
	{pattern_ipv4_nvgre_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
	{pattern_ipv4_nvgre_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
	{pattern_ipv4_nvgre_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
	{pattern_ipv4_nvgre_ipv6, INSET_TUNNEL_IPV6_TYPE1},
	{pattern_ipv4_nvgre_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
	{pattern_ipv4_nvgre_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
	{pattern_ipv4_nvgre_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
	{pattern_ipv4_nvgre_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
	{pattern_ipv4_nvgre_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
	{pattern_ipv4_nvgre_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
	{pattern_ipv4_nvgre_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
};

/* Struct to store parser created. */
struct ice_flow_parser_node {
	TAILQ_ENTRY(ice_flow_parser_node) node;
	struct ice_flow_parser *parser;
};

void ice_register_flow_engine(struct ice_flow_engine *engine);
int ice_flow_init(struct ice_adapter *ad);
void ice_flow_uninit(struct ice_adapter *ad);
int ice_register_parser(struct ice_flow_parser *parser,
		struct ice_adapter *ad);
void ice_unregister_parser(struct ice_flow_parser *parser,
		struct ice_adapter *ad);
struct ice_pattern_match_item *
ice_search_pattern_match_item(const struct rte_flow_item pattern[],
		struct ice_pattern_match_item *array,
		uint32_t array_len,
		struct rte_flow_error *error);
#endif

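Note (editorial): the declarations above are the public entry points of the new abstract layer. As a rough sketch, not part of this patch, of how a low-level filter engine is expected to hook into that layer, the snippet below registers a hypothetical engine at constructor time so it is already known before any adapter runs ice_flow_init(); the engine's callback fields are omitted because the layout of struct ice_flow_engine is not shown in this hunk.

#include <rte_common.h>		/* RTE_INIT */
#include "ice_generic_flow.h"	/* ice_register_flow_engine() */

/* Hypothetical engine instance; callback fields intentionally left out
 * here since struct ice_flow_engine's layout is not part of this hunk.
 */
static struct ice_flow_engine example_engine;

RTE_INIT(example_engine_register)
{
	/* Register with the generic flow layer during shared-object load. */
	ice_register_flow_engine(&example_engine);
}
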
@ -2,515 +2,3 @@
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "ice_logs.h"
#include "base/ice_type.h"
#include "ice_switch_filter.h"

static int
ice_parse_switch_filter(const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error,
			struct ice_adv_lkup_elem *list,
			uint16_t *lkups_num,
			enum ice_sw_tunnel_type tun_type)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	uint16_t j, t = 0;
	uint16_t tunnel_valid = 0;

	for (item = pattern; item->type !=
			RTE_FLOW_ITEM_TYPE_END; item++) {
		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;
			if (eth_spec && eth_mask) {
				list[t].type = (tun_type == ICE_NON_TUN) ?
					ICE_MAC_OFOS : ICE_MAC_IL;
				struct ice_ether_hdr *h;
				struct ice_ether_hdr *m;
				uint16_t i = 0;
				h = &list[t].h_u.eth_hdr;
				m = &list[t].m_u.eth_hdr;
				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
					if (eth_mask->src.addr_bytes[j] ==
							UINT8_MAX) {
						h->src_addr[j] =
						eth_spec->src.addr_bytes[j];
						m->src_addr[j] =
						eth_mask->src.addr_bytes[j];
						i = 1;
					}
					if (eth_mask->dst.addr_bytes[j] ==
							UINT8_MAX) {
						h->dst_addr[j] =
						eth_spec->dst.addr_bytes[j];
						m->dst_addr[j] =
						eth_mask->dst.addr_bytes[j];
						i = 1;
					}
				}
				if (i)
					t++;
				if (eth_mask->type == UINT16_MAX) {
					list[t].type = ICE_ETYPE_OL;
					list[t].h_u.ethertype.ethtype_id =
						eth_spec->type;
					list[t].m_u.ethertype.ethtype_id =
						UINT16_MAX;
					t++;
				}
			} else if (!eth_spec && !eth_mask) {
				list[t].type = (tun_type == ICE_NON_TUN) ?
					ICE_MAC_OFOS : ICE_MAC_IL;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;
			if (ipv4_spec && ipv4_mask) {
				list[t].type = (tun_type == ICE_NON_TUN) ?
					ICE_IPV4_OFOS : ICE_IPV4_IL;
				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
					list[t].h_u.ipv4_hdr.src_addr =
						ipv4_spec->hdr.src_addr;
					list[t].m_u.ipv4_hdr.src_addr =
						UINT32_MAX;
				}
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
					list[t].h_u.ipv4_hdr.dst_addr =
						ipv4_spec->hdr.dst_addr;
					list[t].m_u.ipv4_hdr.dst_addr =
						UINT32_MAX;
				}
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
					list[t].h_u.ipv4_hdr.time_to_live =
						ipv4_spec->hdr.time_to_live;
					list[t].m_u.ipv4_hdr.time_to_live =
						UINT8_MAX;
				}
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
					list[t].h_u.ipv4_hdr.protocol =
						ipv4_spec->hdr.next_proto_id;
					list[t].m_u.ipv4_hdr.protocol =
						UINT8_MAX;
				}
				if (ipv4_mask->hdr.type_of_service ==
						UINT8_MAX) {
					list[t].h_u.ipv4_hdr.tos =
						ipv4_spec->hdr.type_of_service;
					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
				}
				t++;
			} else if (!ipv4_spec && !ipv4_mask) {
				list[t].type = (tun_type == ICE_NON_TUN) ?
					ICE_IPV4_OFOS : ICE_IPV4_IL;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;
			if (ipv6_spec && ipv6_mask) {
				list[t].type = (tun_type == ICE_NON_TUN) ?
					ICE_IPV6_OFOS : ICE_IPV6_IL;
				struct ice_ipv6_hdr *f;
				struct ice_ipv6_hdr *s;
				f = &list[t].h_u.ipv6_hdr;
				s = &list[t].m_u.ipv6_hdr;
				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
					if (ipv6_mask->hdr.src_addr[j] ==
							UINT8_MAX) {
						f->src_addr[j] =
						ipv6_spec->hdr.src_addr[j];
						s->src_addr[j] =
						ipv6_mask->hdr.src_addr[j];
					}
					if (ipv6_mask->hdr.dst_addr[j] ==
							UINT8_MAX) {
						f->dst_addr[j] =
						ipv6_spec->hdr.dst_addr[j];
						s->dst_addr[j] =
						ipv6_mask->hdr.dst_addr[j];
					}
				}
				if (ipv6_mask->hdr.proto == UINT8_MAX) {
					f->next_hdr =
						ipv6_spec->hdr.proto;
					s->next_hdr = UINT8_MAX;
				}
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
					f->hop_limit =
						ipv6_spec->hdr.hop_limits;
					s->hop_limit = UINT8_MAX;
				}
				t++;
			} else if (!ipv6_spec && !ipv6_mask) {
				list[t].type = (tun_type == ICE_NON_TUN) ?
					ICE_IPV4_OFOS : ICE_IPV4_IL;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;
			if (udp_spec && udp_mask) {
				if (tun_type == ICE_SW_TUN_VXLAN &&
						tunnel_valid == 0)
					list[t].type = ICE_UDP_OF;
				else
					list[t].type = ICE_UDP_ILOS;
				if (udp_mask->hdr.src_port == UINT16_MAX) {
					list[t].h_u.l4_hdr.src_port =
						udp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						udp_mask->hdr.src_port;
				}
				if (udp_mask->hdr.dst_port == UINT16_MAX) {
					list[t].h_u.l4_hdr.dst_port =
						udp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						udp_mask->hdr.dst_port;
				}
				t++;
			} else if (!udp_spec && !udp_mask) {
				list[t].type = ICE_UDP_ILOS;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;
			if (tcp_spec && tcp_mask) {
				list[t].type = ICE_TCP_IL;
				if (tcp_mask->hdr.src_port == UINT16_MAX) {
					list[t].h_u.l4_hdr.src_port =
						tcp_spec->hdr.src_port;
					list[t].m_u.l4_hdr.src_port =
						tcp_mask->hdr.src_port;
				}
				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
					list[t].h_u.l4_hdr.dst_port =
						tcp_spec->hdr.dst_port;
					list[t].m_u.l4_hdr.dst_port =
						tcp_mask->hdr.dst_port;
				}
				t++;
			} else if (!tcp_spec && !tcp_mask) {
				list[t].type = ICE_TCP_IL;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;
			if (sctp_spec && sctp_mask) {
				list[t].type = ICE_SCTP_IL;
				if (sctp_mask->hdr.src_port == UINT16_MAX) {
					list[t].h_u.sctp_hdr.src_port =
						sctp_spec->hdr.src_port;
					list[t].m_u.sctp_hdr.src_port =
						sctp_mask->hdr.src_port;
				}
				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
					list[t].h_u.sctp_hdr.dst_port =
						sctp_spec->hdr.dst_port;
					list[t].m_u.sctp_hdr.dst_port =
						sctp_mask->hdr.dst_port;
				}
				t++;
			} else if (!sctp_spec && !sctp_mask) {
				list[t].type = ICE_SCTP_IL;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			tunnel_valid = 1;
			if (vxlan_spec && vxlan_mask) {
				list[t].type = ICE_VXLAN;
				if (vxlan_mask->vni[0] == UINT8_MAX &&
					vxlan_mask->vni[1] == UINT8_MAX &&
					vxlan_mask->vni[2] == UINT8_MAX) {
					list[t].h_u.tnl_hdr.vni =
						(vxlan_spec->vni[2] << 16) |
						(vxlan_spec->vni[1] << 8) |
						vxlan_spec->vni[0];
					list[t].m_u.tnl_hdr.vni =
						UINT32_MAX;
				}
				t++;
			} else if (!vxlan_spec && !vxlan_mask) {
				list[t].type = ICE_VXLAN;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			tunnel_valid = 1;
			if (nvgre_spec && nvgre_mask) {
				list[t].type = ICE_NVGRE;
				if (nvgre_mask->tni[0] == UINT8_MAX &&
					nvgre_mask->tni[1] == UINT8_MAX &&
					nvgre_mask->tni[2] == UINT8_MAX) {
					list[t].h_u.nvgre_hdr.tni_flow =
						(nvgre_spec->tni[2] << 16) |
						(nvgre_spec->tni[1] << 8) |
						nvgre_spec->tni[0];
					list[t].m_u.nvgre_hdr.tni_flow =
						UINT32_MAX;
				}
				t++;
			} else if (!nvgre_spec && !nvgre_mask) {
				list[t].type = ICE_NVGRE;
			}
			break;

		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_END:
			break;

		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, actions,
				"Invalid pattern item.");
			goto out;
		}
	}

	*lkups_num = t;

	return 0;
out:
	return -rte_errno;
}

/* For now, the ice switch filter action code only supports
 * QUEUE or DROP.
 */
static int
ice_parse_switch_action(struct ice_pf *pf,
		const struct rte_flow_action *actions,
		struct rte_flow_error *error,
		struct ice_adv_rule_info *rule_info)
{
	struct ice_vsi *vsi = pf->main_vsi;
	const struct rte_flow_action_queue *act_q;
	uint16_t base_queue;
	const struct rte_flow_action *action;
	enum rte_flow_action_type action_type;

	base_queue = pf->base_queue;
	for (action = actions; action->type !=
			RTE_FLOW_ACTION_TYPE_END; action++) {
		action_type = action->type;
		switch (action_type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			act_q = action->conf;
			rule_info->sw_act.fltr_act =
				ICE_FWD_TO_Q;
			rule_info->sw_act.fwd_id.q_id =
				base_queue + act_q->index;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			rule_info->sw_act.fltr_act =
				ICE_DROP_PACKET;
			break;

		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		default:
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				actions,
				"Invalid action type");
			return -rte_errno;
		}
	}

	rule_info->sw_act.vsi_handle = vsi->idx;
	rule_info->rx = 1;
	rule_info->sw_act.src = vsi->idx;
	rule_info->priority = 5;

	return 0;
}

static int
ice_switch_rule_set(struct ice_pf *pf,
			struct ice_adv_lkup_elem *list,
			uint16_t lkups_cnt,
			struct ice_adv_rule_info *rule_info,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int ret;
	struct ice_rule_query_data rule_added = {0};
	struct ice_rule_query_data *filter_ptr;

	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"item number too large for rule");
		return -rte_errno;
	}
	if (!list) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"lookup list should not be NULL");
		return -rte_errno;
	}

	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);

	if (!ret) {
		filter_ptr = rte_zmalloc("ice_switch_filter",
			sizeof(struct ice_rule_query_data), 0);
		if (!filter_ptr) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return -EINVAL;
		}
		flow->rule = filter_ptr;
		rte_memcpy(filter_ptr,
			   &rule_added,
			   sizeof(struct ice_rule_query_data));
	}

	return ret;
}

int
ice_create_switch_filter(struct ice_pf *pf,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	int ret = 0;
	struct ice_adv_rule_info rule_info = {0};
	struct ice_adv_lkup_elem *list = NULL;
	uint16_t lkups_num = 0;
	const struct rte_flow_item *item = pattern;
	uint16_t item_num = 0;
	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_num++;
		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
			tun_type = ICE_SW_TUN_VXLAN;
		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
			tun_type = ICE_SW_TUN_NVGRE;
		/* reserve one more memory slot for ETH which may
		 * consume 2 lookup items.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_ETH)
			item_num++;
	}
	rule_info.tun_type = tun_type;

	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
	if (!list) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"No memory for PMD internal items");
		return -rte_errno;
	}

	ret = ice_parse_switch_filter(pattern, actions, error,
			list, &lkups_num, tun_type);
	if (ret)
		goto error;

	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
	if (ret)
		goto error;

	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
	if (ret)
		goto error;

	rte_free(list);
	return 0;

error:
	rte_free(list);

	return -rte_errno;
}

int
ice_destroy_switch_filter(struct ice_pf *pf,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int ret;
	struct ice_rule_query_data *filter_ptr;

	filter_ptr = (struct ice_rule_query_data *)
			flow->rule;

	if (!filter_ptr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"no such flow"
			" created by switch filter");
		return -rte_errno;
	}

	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
	if (ret) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"fail to destroy switch filter rule");
		return -rte_errno;
	}

	rte_free(filter_ptr);
	return ret;
}

void
ice_free_switch_filter_rule(void *rule)
{
	struct ice_rule_query_data *filter_ptr;

	filter_ptr = (struct ice_rule_query_data *)rule;

	rte_free(filter_ptr);
}

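Note (editorial): for context on the removed entry point above, the following is a minimal caller sketch, not part of this patch; it assumes a valid struct ice_pf and a caller-allocated struct rte_flow, and steers IPv4 packets with a fully masked destination address to queue 3 through the removed helpers.

#include <rte_flow.h>
#include <rte_byteorder.h>
#include "ice_switch_filter.h"

static int
example_add_queue_rule(struct ice_pf *pf, struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr.dst_addr = RTE_BE32(0x0a000001), /* 10.0.0.1 */
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr.dst_addr = UINT32_MAX, /* match the full address */
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Parses pattern/actions and programs one advanced switch rule;
	 * on success flow->rule points at the rule's query data.
	 */
	return ice_create_switch_filter(pf, pattern, actions, flow, error);
}
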
@ -2,23 +2,3 @@
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef _ICE_SWITCH_FILTER_H_
#define _ICE_SWITCH_FILTER_H_

#include "base/ice_switch.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"

int
ice_create_switch_filter(struct ice_pf *pf,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow *flow,
			struct rte_flow_error *error);
int
ice_destroy_switch_filter(struct ice_pf *pf,
			struct rte_flow *flow,
			struct rte_flow_error *error);
void
ice_free_switch_filter_rule(void *rule);
#endif /* _ICE_SWITCH_FILTER_H_ */