flow_classify: introduce flow classify library

The following APIs are implemented in the
librte_flow_classify library:

rte_flow_classifier_create
rte_flow_classifier_free
rte_flow_classifier_query
rte_flow_classify_table_create
rte_flow_classify_table_entry_add
rte_flow_classify_table_entry_delete

The following librte_table APIs are used:
f_create to create a table.
f_add to add a rule to the table.
f_del to delete a rule from the table.
f_free to free a table.
f_lookup to match packets with the rules.

The library supports counting of IPv4 5-tuple packets only,
i.e. IPv4 UDP, TCP and SCTP packets.
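
A minimal usage sketch (ACL table parameters, the flow attr/pattern/actions
and error handling are omitted here; they are set up by the application,
e.g. as in the flow_classify sample application):

    cls = rte_flow_classifier_create(&cls_params);
    rte_flow_classify_table_create(cls, &table_params, &table_id);
    rule = rte_flow_classify_table_entry_add(cls, table_id, &key_found,
                                             &attr, pattern, actions, &error);
    /* per burst, e.g. after rte_eth_rx_burst(): */
    rte_flow_classifier_query(cls, table_id, pkts, nb_pkts, rule, &stats);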

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
Acked-by: Jasvinder Singh <jasvinder.singh@intel.com>
Ferruh Yigit 2017-10-24 18:28:00 +01:00 committed by Thomas Monjalon
parent 490424e6c9
commit be41ac2a33
13 changed files with 1688 additions and 2 deletions

MAINTAINERS

@@ -707,6 +707,12 @@ M: Mark Kavanagh <mark.b.kavanagh@intel.com>
F: lib/librte_gso/
F: doc/guides/prog_guide/generic_segmentation_offload_lib.rst
Flow Classify - EXPERIMENTAL
M: Bernard Iremonger <bernard.iremonger@intel.com>
F: lib/librte_flow_classify/
F: test/test/test_flow_classify*
F: examples/flow_classify/
Distributor
M: Bruce Richardson <bruce.richardson@intel.com>
M: David Hunt <david.hunt@intel.com>
@@ -740,7 +746,6 @@ F: doc/guides/prog_guide/pdump_lib.rst
F: app/pdump/
F: doc/guides/tools/pdump.rst
Packet Framework
----------------
M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>

config/common_base

@@ -706,6 +706,11 @@ CONFIG_RTE_LIBRTE_GSO=y
#
CONFIG_RTE_LIBRTE_METER=y
#
# Compile librte_classify
#
CONFIG_RTE_LIBRTE_FLOW_CLASSIFY=y
#
# Compile librte_sched
#

doc/api/doxy-api-index.md

@@ -127,7 +127,8 @@ The public API headers are grouped by topics:
[distributor] (@ref rte_distributor.h),
[EFD] (@ref rte_efd.h),
[ACL] (@ref rte_acl.h),
[member] (@ref rte_member.h),
[flow classify] (@ref rte_flow_classify.h)
- **containers**:
[mbuf] (@ref rte_mbuf.h),

doc/api/doxy-api.conf

@@ -48,6 +48,7 @@ INPUT = doc/api/doxy-api-index.md \
lib/librte_efd \
lib/librte_ether \
lib/librte_eventdev \
lib/librte_flow_classify \
lib/librte_gro \
lib/librte_gso \
lib/librte_hash \

doc/guides/rel_notes/release_17_11.rst

@@ -165,6 +165,12 @@ New Features
checksums, and doesn't update checksums for output packets.
Additionally, the GSO library doesn't process IP fragmented packets.
* **Added the Flow Classification Library.**
Added the Flow Classification library, which provides an API for DPDK
applications to classify an input packet by matching it against a set of flow
rules. It uses the librte_table API to manage the flow rules.
Resolved Issues
---------------
@@ -354,6 +360,7 @@ The libraries prepended with a plus sign were incremented in this version.
+ librte_eal.so.6
+ librte_ethdev.so.8
+ librte_eventdev.so.3
+ librte_flow_classify.so.1
librte_gro.so.1
+ librte_gso.so.1
librte_hash.so.2

lib/Makefile

@@ -83,6 +83,8 @@ DIRS-$(CONFIG_RTE_LIBRTE_POWER) += librte_power
DEPDIRS-librte_power := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_METER) += librte_meter
DEPDIRS-librte_meter := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += librte_flow_classify
DEPDIRS-librte_flow_classify := librte_net librte_table librte_acl
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += librte_sched
DEPDIRS-librte_sched := librte_eal librte_mempool librte_mbuf librte_net
DEPDIRS-librte_sched += librte_timer

lib/librte_flow_classify/Makefile

@@ -0,0 +1,53 @@
# BSD LICENSE
#
# Copyright(c) 2017 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include $(RTE_SDK)/mk/rte.vars.mk
# library name
LIB = librte_flow_classify.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
EXPORT_MAP := rte_flow_classify_version.map
LIBABIVER := 1
LDLIBS += -lrte_eal -lrte_ethdev -lrte_net -lrte_table -lrte_acl
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += rte_flow_classify.c
SRCS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += rte_flow_classify_parse.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY)-include := rte_flow_classify.h
include $(RTE_SDK)/mk/rte.lib.mk

lib/librte_flow_classify/rte_flow_classify.c

@@ -0,0 +1,690 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_flow_classify.h>
#include "rte_flow_classify_parse.h"
#include <rte_flow_driver.h>
#include <rte_table_acl.h>
#include <stdbool.h>
int librte_flow_classify_logtype;
static struct rte_eth_ntuple_filter ntuple_filter;
static uint32_t unique_id = 1;
struct rte_flow_classify_table_entry {
/* meta-data for classify rule */
uint32_t rule_id;
};
struct rte_table {
/* Input parameters */
struct rte_table_ops ops;
uint32_t entry_size;
enum rte_flow_classify_table_type type;
/* Handle to the low-level table object */
void *h_table;
};
#define RTE_FLOW_CLASSIFIER_MAX_NAME_SZ 256
struct rte_flow_classifier {
/* Input parameters */
char name[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ];
int socket_id;
enum rte_flow_classify_table_type type;
/* Internal tables */
struct rte_table tables[RTE_FLOW_CLASSIFY_TABLE_MAX];
uint32_t num_tables;
uint16_t nb_pkts;
struct rte_flow_classify_table_entry
*entries[RTE_PORT_IN_BURST_SIZE_MAX];
} __rte_cache_aligned;
enum {
PROTO_FIELD_IPV4,
SRC_FIELD_IPV4,
DST_FIELD_IPV4,
SRCP_FIELD_IPV4,
DSTP_FIELD_IPV4,
NUM_FIELDS_IPV4
};
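/* The enum above gives the indices into the field_value[] arrays of the
 * ACL rule add/delete parameters used in struct acl_keys below.
 */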
struct acl_keys {
struct rte_table_acl_rule_add_params key_add; /* add key */
struct rte_table_acl_rule_delete_params key_del; /* delete key */
};
struct classify_rules {
enum rte_flow_classify_rule_type type;
union {
struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
} u;
};
struct rte_flow_classify_rule {
uint32_t id; /* unique ID of classify rule */
struct rte_flow_action action; /* action when match found */
struct classify_rules rules; /* union of rules */
union {
struct acl_keys key;
} u;
int key_found; /* rule key found in table */
void *entry; /* pointer to buffer to hold rule meta data */
void *entry_ptr; /* handle to the table entry for rule meta data */
};
static int
flow_classify_parse_flow(
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
struct rte_flow_item *items;
parse_filter_t parse_filter;
uint32_t item_num = 0;
uint32_t i = 0;
int ret;
memset(&ntuple_filter, 0, sizeof(ntuple_filter));
/* Count the non-void items in the pattern */
while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
item_num++;
i++;
}
item_num++;
items = malloc(item_num * sizeof(struct rte_flow_item));
if (!items) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_ITEM_NUM,
NULL, "No memory for pattern items.");
return -ENOMEM;
}
memset(items, 0, item_num * sizeof(struct rte_flow_item));
classify_pattern_skip_void_item(items, pattern);
parse_filter = classify_find_parse_filter_func(items);
if (!parse_filter) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
pattern, "Unsupported pattern");
free(items);
return -EINVAL;
}
ret = parse_filter(attr, items, actions, &ntuple_filter, error);
free(items);
return ret;
}
#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
#define uint32_t_to_char(ip, a, b, c, d) do {\
*a = (unsigned char)(ip >> 24 & 0xff);\
*b = (unsigned char)(ip >> 16 & 0xff);\
*c = (unsigned char)(ip >> 8 & 0xff);\
*d = (unsigned char)(ip & 0xff);\
} while (0)
static inline void
print_acl_ipv4_key_add(struct rte_table_acl_rule_add_params *key)
{
unsigned char a, b, c, d;
printf("%s: 0x%02hhx/0x%hhx ", __func__,
key->field_value[PROTO_FIELD_IPV4].value.u8,
key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
&a, &b, &c, &d);
printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
key->field_value[SRC_FIELD_IPV4].mask_range.u32);
uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
&a, &b, &c, &d);
printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
key->field_value[DST_FIELD_IPV4].mask_range.u32);
printf("%hu : 0x%x %hu : 0x%x",
key->field_value[SRCP_FIELD_IPV4].value.u16,
key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
key->field_value[DSTP_FIELD_IPV4].value.u16,
key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
printf(" priority: 0x%x\n", key->priority);
}
static inline void
print_acl_ipv4_key_delete(struct rte_table_acl_rule_delete_params *key)
{
unsigned char a, b, c, d;
printf("%s: 0x%02hhx/0x%hhx ", __func__,
key->field_value[PROTO_FIELD_IPV4].value.u8,
key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
&a, &b, &c, &d);
printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
key->field_value[SRC_FIELD_IPV4].mask_range.u32);
uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
&a, &b, &c, &d);
printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
key->field_value[DST_FIELD_IPV4].mask_range.u32);
printf("%hu : 0x%x %hu : 0x%x\n",
key->field_value[SRCP_FIELD_IPV4].value.u16,
key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
key->field_value[DSTP_FIELD_IPV4].value.u16,
key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
}
#endif
static int
rte_flow_classifier_check_params(struct rte_flow_classifier_params *params)
{
if (params == NULL) {
RTE_FLOW_CLASSIFY_LOG(ERR,
"%s: Incorrect value for parameter params\n", __func__);
return -EINVAL;
}
/* name */
if (params->name == NULL) {
RTE_FLOW_CLASSIFY_LOG(ERR,
"%s: Incorrect value for parameter name\n", __func__);
return -EINVAL;
}
/* socket */
if ((params->socket_id < 0) ||
(params->socket_id >= RTE_MAX_NUMA_NODES)) {
RTE_FLOW_CLASSIFY_LOG(ERR,
"%s: Incorrect value for parameter socket_id\n",
__func__);
return -EINVAL;
}
return 0;
}
struct rte_flow_classifier *
rte_flow_classifier_create(struct rte_flow_classifier_params *params)
{
struct rte_flow_classifier *cls;
int ret;
/* Check input parameters */
ret = rte_flow_classifier_check_params(params);
if (ret != 0) {
RTE_FLOW_CLASSIFY_LOG(ERR,
"%s: flow classifier params check failed (%d)\n",
__func__, ret);
return NULL;
}
/* Allocate memory for the flow classifier */
cls = rte_zmalloc_socket("FLOW_CLASSIFIER",
sizeof(struct rte_flow_classifier),
RTE_CACHE_LINE_SIZE, params->socket_id);
if (cls == NULL) {
RTE_FLOW_CLASSIFY_LOG(ERR,
"%s: flow classifier memory allocation failed\n",
__func__);
return NULL;
}
/* Save input parameters */
snprintf(cls->name, RTE_FLOW_CLASSIFIER_MAX_NAME_SZ, "%s",
params->name);
cls->socket_id = params->socket_id;
cls->type = params->type;
/* Initialize flow classifier internal data structure */
cls->num_tables = 0;
return cls;
}
static void
rte_flow_classify_table_free(struct rte_table *table)
{
if (table->ops.f_free != NULL)
table->ops.f_free(table->h_table);
}
int
rte_flow_classifier_free(struct rte_flow_classifier *cls)
{
uint32_t i;
/* Check input parameters */
if (cls == NULL) {
RTE_FLOW_CLASSIFY_LOG(ERR,
"%s: rte_flow_classifier parameter is NULL\n",
__func__);
return -EINVAL;
}
/* Free tables */
for (i = 0; i < cls->num_tables; i++) {
struct rte_table *table = &cls->tables[i];
rte_flow_classify_table_free(table);
}
/* Free flow classifier memory */
rte_free(cls);
return 0;
}
static int
rte_table_check_params(struct rte_flow_classifier *cls,
struct rte_flow_classify_table_params *params,
uint32_t *table_id)
{
if (cls == NULL) {
RTE_FLOW_CLASSIFY_LOG(ERR,
"%s: flow classifier parameter is NULL\n",
__func__);
return -EINVAL;
}
if (params == NULL) {
RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params parameter is NULL\n",
__func__);
return -EINVAL;
}
if (table_id == NULL) {
RTE_FLOW_CLASSIFY_LOG(ERR, "%s: table_id parameter is NULL\n",
__func__);
return -EINVAL;
}
/* ops */
if (params->ops == NULL) {
RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params->ops is NULL\n",
__func__);
return -EINVAL;
}
if (params->ops->f_create == NULL) {
RTE_FLOW_CLASSIFY_LOG(ERR,
"%s: f_create function pointer is NULL\n", __func__);
return -EINVAL;
}
if (params->ops->f_lookup == NULL) {
RTE_FLOW_CLASSIFY_LOG(ERR,
"%s: f_lookup function pointer is NULL\n", __func__);
return -EINVAL;
}
/* Do we have room for one more table? */
if (cls->num_tables == RTE_FLOW_CLASSIFY_TABLE_MAX) {
RTE_FLOW_CLASSIFY_LOG(ERR,
"%s: Incorrect value for num_tables parameter\n",
__func__);
return -EINVAL;
}
return 0;
}
int
rte_flow_classify_table_create(struct rte_flow_classifier *cls,
struct rte_flow_classify_table_params *params,
uint32_t *table_id)
{
struct rte_table *table;
void *h_table;
uint32_t entry_size, id;
int ret;
/* Check input arguments */
ret = rte_table_check_params(cls, params, table_id);
if (ret != 0)
return ret;
id = cls->num_tables;
table = &cls->tables[id];
/* calculate table entry size */
entry_size = sizeof(struct rte_flow_classify_table_entry);
/* Create the table */
h_table = params->ops->f_create(params->arg_create, cls->socket_id,
entry_size);
if (h_table == NULL) {
RTE_FLOW_CLASSIFY_LOG(ERR, "%s: Table creation failed\n",
__func__);
return -EINVAL;
}
/* Commit current table to the classifier */
cls->num_tables++;
*table_id = id;
/* Save input parameters */
memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
/* Initialize table internal data structure */
table->entry_size = entry_size;
table->h_table = h_table;
return 0;
}
static struct rte_flow_classify_rule *
allocate_acl_ipv4_5tuple_rule(void)
{
struct rte_flow_classify_rule *rule;
rule = malloc(sizeof(struct rte_flow_classify_rule));
if (!rule)
return rule;
memset(rule, 0, sizeof(struct rte_flow_classify_rule));
rule->id = unique_id++;
rule->rules.type = RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE;
memcpy(&rule->action, classify_get_flow_action(),
sizeof(struct rte_flow_action));
/* key add values */
rule->u.key.key_add.priority = ntuple_filter.priority;
rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].mask_range.u8 =
ntuple_filter.proto_mask;
rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].value.u8 =
ntuple_filter.proto;
rule->rules.u.ipv4_5tuple.proto = ntuple_filter.proto;
rule->rules.u.ipv4_5tuple.proto_mask = ntuple_filter.proto_mask;
rule->u.key.key_add.field_value[SRC_FIELD_IPV4].mask_range.u32 =
ntuple_filter.src_ip_mask;
rule->u.key.key_add.field_value[SRC_FIELD_IPV4].value.u32 =
ntuple_filter.src_ip;
rule->rules.u.ipv4_5tuple.src_ip_mask = ntuple_filter.src_ip_mask;
rule->rules.u.ipv4_5tuple.src_ip = ntuple_filter.src_ip;
rule->u.key.key_add.field_value[DST_FIELD_IPV4].mask_range.u32 =
ntuple_filter.dst_ip_mask;
rule->u.key.key_add.field_value[DST_FIELD_IPV4].value.u32 =
ntuple_filter.dst_ip;
rule->rules.u.ipv4_5tuple.dst_ip_mask = ntuple_filter.dst_ip_mask;
rule->rules.u.ipv4_5tuple.dst_ip = ntuple_filter.dst_ip;
rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
ntuple_filter.src_port_mask;
rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].value.u16 =
ntuple_filter.src_port;
rule->rules.u.ipv4_5tuple.src_port_mask = ntuple_filter.src_port_mask;
rule->rules.u.ipv4_5tuple.src_port = ntuple_filter.src_port;
rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
ntuple_filter.dst_port_mask;
rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].value.u16 =
ntuple_filter.dst_port;
rule->rules.u.ipv4_5tuple.dst_port_mask = ntuple_filter.dst_port_mask;
rule->rules.u.ipv4_5tuple.dst_port = ntuple_filter.dst_port;
#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
print_acl_ipv4_key_add(&rule->u.key.key_add);
#endif
/* key delete values */
memcpy(&rule->u.key.key_del.field_value[PROTO_FIELD_IPV4],
&rule->u.key.key_add.field_value[PROTO_FIELD_IPV4],
NUM_FIELDS_IPV4 * sizeof(struct rte_acl_field));
#ifdef RTE_LIBRTE_CLASSIFY_DEBUG
print_acl_ipv4_key_delete(&rule->u.key.key_del);
#endif
return rule;
}
struct rte_flow_classify_rule *
rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
uint32_t table_id,
int *key_found,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
struct rte_flow_classify_rule *rule;
struct rte_flow_classify_table_entry *table_entry;
int ret;
if (!error)
return NULL;
if (!cls) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "NULL classifier.");
return NULL;
}
if (table_id >= cls->num_tables) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "invalid table_id.");
return NULL;
}
if (key_found == NULL) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "NULL key_found.");
return NULL;
}
if (!pattern) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_NUM,
NULL, "NULL pattern.");
return NULL;
}
if (!actions) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_NUM,
NULL, "NULL action.");
return NULL;
}
if (!attr) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR,
NULL, "NULL attribute.");
return NULL;
}
/* parse attr, pattern and actions */
ret = flow_classify_parse_flow(attr, pattern, actions, error);
if (ret < 0)
return NULL;
switch (cls->type) {
case RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL:
rule = allocate_acl_ipv4_5tuple_rule();
if (!rule)
return NULL;
break;
default:
return NULL;
}
rule->entry = malloc(sizeof(struct rte_flow_classify_table_entry));
if (!rule->entry) {
free(rule);
return NULL;
}
table_entry = rule->entry;
table_entry->rule_id = rule->id;
if (cls->tables[table_id].ops.f_add != NULL) {
ret = cls->tables[table_id].ops.f_add(
cls->tables[table_id].h_table,
&rule->u.key.key_add,
rule->entry,
&rule->key_found,
&rule->entry_ptr);
if (ret) {
free(rule->entry);
free(rule);
return NULL;
}
*key_found = rule->key_found;
}
return rule;
}
int
rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
uint32_t table_id,
struct rte_flow_classify_rule *rule)
{
int ret = -EINVAL;
if (!cls || !rule || table_id >= cls->num_tables)
return ret;
if (cls->tables[table_id].ops.f_delete != NULL)
ret = cls->tables[table_id].ops.f_delete(
cls->tables[table_id].h_table,
&rule->u.key.key_del,
&rule->key_found,
&rule->entry);
return ret;
}
static int
flow_classifier_lookup(struct rte_flow_classifier *cls,
uint32_t table_id,
struct rte_mbuf **pkts,
const uint16_t nb_pkts)
{
int ret = -EINVAL;
uint64_t pkts_mask;
uint64_t lookup_hit_mask;
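/* one bit per packet in the burst; f_lookup sets the corresponding
 * bit in lookup_hit_mask for each packet that matches a rule
 */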
pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
ret = cls->tables[table_id].ops.f_lookup(
cls->tables[table_id].h_table,
pkts, pkts_mask, &lookup_hit_mask,
(void **)cls->entries);
if (!ret && lookup_hit_mask)
cls->nb_pkts = nb_pkts;
else
cls->nb_pkts = 0;
return ret;
}
static int
action_apply(struct rte_flow_classifier *cls,
struct rte_flow_classify_rule *rule,
struct rte_flow_classify_stats *stats)
{
struct rte_flow_classify_ipv4_5tuple_stats *ntuple_stats;
uint64_t count = 0;
int i;
int ret = -EINVAL;
switch (rule->action.type) {
case RTE_FLOW_ACTION_TYPE_COUNT:
for (i = 0; i < cls->nb_pkts; i++) {
if (rule->id == cls->entries[i]->rule_id)
count++;
}
if (count) {
ret = 0;
ntuple_stats =
(struct rte_flow_classify_ipv4_5tuple_stats *)
stats->stats;
ntuple_stats->counter1 = count;
ntuple_stats->ipv4_5tuple = rule->rules.u.ipv4_5tuple;
}
break;
default:
ret = -ENOTSUP;
break;
}
return ret;
}
int
rte_flow_classifier_query(struct rte_flow_classifier *cls,
uint32_t table_id,
struct rte_mbuf **pkts,
const uint16_t nb_pkts,
struct rte_flow_classify_rule *rule,
struct rte_flow_classify_stats *stats)
{
int ret = -EINVAL;
if (!cls || !rule || !stats || !pkts || nb_pkts == 0 ||
table_id >= cls->num_tables)
return ret;
ret = flow_classifier_lookup(cls, table_id, pkts, nb_pkts);
if (!ret)
ret = action_apply(cls, rule, stats);
return ret;
}
RTE_INIT(librte_flow_classify_init_log);
static void
librte_flow_classify_init_log(void)
{
librte_flow_classify_logtype =
rte_log_register("librte.flow_classify");
if (librte_flow_classify_logtype >= 0)
rte_log_set_level(librte_flow_classify_logtype, RTE_LOG_DEBUG);
}

lib/librte_flow_classify/rte_flow_classify.h

@@ -0,0 +1,289 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTE_FLOW_CLASSIFY_H_
#define _RTE_FLOW_CLASSIFY_H_
/**
* @file
*
* RTE Flow Classify Library
*
* @b EXPERIMENTAL: this API may change without prior notice
*
* This library provides flow record information with some measured properties.
*
* The application should define the flow and the measurement criteria
* (action) for it.
*
* The library doesn't maintain any flow records itself; instead, flow
* information is returned to the upper layer only for the given packets.
*
* It is the application's responsibility to call rte_flow_classifier_query()
* for a burst of packets, just after receiving them or before transmitting
* them.
* The application should provide the flow type it is interested in and the
* measurement to apply to that flow via the
* rte_flow_classify_table_entry_add() API, and should provide the
* rte_flow_classifier object and storage for the results to the
* rte_flow_classifier_query() API.
*
* Usage:
* - application calls rte_flow_classifier_create() to create an
* rte_flow_classifier object.
* - application calls rte_flow_classify_table_create() to create a table
* in the rte_flow_classifier object.
* - application calls rte_flow_classify_table_entry_add() to add a rule to
* the table in the rte_flow_classifier object.
* - application calls rte_flow_classifier_query() in a polling manner,
* preferably after rte_eth_rx_burst(). This will cause the library to
* match packet information to flow information with some measurements.
* - rte_flow_classifier object can be destroyed when it is no longer needed
* with rte_flow_classifier_free()
*/
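/*
 * A minimal sketch of the sequence above, assuming an ACL-backed table;
 * acl_params (a struct rte_table_acl_params), attr, pattern, actions and
 * error are set up by the application, e.g. as in the flow_classify
 * sample application:
 *
 *	struct rte_flow_classifier_params cls_params = {
 *		.name = "flow_classifier",
 *		.socket_id = rte_socket_id(),
 *		.type = RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL,
 *	};
 *	struct rte_flow_classify_table_params tbl_params = {
 *		.ops = &rte_table_acl_ops,	// from rte_table_acl.h
 *		.arg_create = &acl_params,
 *	};
 *	struct rte_flow_classifier *cls;
 *	struct rte_flow_classify_rule *rule;
 *	uint32_t table_id;
 *	int key_found;
 *
 *	cls = rte_flow_classifier_create(&cls_params);
 *	rte_flow_classify_table_create(cls, &tbl_params, &table_id);
 *	rule = rte_flow_classify_table_entry_add(cls, table_id, &key_found,
 *			&attr, pattern, actions, &error);
 */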
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_acl.h>
#include <rte_table_acl.h>
#ifdef __cplusplus
extern "C" {
#endif
extern int librte_flow_classify_logtype;
#define RTE_FLOW_CLASSIFY_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, librte_flow_classify_logtype, "%s(): " fmt, \
__func__, ## args)
/** Opaque data type for flow classifier */
struct rte_flow_classifier;
/** Opaque data type for flow classify rule */
struct rte_flow_classify_rule;
/** Flow classify rule type */
enum rte_flow_classify_rule_type {
/** no type */
RTE_FLOW_CLASSIFY_RULE_TYPE_NONE,
/** IPv4 5tuple type */
RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE,
};
/** Flow classify table type */
enum rte_flow_classify_table_type {
/** no type */
RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE,
/** ACL type */
RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL,
};
/**
* Maximum number of tables allowed for any Flow Classifier instance.
* The value of this parameter cannot be changed.
*/
#define RTE_FLOW_CLASSIFY_TABLE_MAX 64
/** Parameters for flow classifier creation */
struct rte_flow_classifier_params {
/** flow classifier name */
const char *name;
/** CPU socket ID where memory for the flow classifier and its
* elements (tables) should be allocated
*/
int socket_id;
/** Table type */
enum rte_flow_classify_table_type type;
};
/** Parameters for table creation */
struct rte_flow_classify_table_params {
/** Table operations (specific to each table type) */
struct rte_table_ops *ops;
/** Opaque param to be passed to the table create operation */
void *arg_create;
};
/** IPv4 5-tuple data */
struct rte_flow_classify_ipv4_5tuple {
uint32_t dst_ip; /**< Destination IP address in big endian. */
uint32_t dst_ip_mask; /**< Mask of destination IP address. */
uint32_t src_ip; /**< Source IP address in big endian. */
uint32_t src_ip_mask; /**< Mask of source IP address. */
uint16_t dst_port; /**< Destination port in big endian. */
uint16_t dst_port_mask; /**< Mask of destination port. */
uint16_t src_port; /**< Source Port in big endian. */
uint16_t src_port_mask; /**< Mask of source port. */
uint8_t proto; /**< L4 protocol. */
uint8_t proto_mask; /**< Mask of L4 protocol. */
};
/**
* Flow stats
*
* For the count action, stats can be returned by the query API.
*
* Storage for stats is provided by application.
*/
struct rte_flow_classify_stats {
void *stats;
};
struct rte_flow_classify_ipv4_5tuple_stats {
/** count of packets that match IPv4 5tuple pattern */
uint64_t counter1;
/** IPv4 5tuple data */
struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
};
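/*
 * For the count action the application supplies the storage, e.g.:
 *
 *	struct rte_flow_classify_ipv4_5tuple_stats ntuple_stats;
 *	struct rte_flow_classify_stats stats = { .stats = &ntuple_stats };
 */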
/**
* Flow classifier create
*
* @param params
* Parameters for flow classifier creation
* @return
* Handle to flow classifier instance on success or NULL otherwise
*/
struct rte_flow_classifier *
rte_flow_classifier_create(struct rte_flow_classifier_params *params);
/**
* Flow classifier free
*
* @param cls
* Handle to flow classifier instance
* @return
* 0 on success, error code otherwise
*/
int
rte_flow_classifier_free(struct rte_flow_classifier *cls);
/**
* Flow classify table create
*
* @param cls
* Handle to flow classifier instance
* @param params
* Parameters for flow_classify table creation
* @param table_id
* Table ID. Valid only within the scope of table IDs of the current
* classifier. Only returned after a successful invocation.
* @return
* 0 on success, error code otherwise
*/
int
rte_flow_classify_table_create(struct rte_flow_classifier *cls,
struct rte_flow_classify_table_params *params,
uint32_t *table_id);
/**
* Add a flow classify rule to the flow_classifier table.
*
* @param[in] cls
* Flow classifier handle
* @param[in] table_id
* id of table
* @param[out] key_found
* returns 1 if the key is already present, 0 otherwise.
* @param[in] attr
* Flow rule attributes
* @param[in] pattern
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action item).
* @param[out] error
* Perform verbose error reporting if not NULL. Structure
* initialised in case of error only.
* @return
* A valid handle in case of success, NULL otherwise.
*/
struct rte_flow_classify_rule *
rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
uint32_t table_id,
int *key_found,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
/**
* Delete a flow classify rule from the flow_classifier table.
*
* @param[in] cls
* Flow classifier handle
* @param[in] table_id
* id of table
* @param[in] rule
* Flow classify rule
* @return
* 0 on success, error code otherwise.
*/
int
rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
uint32_t table_id,
struct rte_flow_classify_rule *rule);
/**
* Query flow classifier for given rule.
*
* @param[in] cls
* Flow classifier handle
* @param[in] table_id
* id of table
* @param[in] pkts
* Pointer to packets to process
* @param[in] nb_pkts
* Number of packets to process
* @param[in] rule
* Flow classify rule
* @param[out] stats
* Flow classify stats
*
* @return
* 0 on success, error code otherwise.
*/
int
rte_flow_classifier_query(struct rte_flow_classifier *cls,
uint32_t table_id,
struct rte_mbuf **pkts,
const uint16_t nb_pkts,
struct rte_flow_classify_rule *rule,
struct rte_flow_classify_stats *stats);
#ifdef __cplusplus
}
#endif
#endif /* _RTE_FLOW_CLASSIFY_H_ */

lib/librte_flow_classify/rte_flow_classify_parse.c

@@ -0,0 +1,546 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_flow_classify.h>
#include "rte_flow_classify_parse.h"
#include <rte_flow_driver.h>
struct classify_valid_pattern {
enum rte_flow_item_type *items;
parse_filter_t parse_filter;
};
static struct rte_flow_action action;
/* Pattern for IPv4 5-tuple UDP filter */
static enum rte_flow_item_type pattern_ntuple_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_UDP,
RTE_FLOW_ITEM_TYPE_END,
};
/* Pattern for IPv4 5-tuple TCP filter */
static enum rte_flow_item_type pattern_ntuple_2[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_TCP,
RTE_FLOW_ITEM_TYPE_END,
};
/* Pattern for IPv4 5-tuple SCTP filter */
static enum rte_flow_item_type pattern_ntuple_3[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_SCTP,
RTE_FLOW_ITEM_TYPE_END,
};
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_eth_ntuple_filter *filter,
struct rte_flow_error *error);
static struct classify_valid_pattern classify_supported_patterns[] = {
/* ntuple */
{ pattern_ntuple_1, classify_parse_ntuple_filter },
{ pattern_ntuple_2, classify_parse_ntuple_filter },
{ pattern_ntuple_3, classify_parse_ntuple_filter },
};
struct rte_flow_action *
classify_get_flow_action(void)
{
return &action;
}
/* Find the first VOID or non-VOID item pointer */
const struct rte_flow_item *
classify_find_first_item(const struct rte_flow_item *item, bool is_void)
{
bool is_find;
while (item->type != RTE_FLOW_ITEM_TYPE_END) {
if (is_void)
is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
else
is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
if (is_find)
break;
item++;
}
return item;
}
/* Skip all VOID items of the pattern */
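/* e.g. the pattern ETH, VOID, IPV4, VOID, UDP, END is copied into
 * items as ETH, IPV4, UDP, END
 */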
void
classify_pattern_skip_void_item(struct rte_flow_item *items,
const struct rte_flow_item *pattern)
{
uint32_t cpy_count = 0;
const struct rte_flow_item *pb = pattern, *pe = pattern;
for (;;) {
/* Find a non-void item first */
pb = classify_find_first_item(pb, false);
if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
pe = pb;
break;
}
/* Find a void item */
pe = classify_find_first_item(pb + 1, true);
cpy_count = pe - pb;
rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
items += cpy_count;
if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
pb = pe;
break;
}
pb = pe + 1;
}
/* Copy the END item. */
rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
/* Check if the pattern matches a supported item type array */
static bool
classify_match_pattern(enum rte_flow_item_type *item_array,
struct rte_flow_item *pattern)
{
struct rte_flow_item *item = pattern;
while ((*item_array == item->type) &&
(*item_array != RTE_FLOW_ITEM_TYPE_END)) {
item_array++;
item++;
}
return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
item->type == RTE_FLOW_ITEM_TYPE_END);
}
/* Find the parse filter function matching the pattern, if any */
parse_filter_t
classify_find_parse_filter_func(struct rte_flow_item *pattern)
{
parse_filter_t parse_filter = NULL;
uint8_t i = 0;
for (; i < RTE_DIM(classify_supported_patterns); i++) {
if (classify_match_pattern(classify_supported_patterns[i].items,
pattern)) {
parse_filter =
classify_supported_patterns[i].parse_filter;
break;
}
}
return parse_filter;
}
#define FLOW_RULE_MIN_PRIORITY 8
#define FLOW_RULE_MAX_PRIORITY 0
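/* rule priorities above FLOW_RULE_MIN_PRIORITY are clamped to
 * FLOW_RULE_MAX_PRIORITY in classify_parse_ntuple_filter() below
 */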
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
do {\
item = pattern + index;\
while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
index++;\
item = pattern + index;\
} \
} while (0)
#define NEXT_ITEM_OF_ACTION(act, actions, index)\
do {\
act = actions + index;\
while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
index++;\
act = actions + index;\
} \
} while (0)
/**
* Please be aware that all the parsers share an assumption:
* rte_flow_item uses big endian, while rte_flow_attr and
* rte_flow_action use CPU order.
* Because the pattern is used to describe the packets,
* the packets normally use network order.
*/
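/* e.g. a dst_port of 80 in an item spec/mask is big endian
 * (rte_cpu_to_be_16(80)), while attr->priority is in CPU order
 */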
/**
* Parse the rule to see if it is an n-tuple rule,
* and extract the n-tuple filter info if it is.
* pattern:
* The first not void item can be ETH or IPV4.
* The second not void item must be IPV4 if the first one is ETH.
* The third not void item must be UDP, TCP or SCTP.
* The next not void item must be END.
* action:
* The first not void action should be COUNT.
* The next not void action should be END.
* pattern example:
* ITEM Spec Mask
* ETH NULL NULL
* IPV4 src_addr 192.168.1.20 0xFFFFFFFF
* dst_addr 192.167.3.50 0xFFFFFFFF
* next_proto_id 17 0xFF
* UDP/TCP/ src_port 80 0xFFFF
* SCTP dst_port 80 0xFFFF
* END
* All other members in mask and spec should be set to 0x00.
* item->last should be NULL.
*/
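/* An illustrative pattern matching the contract above (the IPv4 and UDP
 * spec/mask structs are filled in by the application):
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ipv4_spec, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */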
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_eth_ntuple_filter *filter,
struct rte_flow_error *error)
{
const struct rte_flow_item *item;
const struct rte_flow_action *act;
const struct rte_flow_item_ipv4 *ipv4_spec;
const struct rte_flow_item_ipv4 *ipv4_mask;
const struct rte_flow_item_tcp *tcp_spec;
const struct rte_flow_item_tcp *tcp_mask;
const struct rte_flow_item_udp *udp_spec;
const struct rte_flow_item_udp *udp_mask;
const struct rte_flow_item_sctp *sctp_spec;
const struct rte_flow_item_sctp *sctp_mask;
uint32_t index;
if (!pattern) {
rte_flow_error_set(error,
EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
NULL, "NULL pattern.");
return -EINVAL;
}
if (!actions) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_NUM,
NULL, "NULL action.");
return -EINVAL;
}
if (!attr) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR,
NULL, "NULL attribute.");
return -EINVAL;
}
/* parse pattern */
index = 0;
/* the first not void item can be MAC or IPv4 */
NEXT_ITEM_OF_PATTERN(item, pattern, index);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
return -EINVAL;
}
/* Skip Ethernet */
if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
item,
"Not supported last point for range");
return -EINVAL;
}
/* if the first item is MAC, the content should be NULL */
if (item->spec || item->mask) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Not supported by ntuple filter");
return -EINVAL;
}
/* check if the next not void item is IPv4 */
index++;
NEXT_ITEM_OF_PATTERN(item, pattern, index);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Not supported by ntuple filter");
return -EINVAL;
}
}
/* get the IPv4 info */
if (!item->spec || !item->mask) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid ntuple mask");
return -EINVAL;
}
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
item, "Not supported last point for range");
return -EINVAL;
}
ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
/**
* Only support src & dst addresses, protocol,
* others should be masked.
*/
if (ipv4_mask->hdr.version_ihl ||
ipv4_mask->hdr.type_of_service ||
ipv4_mask->hdr.total_length ||
ipv4_mask->hdr.packet_id ||
ipv4_mask->hdr.fragment_offset ||
ipv4_mask->hdr.time_to_live ||
ipv4_mask->hdr.hdr_checksum) {
rte_flow_error_set(error,
EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
return -EINVAL;
}
filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
filter->src_ip_mask = ipv4_mask->hdr.src_addr;
filter->proto_mask = ipv4_mask->hdr.next_proto_id;
ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
filter->dst_ip = ipv4_spec->hdr.dst_addr;
filter->src_ip = ipv4_spec->hdr.src_addr;
filter->proto = ipv4_spec->hdr.next_proto_id;
/* check if the next not void item is TCP or UDP or SCTP */
index++;
NEXT_ITEM_OF_PATTERN(item, pattern, index);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
return -EINVAL;
}
/* get the TCP/UDP info */
if (!item->spec || !item->mask) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid ntuple mask");
return -EINVAL;
}
/*Not supported last point for range*/
if (item->last) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
item, "Not supported last point for range");
return -EINVAL;
}
if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
/**
* Only support src & dst ports, tcp flags,
* others should be masked.
*/
if (tcp_mask->hdr.sent_seq ||
tcp_mask->hdr.recv_ack ||
tcp_mask->hdr.data_off ||
tcp_mask->hdr.rx_win ||
tcp_mask->hdr.cksum ||
tcp_mask->hdr.tcp_urp) {
memset(filter, 0,
sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
return -EINVAL;
}
filter->dst_port_mask = tcp_mask->hdr.dst_port;
filter->src_port_mask = tcp_mask->hdr.src_port;
if (tcp_mask->hdr.tcp_flags == 0xFF) {
filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
} else if (!tcp_mask->hdr.tcp_flags) {
filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
} else {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
return -EINVAL;
}
tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
filter->dst_port = tcp_spec->hdr.dst_port;
filter->src_port = tcp_spec->hdr.src_port;
filter->tcp_flags = tcp_spec->hdr.tcp_flags;
} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
udp_mask = (const struct rte_flow_item_udp *)item->mask;
/**
* Only support src & dst ports,
* others should be masked.
*/
if (udp_mask->hdr.dgram_len ||
udp_mask->hdr.dgram_cksum) {
memset(filter, 0,
sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
return -EINVAL;
}
filter->dst_port_mask = udp_mask->hdr.dst_port;
filter->src_port_mask = udp_mask->hdr.src_port;
udp_spec = (const struct rte_flow_item_udp *)item->spec;
filter->dst_port = udp_spec->hdr.dst_port;
filter->src_port = udp_spec->hdr.src_port;
} else {
sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
/**
* Only support src & dst ports,
* others should be masked.
*/
if (sctp_mask->hdr.tag ||
sctp_mask->hdr.cksum) {
memset(filter, 0,
sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
return -EINVAL;
}
filter->dst_port_mask = sctp_mask->hdr.dst_port;
filter->src_port_mask = sctp_mask->hdr.src_port;
sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
filter->dst_port = sctp_spec->hdr.dst_port;
filter->src_port = sctp_spec->hdr.src_port;
}
/* check if the next not void item is END */
index++;
NEXT_ITEM_OF_PATTERN(item, pattern, index);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
return -EINVAL;
}
/* parse action */
index = 0;
/**
* n-tuple only supports count,
* check if the first not void action is COUNT.
*/
memset(&action, 0, sizeof(action));
NEXT_ITEM_OF_ACTION(act, actions, index);
if (act->type != RTE_FLOW_ACTION_TYPE_COUNT) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
item, "Not supported action.");
return -EINVAL;
}
action.type = RTE_FLOW_ACTION_TYPE_COUNT;
/* check if the next not void item is END */
index++;
NEXT_ITEM_OF_ACTION(act, actions, index);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
return -EINVAL;
}
/* parse attr */
/* must be input direction */
if (!attr->ingress) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
attr, "Only support ingress.");
return -EINVAL;
}
/* not supported */
if (attr->egress) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
attr, "Not support egress.");
return -EINVAL;
}
if (attr->priority > 0xFFFF) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
attr, "Error priority.");
return -EINVAL;
}
filter->priority = (uint16_t)attr->priority;
if (attr->priority > FLOW_RULE_MIN_PRIORITY)
filter->priority = FLOW_RULE_MAX_PRIORITY;
return 0;
}

lib/librte_flow_classify/rte_flow_classify_parse.h

@@ -0,0 +1,74 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RTE_FLOW_CLASSIFY_PARSE_H_
#define _RTE_FLOW_CLASSIFY_PARSE_H_
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef int (*parse_filter_t)(const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_eth_ntuple_filter *filter,
struct rte_flow_error *error);
/* Skip all VOID items of the pattern */
void
classify_pattern_skip_void_item(struct rte_flow_item *items,
const struct rte_flow_item *pattern);
/* Find the first VOID or non-VOID item pointer */
const struct rte_flow_item *
classify_find_first_item(const struct rte_flow_item *item, bool is_void);
/* Find the parse filter function matching the pattern, if any */
parse_filter_t
classify_find_parse_filter_func(struct rte_flow_item *pattern);
/* get action data */
struct rte_flow_action *
classify_get_flow_action(void);
#ifdef __cplusplus
}
#endif
#endif /* _RTE_FLOW_CLASSIFY_PARSE_H_ */

lib/librte_flow_classify/rte_flow_classify_version.map

@@ -0,0 +1,12 @@
EXPERIMENTAL {
global:
rte_flow_classifier_create;
rte_flow_classifier_free;
rte_flow_classifier_query;
rte_flow_classify_table_create;
rte_flow_classify_table_entry_add;
rte_flow_classify_table_entry_delete;
local: *;
};

mk/rte.app.mk

@@ -58,6 +58,7 @@ _LDLIBS-y += -L$(RTE_SDK_BIN)/lib
#
# Order is important: from higher level to lower level
#
_LDLIBS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += -lrte_flow_classify
_LDLIBS-$(CONFIG_RTE_LIBRTE_PIPELINE) += -lrte_pipeline
_LDLIBS-$(CONFIG_RTE_LIBRTE_TABLE) += -lrte_table
_LDLIBS-$(CONFIG_RTE_LIBRTE_PORT) += -lrte_port