Refactor the flow steering APIs used by mlx5en(4). This change is
needed by the coming ibcore and mlx5ib updates in order to support
traffic redirection to so-called raw ethernet QPs.

Remove unused E-switch related routines and files while at it.

Sponsored by:	Mellanox Technologies
MFC after:	1 week
Hans Petter Selasky 2017-11-10 09:49:08 +00:00
parent 1529133ab3
commit 5a93b4cd52
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=325638
16 changed files with 4513 additions and 1741 deletions
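
For orientation, here is a minimal consumer sketch (not part of the commit) of the refactored flow steering API declared in the new sys/dev/mlx5/fs.h below. The priority, table sizing, and the MLX5_FLOW_DESTINATION_TYPE_TIR constant are illustrative assumptions, and error handling is simplified.

/*
 * Illustrative only: steer all receive traffic in the kernel
 * namespace to a given TIR. MLX5_FLOW_DESTINATION_TYPE_TIR is
 * assumed to come from mlx5_ifc.h; real callers may also need
 * error-pointer style checks instead of plain NULL tests.
 */
static struct mlx5_flow_rule *
example_steer_to_tir(struct mlx5_core_dev *mdev, u32 tirn)
{
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_destination dest;
	u32 mc[MLX5_ST_SZ_DW(fte_match_param)] = {0};
	u32 mv[MLX5_ST_SZ_DW(fte_match_param)] = {0};

	ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_KERNEL);
	if (ns == NULL)
		return (NULL);

	/* 64 entries, auto-grouped into at most 4 flow groups. */
	ft = mlx5_create_auto_grouped_flow_table(ns, 0, "example", 64, 4);
	if (ft == NULL)
		return (NULL);

	/* Empty match criteria: the rule matches every packet. */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;
	return (mlx5_add_flow_rule(ft, 0, mc, mv,
	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
	    MLX5_FS_DEFAULT_FLOW_TAG, &dest));
}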


@ -4644,7 +4644,9 @@ dev/mlx5/mlx5_core/mlx5_diagnostics.c optional mlx5 pci \
 	compile-with "${OFED_C}"
 dev/mlx5/mlx5_core/mlx5_eq.c optional mlx5 pci \
 	compile-with "${OFED_C}"
-dev/mlx5/mlx5_core/mlx5_flow_table.c optional mlx5 pci \
+dev/mlx5/mlx5_core/mlx5_fs_cmd.c optional mlx5 pci \
 	compile-with "${OFED_C}"
+dev/mlx5/mlx5_core/mlx5_fs_tree.c optional mlx5 pci \
+	compile-with "${OFED_C}"
 dev/mlx5/mlx5_core/mlx5_fw.c optional mlx5 pci \
 	compile-with "${OFED_C}"


@ -1085,6 +1085,7 @@ enum {
 	MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
 	MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 5,
 	MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 6,
+	MLX5_FLOW_TABLE_TYPE_NIC_RX_RDMA = 7,
 };
 
 enum {


@ -582,6 +582,7 @@ struct mlx5_special_contexts {
 	int resd_lkey;
 };
 
+struct mlx5_flow_root_namespace;
 struct mlx5_core_dev {
 	struct pci_dev *pdev;
 	char board_id[MLX5_BOARD_ID_LEN];
@ -600,6 +601,12 @@ struct mlx5_core_dev {
 	u32 issi;
 	struct mlx5_special_contexts special_contexts;
 	unsigned int module_status[MLX5_MAX_PORTS];
+	struct mlx5_flow_root_namespace *root_ns;
+	struct mlx5_flow_root_namespace *fdb_root_ns;
+	struct mlx5_flow_root_namespace *esw_egress_root_ns;
+	struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+	struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
+	struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
 	u32 num_q_counter_allocated[MLX5_INTERFACE_NUMBER];
 };


@ -1,46 +0,0 @@
/*-
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef MLX5_ESWITCH_VACL_TABLE_H
#define MLX5_ESWITCH_VACL_TABLE_H
#include <dev/mlx5/driver.h>
void *mlx5_vacl_table_create(struct mlx5_core_dev *dev,
u16 vport, bool is_egress);
void mlx5_vacl_table_cleanup(void *acl_t);
int mlx5_vacl_table_add_vlan(void *acl_t, u16 vlan);
void mlx5_vacl_table_del_vlan(void *acl_t, u16 vlan);
int mlx5_vacl_table_enable_vlan_filter(void *acl_t);
void mlx5_vacl_table_disable_vlan_filter(void *acl_t);
int mlx5_vacl_table_drop_untagged(void *acl_t);
int mlx5_vacl_table_allow_untagged(void *acl_t);
int mlx5_vacl_table_drop_unknown_vlan(void *acl_t);
int mlx5_vacl_table_allow_unknown_vlan(void *acl_t);
int mlx5_vacl_table_set_spoofchk(void *acl_t, bool spoofchk, u8 *vport_mac);
#endif /* MLX5_ESWITCH_VACL_TABLE_H */


@ -1,56 +0,0 @@
/*-
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef MLX5_FLOW_TABLE_H
#define MLX5_FLOW_TABLE_H
#include <dev/mlx5/driver.h>
#define MLX5_SET_FLOW_TABLE_ROOT_OPMOD_SET 0x0
#define MLX5_SET_FLOW_TABLE_ROOT_OPMOD_RESET 0x1
struct mlx5_flow_table_group {
u8 log_sz;
u8 match_criteria_enable;
u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
};
void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
u16 vport,
u16 num_groups,
struct mlx5_flow_table_group *group);
void mlx5_destroy_flow_table(void *flow_table);
int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
void *match_criteria, void *flow_context,
u32 *flow_index);
int mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
u32 mlx5_get_flow_table_id(void *flow_table);
int mlx5_set_flow_table_root(struct mlx5_core_dev *mdev, u16 op_mod,
u8 vport_num, u8 table_type, u32 table_id,
u32 underlay_qpn);
#endif /* MLX5_FLOW_TABLE_H */

sys/dev/mlx5/fs.h (new file, 232 lines)

@ -0,0 +1,232 @@
/*-
* Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MLX5_FS_
#define _MLX5_FS_
#include <linux/list.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/device.h>
#include <dev/mlx5/driver.h>
enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
};
/* Flow tag */
enum {
MLX5_FS_DEFAULT_FLOW_TAG = 0xFFFFFF,
MLX5_FS_ETH_FLOW_TAG = 0xFFFFFE,
MLX5_FS_SNIFFER_FLOW_TAG = 0xFFFFFD,
};
enum {
MLX5_FS_FLOW_TAG_MASK = 0xFFFFFF,
};
#define FS_MAX_TYPES 10
#define FS_MAX_ENTRIES 32000U
enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS,
MLX5_FLOW_NAMESPACE_KERNEL,
MLX5_FLOW_NAMESPACE_LEFTOVERS,
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
MLX5_FLOW_NAMESPACE_FDB,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
};
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_rule;
struct mlx5_flow_namespace;
struct mlx5_flow_spec {
u8 match_criteria_enable;
u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
};
struct mlx5_flow_destination {
u32 type;
union {
u32 tir_num;
struct mlx5_flow_table *ft;
u32 vport_num;
};
};
#define FT_NAME_STR_SZ 20
#define LEFTOVERS_RULE_NUM 2
static inline void build_leftovers_ft_param(char *name,
unsigned int *priority,
int *n_ent,
int *n_grp)
{
snprintf(name, FT_NAME_STR_SZ, "leftovers");
*priority = 0; /* priority of leftovers_prio-0 */
*n_ent = LEFTOVERS_RULE_NUM + 1; /* +1 for the star rule */
*n_grp = LEFTOVERS_RULE_NUM;
}
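/*
 * Note on outer_header_zero() below: it reports whether the match
 * criteria buffer is all zeroes. The first byte is tested directly,
 * then memcmp() compares the buffer against itself shifted by one
 * byte, which succeeds only when every byte equals the first.
 */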
static inline bool outer_header_zero(u32 *match_criteria)
{
int size = MLX5_ST_SZ_BYTES(fte_match_param);
char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
outer_headers);
return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
outer_headers_c + 1,
size - 1);
}
struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type);
/* The underlying implementation creates two more entries for
 * chaining flow tables. The user should be aware that passing
 * max_num_ftes as 2^N will result in a flow table of double that size.
 */
struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
const char *name,
int num_flow_table_entries,
int max_num_groups);
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
u16 vport,
int prio,
const char *name,
int num_flow_table_entries);
struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
int prio,
const char *name,
int num_flow_table_entries);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
/* inbox should be set with the following values:
* start_flow_index
* end_flow_index
* match_criteria_enable
* match_criteria
*/
struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
/* Single destination per rule.
* Group ID is implied by the match criteria.
*/
struct mlx5_flow_rule *
mlx5_add_flow_rule(struct mlx5_flow_table *ft,
u8 match_criteria_enable,
u32 *match_criteria,
u32 *match_value,
u32 action,
u32 flow_tag,
struct mlx5_flow_destination *dest);
void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
/* The following API is for the sniffer */
typedef int (*rule_event_fn)(struct mlx5_flow_rule *rule,
bool ctx_changed,
void *client_data,
void *context);
struct mlx5_flow_handler;
struct flow_client_priv_data;
void mlx5e_sniffer_roce_mode_notify(
struct mlx5_core_dev *mdev,
int action);
int mlx5_set_rule_private_data(struct mlx5_flow_rule *rule, struct
mlx5_flow_handler *handler, void
*client_data);
struct mlx5_flow_handler *mlx5_register_rule_notifier(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type ns_type,
rule_event_fn add_cb,
rule_event_fn del_cb,
void *context);
void mlx5_unregister_rule_notifier(struct mlx5_flow_handler *handler);
void mlx5_flow_iterate_existing_rules(struct mlx5_flow_namespace *ns,
rule_event_fn cb,
void *context);
void mlx5_get_match_criteria(u32 *match_criteria,
struct mlx5_flow_rule *rule);
void mlx5_get_match_value(u32 *match_value,
struct mlx5_flow_rule *rule);
u8 mlx5_get_match_criteria_enable(struct mlx5_flow_rule *rule);
struct mlx5_flow_rules_list *get_roce_flow_rules(u8 roce_mode);
void mlx5_del_flow_rules_list(struct mlx5_flow_rules_list *rules_list);
struct mlx5_flow_rules_list {
struct list_head head;
};
struct mlx5_flow_rule_node {
struct list_head list;
u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
u8 match_criteria_enable;
};
struct mlx5_core_fs_mask {
u8 match_criteria_enable;
u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
};
bool fs_match_exact_val(
struct mlx5_core_fs_mask *mask,
void *val1,
void *val2);
bool fs_match_exact_mask(
u8 match_criteria_enable1,
u8 match_criteria_enable2,
void *mask1,
void *mask2);
/**********end API for sniffer**********/
#endif
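
The notifier hooks above are consumed by the sniffer. A registration sketch (illustrative only, with no-op callbacks; the namespace choice here is arbitrary):

static int
example_rule_add_cb(struct mlx5_flow_rule *rule, bool ctx_changed,
    void *client_data, void *context)
{
	return (0);	/* no-op */
}

static int
example_rule_del_cb(struct mlx5_flow_rule *rule, bool ctx_changed,
    void *client_data, void *context)
{
	return (0);	/* no-op */
}

static struct mlx5_flow_handler *
example_register_notifier(struct mlx5_core_dev *mdev, void *ctx)
{
	return (mlx5_register_rule_notifier(mdev,
	    MLX5_FLOW_NAMESPACE_KERNEL, example_rule_add_cb,
	    example_rule_del_cb, ctx));
}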

View File

@ -0,0 +1,300 @@
/*-
* Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MLX5_FS_CORE_
#define _MLX5_FS_CORE_
#include <asm/atomic.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <dev/mlx5/fs.h>
enum fs_type {
FS_TYPE_NAMESPACE,
FS_TYPE_PRIO,
FS_TYPE_FLOW_TABLE,
FS_TYPE_FLOW_GROUP,
FS_TYPE_FLOW_ENTRY,
FS_TYPE_FLOW_DEST
};
enum fs_ft_type {
FS_FT_NIC_RX = 0x0,
FS_FT_ESW_EGRESS_ACL = 0x2,
FS_FT_ESW_INGRESS_ACL = 0x3,
FS_FT_FDB = 0X4,
FS_FT_SNIFFER_RX = 0x5,
FS_FT_SNIFFER_TX = 0x6
};
enum fs_fte_status {
FS_FTE_STATUS_EXISTING = 1UL << 0,
};
/* Should always be the first variable in the struct */
struct fs_base {
struct list_head list;
struct fs_base *parent;
enum fs_type type;
struct kref refcount;
/* lock the node for writing and traversing */
struct mutex lock;
struct completion complete;
atomic_t users_refcount;
const char *name;
};
struct mlx5_flow_rule {
struct fs_base base;
struct mlx5_flow_destination dest_attr;
struct list_head clients_data;
/* protect the clients_data list */
struct mutex clients_lock;
};
struct fs_fte {
struct fs_base base;
u32 val[MLX5_ST_SZ_DW(fte_match_param)];
uint32_t dests_size;
uint32_t flow_tag;
struct list_head dests;
uint32_t index; /* index in ft */
u8 action; /* MLX5_FLOW_CONTEXT_ACTION */
enum fs_fte_status status;
};
struct fs_star_rule {
struct mlx5_flow_group *fg;
struct fs_fte *fte;
};
struct mlx5_flow_table {
struct fs_base base;
/* sorted list by start_index */
struct list_head fgs;
struct {
bool active;
unsigned int max_types;
unsigned int num_types;
} autogroup;
unsigned int max_fte;
unsigned int level;
uint32_t id;
u16 vport;
enum fs_ft_type type;
struct fs_star_rule star_rule;
unsigned int shared_refcount;
};
enum fs_prio_flags {
MLX5_CORE_FS_PRIO_SHARED = 1
};
struct fs_prio {
struct fs_base base;
struct list_head objs; /* each object is a namespace or ft */
unsigned int max_ft;
unsigned int num_ft;
unsigned int max_ns;
unsigned int prio;
/* When creating a shared flow table, this lock should be taken */
struct mutex shared_lock;
u8 flags;
};
struct mlx5_flow_namespace {
/* parent == NULL => root ns */
struct fs_base base;
/* sorted by priority number */
struct list_head prios; /* list of fs_prios */
struct list_head list_notifiers;
struct rw_semaphore notifiers_rw_sem;
struct rw_semaphore dests_rw_sem;
};
struct mlx5_flow_root_namespace {
struct mlx5_flow_namespace ns;
struct mlx5_flow_table *ft_level_0;
enum fs_ft_type table_type;
struct mlx5_core_dev *dev;
struct mlx5_flow_table *root_ft;
/* When chaining flow-tables, this lock should be taken */
struct mutex fs_chain_lock;
};
struct mlx5_flow_group {
struct fs_base base;
struct list_head ftes;
struct mlx5_core_fs_mask mask;
uint32_t start_index;
uint32_t max_ftes;
uint32_t num_ftes;
uint32_t id;
};
struct mlx5_flow_handler {
struct list_head list;
rule_event_fn add_dst_cb;
rule_event_fn del_dst_cb;
void *client_context;
struct mlx5_flow_namespace *ns;
};
struct fs_client_priv_data {
struct mlx5_flow_handler *fs_handler;
struct list_head list;
void *client_dst_data;
};
void _fs_remove_node(struct kref *kref);
#define fs_get_obj(v, _base) {v = container_of((_base), typeof(*v), base); }
#define fs_get_parent(v, child) {v = (child)->base.parent ? \
container_of((child)->base.parent, \
typeof(*v), base) : NULL; }
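/*
 * fs_get_obj()/fs_get_parent() use container_of() to map a struct
 * fs_base pointer back to the enclosing fs object; see the note above
 * struct fs_base in this header.
 */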
#define fs_list_for_each_entry(pos, cond, root) \
list_for_each_entry(pos, root, base.list) \
if (!(cond)) {} else
#define fs_list_for_each_entry_continue(pos, cond, root) \
list_for_each_entry_continue(pos, root, base.list) \
if (!(cond)) {} else
#define fs_list_for_each_entry_reverse(pos, cond, root) \
list_for_each_entry_reverse(pos, root, base.list) \
if (!(cond)) {} else
#define fs_list_for_each_entry_continue_reverse(pos, cond, root) \
list_for_each_entry_continue_reverse(pos, root, base.list) \
if (!(cond)) {} else
#define fs_for_each_ft(pos, prio) \
fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_TABLE, \
&(prio)->objs)
#define fs_for_each_ft_reverse(pos, prio) \
fs_list_for_each_entry_reverse(pos, \
(pos)->base.type == FS_TYPE_FLOW_TABLE, \
&(prio)->objs)
#define fs_for_each_ns(pos, prio) \
fs_list_for_each_entry(pos, \
(pos)->base.type == FS_TYPE_NAMESPACE, \
&(prio)->objs)
#define fs_for_each_ns_or_ft_reverse(pos, prio) \
list_for_each_entry_reverse(pos, &(prio)->objs, list) \
if (!((pos)->type == FS_TYPE_NAMESPACE || \
(pos)->type == FS_TYPE_FLOW_TABLE)) {} else
#define fs_for_each_ns_or_ft(pos, prio) \
list_for_each_entry(pos, &(prio)->objs, list) \
if (!((pos)->type == FS_TYPE_NAMESPACE || \
(pos)->type == FS_TYPE_FLOW_TABLE)) {} else
#define fs_for_each_ns_or_ft_continue_reverse(pos, prio) \
list_for_each_entry_continue_reverse(pos, &(prio)->objs, list) \
if (!((pos)->type == FS_TYPE_NAMESPACE || \
(pos)->type == FS_TYPE_FLOW_TABLE)) {} else
#define fs_for_each_ns_or_ft_continue(pos, prio) \
list_for_each_entry_continue(pos, &(prio)->objs, list) \
if (!((pos)->type == FS_TYPE_NAMESPACE || \
(pos)->type == FS_TYPE_FLOW_TABLE)) {} else
#define fs_for_each_prio(pos, ns) \
fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_PRIO, \
&(ns)->prios)
#define fs_for_each_prio_reverse(pos, ns) \
fs_list_for_each_entry_reverse(pos, (pos)->base.type == FS_TYPE_PRIO, \
&(ns)->prios)
#define fs_for_each_prio_continue(pos, ns) \
fs_list_for_each_entry_continue(pos, (pos)->base.type == FS_TYPE_PRIO, \
&(ns)->prios)
#define fs_for_each_prio_continue_reverse(pos, ns) \
fs_list_for_each_entry_continue_reverse(pos, \
(pos)->base.type == FS_TYPE_PRIO, \
&(ns)->prios)
#define fs_for_each_fg(pos, ft) \
fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_GROUP, \
&(ft)->fgs)
#define fs_for_each_fte(pos, fg) \
fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_ENTRY, \
&(fg)->ftes)
#define fs_for_each_dst(pos, fte) \
fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_DEST, \
&(fte)->dests)
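/*
 * Illustrative usage of the iterators above, e.g. walking the flow
 * tables of a priority (handle_ft() is a placeholder):
 *
 *	struct mlx5_flow_table *ft;
 *
 *	fs_for_each_ft(ft, prio)
 *		handle_ft(ft);
 */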
int mlx5_cmd_fs_create_ft(struct mlx5_core_dev *dev,
u16 vport,
enum fs_ft_type type, unsigned int level,
unsigned int log_size, unsigned int *table_id);
int mlx5_cmd_fs_destroy_ft(struct mlx5_core_dev *dev,
u16 vport,
enum fs_ft_type type, unsigned int table_id);
int mlx5_cmd_fs_create_fg(struct mlx5_core_dev *dev,
u32 *in,
u16 vport,
enum fs_ft_type type, unsigned int table_id,
unsigned int *group_id);
int mlx5_cmd_fs_destroy_fg(struct mlx5_core_dev *dev,
u16 vport,
enum fs_ft_type type, unsigned int table_id,
unsigned int group_id);
int mlx5_cmd_fs_set_fte(struct mlx5_core_dev *dev,
u16 vport,
enum fs_fte_status *fte_status,
u32 *match_val,
enum fs_ft_type type, unsigned int table_id,
unsigned int index, unsigned int group_id,
unsigned int flow_tag,
unsigned short action, int dest_size,
struct list_head *dests); /* mlx5_flow_destination */
int mlx5_cmd_fs_delete_fte(struct mlx5_core_dev *dev,
u16 vport,
enum fs_fte_status *fte_status,
enum fs_ft_type type, unsigned int table_id,
unsigned int index);
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
enum fs_ft_type type,
unsigned int id);
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
#endif
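
The mlx5_cmd_fs_*() helpers above map directly onto firmware commands. A hypothetical call sequence (illustrative only; fg_in must be pre-filled with start/end_flow_index and match criteria as noted in fs.h, and vport 0 addresses the caller's own vport):

static int
example_create_table_and_group(struct mlx5_core_dev *dev, u32 *fg_in)
{
	unsigned int table_id, group_id;
	int err;

	/* level 0, log_size 6 => a 64-entry NIC RX flow table */
	err = mlx5_cmd_fs_create_ft(dev, 0, FS_FT_NIC_RX, 0, 6, &table_id);
	if (err)
		return (err);

	err = mlx5_cmd_fs_create_fg(dev, fg_in, 0, FS_FT_NIC_RX,
	    table_id, &group_id);
	if (err)
		mlx5_cmd_fs_destroy_ft(dev, 0, FS_FT_NIC_RX, table_id);
	return (err);
}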


@ -1,803 +0,0 @@
/*-
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <linux/etherdevice.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/flow_table.h>
#include <dev/mlx5/eswitch_vacl.h>
#include "mlx5_core.h"
enum {
MLX5_ACL_LOOPBACK_GROUP_IDX = 0,
MLX5_ACL_UNTAGGED_GROUP_IDX = 1,
MLX5_ACL_VLAN_GROUP_IDX = 2,
MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX = 3,
MLX5_ACL_DEFAULT_GROUP_IDX = 4,
MLX5_ACL_GROUPS_NUM,
};
struct mlx_vacl_fr {
bool applied;
u32 fi;
u16 action;
};
struct mlx5_vacl_table {
struct mlx5_core_dev *dev;
u16 vport;
void *ft;
int max_ft_size;
int acl_type;
struct mlx_vacl_fr loopback_fr;
struct mlx_vacl_fr untagged_fr;
struct mlx_vacl_fr unknown_vlan_fr;
struct mlx_vacl_fr default_fr;
bool vlan_filter_enabled;
bool vlan_filter_applied;
unsigned long *vlan_allowed_bitmap;
u32 vlan_fi_table[4096];
bool spoofchk_enabled;
u8 smac[ETH_ALEN];
};
static int mlx5_vacl_table_allow_vlan(void *acl_t, u16 vlan)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
u32 *flow_context = NULL;
void *in_match_criteria = NULL;
void *in_match_value = NULL;
u8 *smac;
int vlan_mc_enable = MLX5_MATCH_OUTER_HEADERS;
int err = 0;
if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
return -EINVAL;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
if (!flow_context) {
err = -ENOMEM;
goto out;
}
in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!in_match_criteria) {
err = -ENOMEM;
goto out;
}
/* Apply vlan rule */
MLX5_SET(flow_context, flow_context, action,
MLX5_FLOW_CONTEXT_ACTION_ALLOW);
in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
MLX5_SET(fte_match_param, in_match_value, outer_headers.cvlan_tag, 1);
MLX5_SET(fte_match_param, in_match_value, outer_headers.first_vid,
vlan);
MLX5_SET(fte_match_param, in_match_criteria, outer_headers.cvlan_tag, 1);
MLX5_SET(fte_match_param, in_match_criteria, outer_headers.first_vid,
0xfff);
if (acl_table->spoofchk_enabled) {
smac = MLX5_ADDR_OF(fte_match_param,
in_match_value,
outer_headers.smac_47_16);
ether_addr_copy(smac, acl_table->smac);
smac = MLX5_ADDR_OF(fte_match_param,
in_match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
err = mlx5_add_flow_table_entry(acl_table->ft, vlan_mc_enable,
in_match_criteria, flow_context,
&acl_table->vlan_fi_table[vlan]);
out:
if (flow_context)
vfree(flow_context);
if (in_match_criteria)
vfree(in_match_criteria);
return err;
}
static int mlx5_vacl_table_apply_loopback_filter(void *acl_t, u16 new_action)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
u8 loopback_mc_enable = MLX5_MATCH_MISC_PARAMETERS;
u32 *flow_context = NULL;
void *in_match_criteria = NULL;
void *in_match_value = NULL;
void *mv_misc = NULL;
void *mc_misc = NULL;
int err = 0;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
if (!flow_context) {
err = -ENOMEM;
goto out;
}
in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!in_match_criteria) {
err = -ENOMEM;
goto out;
}
if (acl_table->loopback_fr.applied)
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->loopback_fr.fi);
/* Apply new loopback rule */
MLX5_SET(flow_context, flow_context, action, new_action);
in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
mv_misc = MLX5_ADDR_OF(fte_match_param, in_match_value,
misc_parameters);
mc_misc = MLX5_ADDR_OF(fte_match_param, in_match_criteria,
misc_parameters);
MLX5_SET(fte_match_set_misc, mv_misc, source_port, acl_table->vport);
MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
err = mlx5_add_flow_table_entry(acl_table->ft, loopback_mc_enable,
in_match_criteria, flow_context,
&acl_table->loopback_fr.fi);
if (err) {
acl_table->loopback_fr.applied = false;
} else {
acl_table->loopback_fr.applied = true;
acl_table->loopback_fr.action = new_action;
}
out:
if (flow_context)
vfree(flow_context);
if (in_match_criteria)
vfree(in_match_criteria);
return err;
}
static int mlx5_vacl_table_apply_default(void *acl_t, u16 new_action)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
u8 default_mc_enable = 0;
u32 *flow_context = NULL;
void *in_match_criteria = NULL;
int err = 0;
if (!acl_table->spoofchk_enabled)
return -EINVAL;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
if (!flow_context) {
err = -ENOMEM;
goto out;
}
in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!in_match_criteria) {
err = -ENOMEM;
goto out;
}
if (acl_table->default_fr.applied)
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->default_fr.fi);
/* Apply new default rule */
MLX5_SET(flow_context, flow_context, action, new_action);
err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
in_match_criteria, flow_context,
&acl_table->default_fr.fi);
if (err) {
acl_table->default_fr.applied = false;
} else {
acl_table->default_fr.applied = true;
acl_table->default_fr.action = new_action;
}
out:
if (flow_context)
vfree(flow_context);
if (in_match_criteria)
vfree(in_match_criteria);
return err;
}
static int mlx5_vacl_table_apply_untagged(void *acl_t, u16 new_action)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
u8 untagged_mc_enable = MLX5_MATCH_OUTER_HEADERS;
u8 *smac;
u32 *flow_context = NULL;
void *in_match_criteria = NULL;
void *in_match_value = NULL;
int err = 0;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
if (!flow_context) {
err = -ENOMEM;
goto out;
}
in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!in_match_criteria) {
err = -ENOMEM;
goto out;
}
if (acl_table->untagged_fr.applied)
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->untagged_fr.fi);
/* Apply new untagged rule */
MLX5_SET(flow_context, flow_context, action, new_action);
in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
MLX5_SET(fte_match_param, in_match_value, outer_headers.cvlan_tag, 0);
MLX5_SET(fte_match_param, in_match_criteria, outer_headers.cvlan_tag, 1);
if (acl_table->spoofchk_enabled) {
smac = MLX5_ADDR_OF(fte_match_param,
in_match_value,
outer_headers.smac_47_16);
ether_addr_copy(smac, acl_table->smac);
smac = MLX5_ADDR_OF(fte_match_param,
in_match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
err = mlx5_add_flow_table_entry(acl_table->ft, untagged_mc_enable,
in_match_criteria, flow_context,
&acl_table->untagged_fr.fi);
if (err) {
acl_table->untagged_fr.applied = false;
} else {
acl_table->untagged_fr.applied = true;
acl_table->untagged_fr.action = new_action;
}
out:
if (flow_context)
vfree(flow_context);
if (in_match_criteria)
vfree(in_match_criteria);
return err;
}
static int mlx5_vacl_table_apply_unknown_vlan(void *acl_t, u16 new_action)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
u8 default_mc_enable = (!acl_table->spoofchk_enabled) ? 0 :
MLX5_MATCH_OUTER_HEADERS;
u32 *flow_context = NULL;
void *in_match_criteria = NULL;
void *in_match_value = NULL;
u8 *smac;
int err = 0;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
if (!flow_context) {
err = -ENOMEM;
goto out;
}
in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!in_match_criteria) {
err = -ENOMEM;
goto out;
}
if (acl_table->unknown_vlan_fr.applied)
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->unknown_vlan_fr.fi);
/* Apply new unknown vlan rule */
MLX5_SET(flow_context, flow_context, action, new_action);
if (acl_table->spoofchk_enabled) {
in_match_value = MLX5_ADDR_OF(flow_context, flow_context,
match_value);
smac = MLX5_ADDR_OF(fte_match_param,
in_match_value,
outer_headers.smac_47_16);
ether_addr_copy(smac, acl_table->smac);
smac = MLX5_ADDR_OF(fte_match_param,
in_match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
in_match_criteria, flow_context,
&acl_table->unknown_vlan_fr.fi);
if (err) {
acl_table->unknown_vlan_fr.applied = false;
} else {
acl_table->unknown_vlan_fr.applied = true;
acl_table->unknown_vlan_fr.action = new_action;
}
out:
if (flow_context)
vfree(flow_context);
if (in_match_criteria)
vfree(in_match_criteria);
return err;
}
static int mlx5_vacl_table_apply_vlan_filter(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int index = 0;
int err_index = 0;
int err = 0;
if (acl_table->vlan_filter_applied)
return 0;
for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
index < 4096;
index = find_next_bit(acl_table->vlan_allowed_bitmap,
4096, ++index)) {
err = mlx5_vacl_table_allow_vlan(acl_t, index);
if (err)
goto err_disable_vlans;
}
acl_table->vlan_filter_applied = true;
return 0;
err_disable_vlans:
for (err_index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
err_index < index;
err_index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
++err_index)) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->vlan_fi_table[err_index]);
}
return err;
}
static void mlx5_vacl_table_disapply_vlan_filter(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int index = 0;
if (!acl_table->vlan_filter_applied)
return;
for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
index < 4096;
index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
++index)) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->vlan_fi_table[index]);
}
acl_table->vlan_filter_applied = false;
}
static void mlx5_vacl_table_disapply_all_filters(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
if (acl_table->default_fr.applied) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->default_fr.fi);
acl_table->default_fr.applied = false;
}
if (acl_table->unknown_vlan_fr.applied) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->unknown_vlan_fr.fi);
acl_table->unknown_vlan_fr.applied = false;
}
if (acl_table->loopback_fr.applied) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->loopback_fr.fi);
acl_table->loopback_fr.applied = false;
}
if (acl_table->untagged_fr.applied) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->untagged_fr.fi);
acl_table->untagged_fr.applied = false;
}
if (acl_table->vlan_filter_applied) {
mlx5_vacl_table_disapply_vlan_filter(acl_t);
acl_table->vlan_filter_applied = false;
}
}
static int mlx5_vacl_table_apply_all_filters(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int err = 0;
if (!acl_table->default_fr.applied && acl_table->spoofchk_enabled) {
err = mlx5_vacl_table_apply_default(acl_table,
acl_table->default_fr.action);
if (err)
goto err_disapply_all;
}
if (!acl_table->unknown_vlan_fr.applied) {
err = mlx5_vacl_table_apply_unknown_vlan(acl_table,
acl_table->unknown_vlan_fr.action);
if (err)
goto err_disapply_all;
}
if (!acl_table->loopback_fr.applied &&
acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
err = mlx5_vacl_table_apply_loopback_filter(
acl_table,
acl_table->loopback_fr.action);
if (err)
goto err_disapply_all;
}
if (!acl_table->untagged_fr.applied) {
err = mlx5_vacl_table_apply_untagged(acl_table,
acl_table->untagged_fr.action);
if (err)
goto err_disapply_all;
}
if (!acl_table->vlan_filter_applied && acl_table->vlan_filter_enabled) {
err = mlx5_vacl_table_apply_vlan_filter(acl_t);
if (err)
goto err_disapply_all;
}
goto out;
err_disapply_all:
mlx5_vacl_table_disapply_all_filters(acl_t);
out:
return err;
}
static void mlx5_vacl_table_destroy_ft(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
mlx5_vacl_table_disapply_all_filters(acl_t);
if (acl_table->ft)
mlx5_destroy_flow_table(acl_table->ft);
acl_table->ft = NULL;
}
static int mlx5_vacl_table_create_ft(void *acl_t, bool spoofchk)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int log_acl_ft_size;
int err = 0;
int groups_num = MLX5_ACL_GROUPS_NUM - 1;
int shift_idx = MLX5_ACL_UNTAGGED_GROUP_IDX;
u8 *smac;
struct mlx5_flow_table_group *g;
if (acl_table->ft)
return -EINVAL;
g = kcalloc(MLX5_ACL_GROUPS_NUM, sizeof(*g), GFP_KERNEL);
if (!g)
goto out;
acl_table->spoofchk_enabled = spoofchk;
/*
* for vlan group
*/
log_acl_ft_size = 4096;
/*
* for loopback filter rule
*/
log_acl_ft_size += 1;
/*
* for untagged rule
*/
log_acl_ft_size += 1;
/*
* for unknown vlan rule
*/
log_acl_ft_size += 1;
/*
* for default rule
*/
log_acl_ft_size += 1;
log_acl_ft_size = order_base_2(log_acl_ft_size);
log_acl_ft_size = min_t(int, log_acl_ft_size, acl_table->max_ft_size);
if (log_acl_ft_size < 2)
goto out;
if (acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
/* Loopback filter group */
g[MLX5_ACL_LOOPBACK_GROUP_IDX].log_sz = 0;
g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria_enable =
MLX5_MATCH_MISC_PARAMETERS;
MLX5_SET_TO_ONES(fte_match_param,
g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria,
misc_parameters.source_port);
groups_num++;
shift_idx = MLX5_ACL_LOOPBACK_GROUP_IDX;
}
/* Untagged traffic group */
g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].log_sz = 0;
g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria_enable =
MLX5_MATCH_OUTER_HEADERS;
MLX5_SET(fte_match_param,
g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria,
outer_headers.cvlan_tag, 1);
if (spoofchk) {
smac = MLX5_ADDR_OF(fte_match_param,
g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx]
.match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
/* Allowed vlans group */
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].log_sz = log_acl_ft_size - 1;
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
MLX5_MATCH_OUTER_HEADERS;
MLX5_SET(fte_match_param,
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
outer_headers.cvlan_tag, 1);
MLX5_SET(fte_match_param,
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
outer_headers.first_vid, 0xfff);
if (spoofchk) {
smac = MLX5_ADDR_OF(fte_match_param,
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx]
.match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
/* Unknown vlan traffic group */
g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].log_sz = 0;
g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
(spoofchk ? MLX5_MATCH_OUTER_HEADERS : 0);
if (spoofchk) {
smac = MLX5_ADDR_OF(
fte_match_param,
g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx]
.match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
/*
* Default group - for spoofchk only.
*/
g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].log_sz = 0;
g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].match_criteria_enable = 0;
acl_table->ft = mlx5_create_flow_table(acl_table->dev,
0,
acl_table->acl_type,
acl_table->vport,
groups_num,
g);
if (!acl_table->ft) {
err = -ENOMEM;
goto out;
}
err = mlx5_vacl_table_apply_all_filters(acl_t);
if (err)
goto err_destroy_ft;
goto out;
err_destroy_ft:
mlx5_vacl_table_destroy_ft(acl_table->ft);
acl_table->ft = NULL;
out:
kfree(g);
return err;
}
void *mlx5_vacl_table_create(struct mlx5_core_dev *dev,
u16 vport, bool is_egress)
{
struct mlx5_vacl_table *acl_table;
int err = 0;
if (is_egress && !MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
return NULL;
if (!is_egress && !MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
return NULL;
acl_table = kzalloc(sizeof(*acl_table), GFP_KERNEL);
if (!acl_table)
return NULL;
acl_table->acl_type = is_egress ? MLX5_FLOW_TABLE_TYPE_EGRESS_ACL :
MLX5_FLOW_TABLE_TYPE_INGRESS_ACL;
acl_table->max_ft_size = (is_egress ?
MLX5_CAP_ESW_EGRESS_ACL(dev,
log_max_ft_size) :
MLX5_CAP_ESW_INGRESS_ACL(dev,
log_max_ft_size));
acl_table->dev = dev;
acl_table->vport = vport;
/*
 * Default behavior: allow; when spoofchk is enabled, drop by default.
 */
acl_table->default_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
acl_table->loopback_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
acl_table->unknown_vlan_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
acl_table->untagged_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
err = mlx5_vacl_table_create_ft(acl_table, false);
if (err)
goto err_free_acl_table;
acl_table->vlan_allowed_bitmap = kcalloc(BITS_TO_LONGS(4096),
sizeof(uintptr_t),
GFP_KERNEL);
if (!acl_table->vlan_allowed_bitmap)
goto err_destroy_ft;
goto out;
err_destroy_ft:
mlx5_vacl_table_destroy_ft(acl_table->ft);
acl_table->ft = NULL;
err_free_acl_table:
kfree(acl_table);
acl_table = NULL;
out:
return (void *)acl_table;
}
EXPORT_SYMBOL(mlx5_vacl_table_create);
void mlx5_vacl_table_cleanup(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
mlx5_vacl_table_destroy_ft(acl_t);
kfree(acl_table->vlan_allowed_bitmap);
kfree(acl_table);
}
EXPORT_SYMBOL(mlx5_vacl_table_cleanup);
int mlx5_vacl_table_add_vlan(void *acl_t, u16 vlan)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int err = 0;
if (test_bit(vlan, acl_table->vlan_allowed_bitmap))
return 0;
__set_bit(vlan, acl_table->vlan_allowed_bitmap);
if (!acl_table->vlan_filter_applied)
return 0;
err = mlx5_vacl_table_allow_vlan(acl_t, vlan);
if (err)
goto err_clear_vbit;
goto out;
err_clear_vbit:
__clear_bit(vlan, acl_table->vlan_allowed_bitmap);
out:
return err;
}
EXPORT_SYMBOL(mlx5_vacl_table_add_vlan);
void mlx5_vacl_table_del_vlan(void *acl_t, u16 vlan)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
return;
__clear_bit(vlan, acl_table->vlan_allowed_bitmap);
if (!acl_table->vlan_filter_applied)
return;
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->vlan_fi_table[vlan]);
}
EXPORT_SYMBOL(mlx5_vacl_table_del_vlan);
int mlx5_vacl_table_enable_vlan_filter(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
acl_table->vlan_filter_enabled = true;
return mlx5_vacl_table_apply_vlan_filter(acl_t);
}
EXPORT_SYMBOL(mlx5_vacl_table_enable_vlan_filter);
void mlx5_vacl_table_disable_vlan_filter(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
acl_table->vlan_filter_enabled = false;
mlx5_vacl_table_disapply_vlan_filter(acl_t);
}
EXPORT_SYMBOL(mlx5_vacl_table_disable_vlan_filter);
int mlx5_vacl_table_drop_untagged(void *acl_t)
{
return mlx5_vacl_table_apply_untagged(acl_t,
MLX5_FLOW_CONTEXT_ACTION_DROP);
}
EXPORT_SYMBOL(mlx5_vacl_table_drop_untagged);
int mlx5_vacl_table_allow_untagged(void *acl_t)
{
return mlx5_vacl_table_apply_untagged(acl_t,
MLX5_FLOW_CONTEXT_ACTION_ALLOW);
}
EXPORT_SYMBOL(mlx5_vacl_table_allow_untagged);
int mlx5_vacl_table_drop_unknown_vlan(void *acl_t)
{
return mlx5_vacl_table_apply_unknown_vlan(acl_t,
MLX5_FLOW_CONTEXT_ACTION_DROP);
}
EXPORT_SYMBOL(mlx5_vacl_table_drop_unknown_vlan);
int mlx5_vacl_table_allow_unknown_vlan(void *acl_t)
{
return mlx5_vacl_table_apply_unknown_vlan(acl_t,
MLX5_FLOW_CONTEXT_ACTION_ALLOW);
}
EXPORT_SYMBOL(mlx5_vacl_table_allow_unknown_vlan);
int mlx5_vacl_table_set_spoofchk(void *acl_t, bool spoofchk, u8 *vport_mac)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int err = 0;
if (spoofchk == acl_table->spoofchk_enabled) {
if (!spoofchk ||
(spoofchk && !memcmp(acl_table->smac, vport_mac, ETH_ALEN)))
return 0;
}
ether_addr_copy(acl_table->smac, vport_mac);
if (spoofchk != acl_table->spoofchk_enabled) {
mlx5_vacl_table_destroy_ft(acl_t);
err = mlx5_vacl_table_create_ft(acl_t, spoofchk);
} else {
mlx5_vacl_table_disapply_all_filters(acl_t);
err = mlx5_vacl_table_apply_all_filters(acl_t);
}
return err;
}
EXPORT_SYMBOL(mlx5_vacl_table_set_spoofchk);


@ -1,479 +0,0 @@
/*-
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <dev/mlx5/driver.h>
#include <dev/mlx5/flow_table.h>
#include "mlx5_core.h"
struct mlx5_ftg {
struct mlx5_flow_table_group g;
u32 id;
u32 start_ix;
};
struct mlx5_flow_table {
struct mlx5_core_dev *dev;
u8 level;
u8 type;
u32 id;
u16 vport;
struct mutex mutex; /* sync bitmap alloc */
u16 num_groups;
struct mlx5_ftg *group;
unsigned long *bitmap;
u32 size;
};
static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
u32 flow_index, void *flow_context)
{
u32 out[MLX5_ST_SZ_DW(set_fte_out)];
u32 *in;
void *in_flow_context;
int fcdls =
MLX5_GET(flow_context, flow_context, destination_list_size) *
MLX5_ST_SZ_BYTES(dest_format_struct);
int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
int err;
in = mlx5_vzalloc(inlen);
if (!in) {
mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
return -ENOMEM;
}
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport, !!ft->vport);
MLX5_SET(set_fte_in, in, table_type, ft->type);
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, flow_index);
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
memcpy(in_flow_context, flow_context,
MLX5_ST_SZ_BYTES(flow_context) + fcdls);
MLX5_SET(flow_context, in_flow_context, group_id, ft->group[group_ix].id);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
sizeof(out));
kvfree(in);
return err;
}
static int mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
{
u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
MLX5_SET_DFTEI(in, vport_number, ft->vport);
MLX5_SET_DFTEI(in, other_vport, !!ft->vport);
MLX5_SET_DFTEI(in, table_type, ft->type);
MLX5_SET_DFTEI(in, table_id, ft->id);
MLX5_SET_DFTEI(in, flow_index, flow_index);
MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
return mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
sizeof(out));
}
static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
MLX5_SET_DFGI(in, vport_number, ft->vport);
MLX5_SET_DFGI(in, other_vport, !!ft->vport);
MLX5_SET_DFGI(in, table_type, ft->type);
MLX5_SET_DFGI(in, table_id, ft->id);
MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
MLX5_SET_DFGI(in, group_id, ft->group[i].id);
mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
{
u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
u32 *in;
void *in_match_criteria;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_group *g = &ft->group[i].g;
u32 start_ix = ft->group[i].start_ix;
u32 end_ix = start_ix + (1 << g->log_sz) - 1;
int err;
in = mlx5_vzalloc(inlen);
if (!in) {
mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
return -ENOMEM;
}
in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
match_criteria);
memset(out, 0, sizeof(out));
#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
MLX5_SET_CFGI(in, vport_number, ft->vport);
MLX5_SET_CFGI(in, other_vport, !!ft->vport);
MLX5_SET_CFGI(in, table_type, ft->type);
MLX5_SET_CFGI(in, table_id, ft->id);
MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET_CFGI(in, start_flow_index, start_ix);
MLX5_SET_CFGI(in, end_flow_index, end_ix);
MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);
memcpy(in_match_criteria, g->match_criteria,
MLX5_ST_SZ_BYTES(fte_match_param));
err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
sizeof(out));
if (!err)
ft->group[i].id = MLX5_GET(create_flow_group_out, out,
group_id);
kvfree(in);
return err;
}
static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
{
int i;
for (i = 0; i < ft->num_groups; i++)
mlx5_destroy_flow_group_cmd(ft, i);
}
static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
{
int err;
int i;
for (i = 0; i < ft->num_groups; i++) {
err = mlx5_create_flow_group_cmd(ft, i);
if (err)
goto err_destroy_flow_table_groups;
}
return 0;
err_destroy_flow_table_groups:
for (i--; i >= 0; i--)
mlx5_destroy_flow_group_cmd(ft, i);
return err;
}
static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
{
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
int err;
memset(in, 0, sizeof(in));
MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_table_in, in, other_vport, !!ft->vport);
MLX5_SET(create_flow_table_in, in, table_type, ft->type);
MLX5_SET(create_flow_table_in, in, level, ft->level);
MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size));
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
sizeof(out));
if (err)
return err;
ft->id = MLX5_GET(create_flow_table_out, out, table_id);
return 0;
}
static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
MLX5_SET_DFTI(in, vport_number, ft->vport);
MLX5_SET_DFTI(in, other_vport, !!ft->vport);
MLX5_SET_DFTI(in, table_type, ft->type);
MLX5_SET_DFTI(in, table_id, ft->id);
MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
u32 *match_criteria, int *group_ix)
{
void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
outer_headers);
void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
misc_parameters);
void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
inner_headers);
int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc);
int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
int i;
for (i = 0; i < ft->num_groups; i++) {
struct mlx5_flow_table_group *g = &ft->group[i].g;
void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
g->match_criteria,
outer_headers);
void *gmc_misc = MLX5_ADDR_OF(fte_match_param,
g->match_criteria,
misc_parameters);
void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
g->match_criteria,
inner_headers);
if (g->match_criteria_enable != match_criteria_enable)
continue;
if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
continue;
if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
continue;
if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
continue;
*group_ix = i;
return 0;
}
return -EINVAL;
}
static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
{
struct mlx5_ftg *g = &ft->group[group_ix];
int err = 0;
mutex_lock(&ft->mutex);
*ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix);
if (*ix >= (g->start_ix + (1 << g->g.log_sz)))
err = -ENOSPC;
else
__set_bit(*ix, ft->bitmap);
mutex_unlock(&ft->mutex);
return err;
}
static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
{
__clear_bit(ix, ft->bitmap);
}
int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
void *match_criteria, void *flow_context,
u32 *flow_index)
{
struct mlx5_flow_table *ft = flow_table;
int group_ix;
int err;
err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
&group_ix);
if (err) {
mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
return err;
}
err = alloc_flow_index(ft, group_ix, flow_index);
if (err) {
mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
return err;
}
err = mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
if (err)
mlx5_free_flow_index(ft, *flow_index);
return err;
}
EXPORT_SYMBOL(mlx5_add_flow_table_entry);
int mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
{
struct mlx5_flow_table *ft = flow_table;
int ret;
ret = mlx5_del_flow_entry_cmd(ft, flow_index);
if (!ret)
mlx5_free_flow_index(ft, flow_index);
return ret;
}
EXPORT_SYMBOL(mlx5_del_flow_table_entry);
void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
u16 vport,
u16 num_groups,
struct mlx5_flow_table_group *group)
{
struct mlx5_flow_table *ft;
u32 start_ix = 0;
u32 ft_size = 0;
void *gr;
void *bm;
int err;
int i;
for (i = 0; i < num_groups; i++)
ft_size += (1 << group[i].log_sz);
ft = kzalloc(sizeof(*ft), GFP_KERNEL);
gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL);
ft->group = gr;
ft->bitmap = bm;
ft->num_groups = num_groups;
ft->level = level;
ft->vport = vport;
ft->type = table_type;
ft->size = ft_size;
ft->dev = dev;
mutex_init(&ft->mutex);
for (i = 0; i < ft->num_groups; i++) {
memcpy(&ft->group[i].g, &group[i], sizeof(*group));
ft->group[i].start_ix = start_ix;
start_ix += 1 << group[i].log_sz;
}
err = mlx5_create_flow_table_cmd(ft);
if (err)
goto err_free_ft;
err = mlx5_create_flow_table_groups(ft);
if (err)
goto err_destroy_flow_table_cmd;
return ft;
err_destroy_flow_table_cmd:
mlx5_destroy_flow_table_cmd(ft);
err_free_ft:
mlx5_core_warn(dev, "failed to alloc flow table\n");
kfree(bm);
kfree(gr);
kfree(ft);
return NULL;
}
EXPORT_SYMBOL(mlx5_create_flow_table);
void mlx5_destroy_flow_table(void *flow_table)
{
struct mlx5_flow_table *ft = flow_table;
mlx5_destroy_flow_table_groups(ft);
mlx5_destroy_flow_table_cmd(ft);
kfree(ft->bitmap);
kfree(ft->group);
kfree(ft);
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);
u32 mlx5_get_flow_table_id(void *flow_table)
{
struct mlx5_flow_table *ft = flow_table;
return ft->id;
}
EXPORT_SYMBOL(mlx5_get_flow_table_id);
int mlx5_set_flow_table_root(struct mlx5_core_dev *mdev, u16 op_mod,
u8 vport_num, u8 table_type, u32 table_id,
u32 underlay_qpn)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)];
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)];
int err;
int is_group_manager;
is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
memset(in, 0, sizeof(in));
MLX5_SET(set_flow_table_root_in, in, op_mod, op_mod);
MLX5_SET(set_flow_table_root_in, in, table_type, table_type);
MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
if (op_mod == MLX5_SET_FLOW_TABLE_ROOT_OPMOD_SET)
MLX5_SET(set_flow_table_root_in, in, table_id, table_id);
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
if (vport_num) {
if (is_group_manager) {
MLX5_SET(set_flow_table_root_in, in, other_vport,
1);
MLX5_SET(set_flow_table_root_in, in, vport_number,
vport_num);
} else {
return -EPERM;
}
}
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
sizeof(out));
if (err)
return err;
return 0;
}
EXPORT_SYMBOL(mlx5_set_flow_table_root);


@ -0,0 +1,302 @@
/*-
* Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <linux/types.h>
#include <linux/module.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/device.h>
#include <dev/mlx5/fs.h>
#include "fs_core.h"
#include "mlx5_core.h"
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
enum fs_ft_type type,
unsigned int id)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)];
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)];
if (!dev)
return -EINVAL;
memset(in, 0, sizeof(in));
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, type);
MLX5_SET(set_flow_table_root_in, in, table_id, id);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
}
int mlx5_cmd_fs_create_ft(struct mlx5_core_dev *dev,
u16 vport,
enum fs_ft_type type, unsigned int level,
unsigned int log_size, unsigned int *table_id)
{
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
int err;
if (!dev)
return -EINVAL;
memset(in, 0, sizeof(in));
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, type);
MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
MLX5_SET(create_flow_table_in, in, flow_table_context.log_size,
log_size);
if (vport) {
MLX5_SET(create_flow_table_in, in, vport_number, vport);
MLX5_SET(create_flow_table_in, in, other_vport, 1);
}
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
if (err)
return err;
*table_id = MLX5_GET(create_flow_table_out, out, table_id);
return 0;
}
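As a usage sketch, a caller pairs mlx5_cmd_fs_create_ft() with mlx5_cmd_fs_destroy_ft(); FS_FT_NIC_RX is assumed here to be the fs_ft_type value for the NIC receive domain from fs_core.h:

/* Hedged example: 1024-entry NIC RX table at level 0 on the local vport. */
unsigned int table_id;
int err;

err = mlx5_cmd_fs_create_ft(dev, 0 /* current vport */, FS_FT_NIC_RX,
    0 /* level */, 10 /* log_size: 2^10 entries */, &table_id);
if (err)
	return (err);
/* ... create flow groups and entries inside the table ... */
err = mlx5_cmd_fs_destroy_ft(dev, 0, FS_FT_NIC_RX, table_id);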
int mlx5_cmd_fs_destroy_ft(struct mlx5_core_dev *dev,
u16 vport,
enum fs_ft_type type, unsigned int table_id)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
if (!dev)
return -EINVAL;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(destroy_flow_table_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
MLX5_SET(destroy_flow_table_in, in, table_type, type);
MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
if (vport) {
MLX5_SET(destroy_flow_table_in, in, vport_number, vport);
MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
}
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_fs_create_fg(struct mlx5_core_dev *dev,
u32 *in,
u16 vport,
enum fs_ft_type type, unsigned int table_id,
unsigned int *group_id)
{
u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
int err;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
if (!dev)
return -EINVAL;
memset(out, 0, sizeof(out));
MLX5_SET(create_flow_group_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET(create_flow_group_in, in, table_type, type);
MLX5_SET(create_flow_group_in, in, table_id, table_id);
if (vport) {
MLX5_SET(create_flow_group_in, in, vport_number, vport);
MLX5_SET(create_flow_group_in, in, other_vport, 1);
}
err = mlx5_cmd_exec_check_status(dev, in,
inlen, out,
sizeof(out));
if (!err)
*group_id = MLX5_GET(create_flow_group_out, out, group_id);
return err;
}
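Unlike the other helpers, mlx5_cmd_fs_create_fg() takes a caller-built inbox so the match criteria can be filled in directly. A hedged sketch, assuming MLX5_MATCH_OUTER_HEADERS is the match_criteria_enable bit for outer headers and FS_FT_NIC_RX as above:

u32 *in;
void *mc;
unsigned int group_id;
int err;

in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in));
if (in == NULL)
	return (-ENOMEM);
/* One-entry group (flow indices 0..0) matching on the outer ethertype. */
MLX5_SET(create_flow_group_in, in, match_criteria_enable,
    MLX5_MATCH_OUTER_HEADERS);
MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, in, end_flow_index, 0);
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
MLX5_SET(fte_match_param, mc, outer_headers.ethertype, 0xffff);
err = mlx5_cmd_fs_create_fg(dev, in, 0, FS_FT_NIC_RX, table_id, &group_id);
kvfree(in);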
int mlx5_cmd_fs_destroy_fg(struct mlx5_core_dev *dev,
u16 vport,
enum fs_ft_type type, unsigned int table_id,
unsigned int group_id)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
if (!dev)
return -EINVAL;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(destroy_flow_group_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_GROUP);
MLX5_SET(destroy_flow_group_in, in, table_type, type);
MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
if (vport) {
MLX5_SET(destroy_flow_group_in, in, vport_number, vport);
MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
}
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_fs_set_fte(struct mlx5_core_dev *dev,
u16 vport,
enum fs_fte_status *fte_status,
u32 *match_val,
enum fs_ft_type type, unsigned int table_id,
unsigned int index, unsigned int group_id,
unsigned int flow_tag,
unsigned short action, int dest_size,
			struct list_head *dests) /* mlx5_flow_destination */
{
u32 out[MLX5_ST_SZ_DW(set_fte_out)];
u32 *in;
unsigned int inlen;
struct mlx5_flow_rule *dst;
void *in_flow_context;
void *in_match_value;
void *in_dests;
int err;
int opmod = 0;
int modify_mask = 0;
int atomic_mod_cap;
if (action != MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)
dest_size = 0;
inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
dest_size * MLX5_ST_SZ_BYTES(dest_format_struct);
if (!dev)
return -EINVAL;
if (*fte_status & FS_FTE_STATUS_EXISTING) {
atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.
flow_modify_en);
if (!atomic_mod_cap)
return -ENOTSUPP;
opmod = 1;
modify_mask = 1 <<
MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST;
}
in = mlx5_vzalloc(inlen);
if (!in) {
mlx5_core_warn(dev, "failed to allocate inbox\n");
return -ENOMEM;
}
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
MLX5_SET(set_fte_in, in, op_mod, opmod);
MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
MLX5_SET(set_fte_in, in, table_type, type);
MLX5_SET(set_fte_in, in, table_id, table_id);
MLX5_SET(set_fte_in, in, flow_index, index);
if (vport) {
MLX5_SET(set_fte_in, in, vport_number, vport);
MLX5_SET(set_fte_in, in, other_vport, 1);
}
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag, flow_tag);
MLX5_SET(flow_context, in_flow_context, action, action);
MLX5_SET(flow_context, in_flow_context, destination_list_size,
dest_size);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, match_val, MLX5_ST_SZ_BYTES(fte_match_param));
if (dest_size) {
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
list_for_each_entry(dst, dests, base.list) {
unsigned int id;
MLX5_SET(dest_format_struct, in_dests, destination_type,
dst->dest_attr.type);
if (dst->dest_attr.type ==
MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE)
id = dst->dest_attr.ft->id;
else
id = dst->dest_attr.tir_num;
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
}
}
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
sizeof(out));
if (!err)
*fte_status |= FS_FTE_STATUS_EXISTING;
kvfree(in);
return err;
}
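The destination list is a list of struct mlx5_flow_rule, of which only dest_attr and base.list are consumed here. A hedged sketch that forwards one entry to a TIR (the stack-allocated rule is for illustration only; the fs tree code in this commit manages these objects itself):

struct mlx5_flow_rule dst = {};
struct list_head dests;
enum fs_fte_status fte_status = 0;
u32 match_val[MLX5_ST_SZ_DW(fte_match_param)] = {0};
int err;

INIT_LIST_HEAD(&dests);
dst.dest_attr.type = MLX5_FLOW_CONTEXT_DEST_TYPE_TIR;
dst.dest_attr.tir_num = tirn;	/* a previously created TIR */
list_add(&dst.base.list, &dests);
/* Match IPv4 by ethertype; the group's criteria must cover this field. */
MLX5_SET(fte_match_param, match_val, outer_headers.ethertype, 0x0800);
err = mlx5_cmd_fs_set_fte(dev, 0, &fte_status, match_val, FS_FT_NIC_RX,
    table_id, 0 /* flow_index */, group_id, 0 /* flow_tag */,
    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 1, &dests);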
int mlx5_cmd_fs_delete_fte(struct mlx5_core_dev *dev,
u16 vport,
enum fs_fte_status *fte_status,
enum fs_ft_type type, unsigned int table_id,
unsigned int index)
{
u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
int err;
if (!(*fte_status & FS_FTE_STATUS_EXISTING))
return 0;
if (!dev)
return -EINVAL;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
MLX5_SET(delete_fte_in, in, table_type, type);
MLX5_SET(delete_fte_in, in, table_id, table_id);
MLX5_SET(delete_fte_in, in, flow_index, index);
if (vport) {
MLX5_SET(delete_fte_in, in, vport_number, vport);
MLX5_SET(delete_fte_in, in, other_vport, 1);
}
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*fte_status = 0;
return err;
}

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/*-
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
* Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -42,6 +42,7 @@
#include <linux/delay.h>
#include <dev/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
#include "fs_core.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
@ -794,8 +795,21 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
mlx5_init_srq_table(dev);
mlx5_init_mr_table(dev);
err = mlx5_init_fs(dev);
if (err) {
mlx5_core_err(dev, "flow steering init %d\n", err);
goto err_init_tables;
}
return 0;
err_init_tables:
mlx5_cleanup_mr_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
unmap_bf_area(dev);
err_stop_eqs:
mlx5_stop_eqs(dev);
@ -848,6 +862,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
mlx5_cleanup_fs(dev);
mlx5_cleanup_mr_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);

View File

@ -590,10 +590,13 @@ enum {
MLX5E_NUM_RQT = 2,
};
struct mlx5_flow_rule;
struct mlx5e_eth_addr_info {
u8 addr [ETH_ALEN + 2];
u32 tt_vec;
u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */
/* flow table rule per traffic type */
struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT];
};
#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
@ -622,15 +625,24 @@ enum {
struct mlx5e_vlan_db {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u32 active_vlans_ft_ix[VLAN_N_VID];
u32 untagged_rule_ft_ix;
u32 any_vlan_rule_ft_ix;
struct mlx5_flow_rule *active_vlans_ft_rule[VLAN_N_VID];
struct mlx5_flow_rule *untagged_ft_rule;
struct mlx5_flow_rule *any_cvlan_ft_rule;
struct mlx5_flow_rule *any_svlan_ft_rule;
bool filter_disabled;
};
struct mlx5e_flow_table {
void *vlan;
void *main;
int num_groups;
struct mlx5_flow_table *t;
struct mlx5_flow_group **g;
};
struct mlx5e_flow_tables {
struct mlx5_flow_namespace *ns;
struct mlx5e_flow_table vlan;
struct mlx5e_flow_table main;
struct mlx5e_flow_table inner_rss;
};
struct mlx5e_priv {
@ -657,7 +669,7 @@ struct mlx5e_priv {
u32 rqtn;
u32 tirn[MLX5E_NUM_TT];
struct mlx5e_flow_table ft;
struct mlx5e_flow_tables fts;
struct mlx5e_eth_addr_db eth_addr;
struct mlx5e_vlan_db vlan;
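The switch from u32 table indices (ft_ix) to struct mlx5_flow_rule pointers means mlx5en(4) now releases steering state per rule. A hedged sketch of the resulting teardown idiom, assuming mlx5_del_flow_rule() is the deletion entry point exported by the new fs API in <dev/mlx5/fs.h> (the function name below is illustrative):

/* Illustrative teardown of one address's per-traffic-type rules. */
static void
mlx5e_del_eth_addr_rules(struct mlx5e_eth_addr_info *ai)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (ai->ft_rule[tt] != NULL) {
			mlx5_del_flow_rule(ai->ft_rule[tt]);
			ai->ft_rule[tt] = NULL;
		}
	}
}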

File diff suppressed because it is too large

View File

@ -384,26 +384,31 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 flow_counter[0x1];
u8 flow_modify_en[0x1];
u8 modify_root[0x1];
u8 reserved_0[0x1b];
u8 identified_miss_table[0x1];
u8 flow_table_modify[0x1];
u8 encap[0x1];
u8 decap[0x1];
u8 reset_root_to_default[0x1];
u8 reserved_at_a[0x16];
u8 reserved_1[0x2];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
u8 reserved_2[0x10];
u8 reserved_at_28[0x10];
u8 max_ft_level[0x8];
u8 reserved_3[0x20];
u8 reserved_at_40[0x20];
u8 reserved_4[0x18];
u8 reserved_at_60[0x18];
u8 log_max_ft_num[0x8];
u8 reserved_5[0x10];
u8 reserved_at_80[0x10];
u8 log_max_flow_counter[0x8];
u8 log_max_destination[0x8];
u8 reserved_6[0x18];
u8 reserved_at_a0[0x18];
u8 log_max_flow[0x8];
u8 reserved_7[0x40];
u8 reserved_at_c0[0x40];
struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
@ -441,6 +446,22 @@ struct mlx5_ifc_dest_format_struct_bits {
u8 reserved_0[0x20];
};
struct mlx5_ifc_ipv4_layout_bits {
u8 reserved_at_0[0x60];
u8 ipv4[0x20];
};
struct mlx5_ifc_ipv6_layout_bits {
u8 ipv6[16][0x8];
};
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
u8 reserved_at_0[0x80];
};
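This union replaces the raw src_ip[4]/dst_ip[4] arrays below, letting match-parameter code address an IPv4 field by name instead of by the last dword of an IPv6 array. A short sketch of the addressing it enables, with match_val and the MLX5_ADDR_OF()/MLX5_SET() macros used as elsewhere in this commit:

/* Set an outer IPv4 source address of 10.0.0.1 in a match parameter. */
void *hdrs = MLX5_ADDR_OF(fte_match_param, match_val, outer_headers);

MLX5_SET(fte_match_set_lyr_2_4, hdrs,
    src_ipv4_src_ipv6.ipv4_layout.ipv4, 0x0a000001);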
struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 smac_47_16[0x20];
@ -471,9 +492,9 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 udp_sport[0x10];
u8 udp_dport[0x10];
u8 src_ip[4][0x20];
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
u8 dst_ip[4][0x20];
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
};
struct mlx5_ifc_fte_match_set_misc_bits {
@ -700,7 +721,10 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
};
struct mlx5_ifc_flow_table_nic_cap_bits {
u8 reserved_0[0x200];
u8 nic_rx_multi_path_tirs[0x1];
u8 nic_rx_multi_path_tirs_fts[0x1];
u8 allow_sniffer_and_nic_rx_shared_tir[0x1];
u8 reserved_at_3[0x1fd];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
@ -2540,6 +2564,29 @@ union mlx5_ifc_hca_cap_union_bits {
u8 reserved_0[0x8000];
};
enum {
MLX5_FLOW_TABLE_CONTEXT_TABLE_MISS_ACTION_DEFAULT = 0x0,
MLX5_FLOW_TABLE_CONTEXT_TABLE_MISS_ACTION_IDENTIFIED = 0x1,
};
struct mlx5_ifc_flow_table_context_bits {
u8 encap_en[0x1];
u8 decap_en[0x1];
u8 reserved_at_2[0x2];
u8 table_miss_action[0x4];
u8 level[0x8];
u8 reserved_at_10[0x8];
u8 log_size[0x8];
u8 reserved_at_20[0x8];
u8 table_miss_id[0x18];
u8 reserved_at_40[0x8];
u8 lag_master_next_table_id[0x18];
u8 reserved_at_60[0xe0];
};
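flow_table_context folds the old level/log_size words together with the new miss-action fields, which is what lets the fs tree chain tables: a miss in one table can be forwarded to another. A hedged fragment extending the create_flow_table inbox built in mlx5_cmd_fs_create_ft() above ('next_id' is an assumed, already-created table id):

/* Forward misses in this table to the table identified by next_id. */
MLX5_SET(create_flow_table_in, in, flow_table_context.table_miss_action,
    MLX5_FLOW_TABLE_CONTEXT_TABLE_MISS_ACTION_IDENTIFIED);
MLX5_SET(create_flow_table_in, in, flow_table_context.table_miss_id,
    next_id);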
struct mlx5_ifc_esw_vport_context_bits {
u8 reserved_0[0x3];
u8 vport_svlan_strip[0x1];
@ -4486,18 +4533,13 @@ struct mlx5_ifc_query_hca_cap_in_bits {
struct mlx5_ifc_query_flow_table_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_1[0x80];
u8 reserved_at_40[0x80];
u8 reserved_2[0x8];
u8 level[0x8];
u8 reserved_3[0x8];
u8 log_size[0x8];
u8 reserved_4[0x120];
struct mlx5_ifc_flow_table_context_bits flow_table_context;
};
struct mlx5_ifc_query_flow_table_in_bits {
@ -5394,6 +5436,43 @@ struct mlx5_ifc_modify_hca_vport_context_in_bits {
struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
};
struct mlx5_ifc_modify_flow_table_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x40];
};
enum {
MLX5_MODIFY_FLOW_TABLE_SELECT_MISS_ACTION_AND_ID = 0x1,
MLX5_MODIFY_FLOW_TABLE_SELECT_LAG_MASTER_NEXT_TABLE_ID = 0x8000,
};
struct mlx5_ifc_modify_flow_table_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
u8 reserved_at_60[0x10];
u8 modify_field_select[0x10];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
struct mlx5_ifc_flow_table_context_bits flow_table_context;
};
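The new MODIFY_FLOW_TABLE layout allows retargeting the miss path of a live table. A hedged sketch of issuing the command, assuming MLX5_CMD_OP_MODIFY_FLOW_TABLE is defined alongside the other flow-table opcodes and 'next_id' is an existing table:

u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
int err;

MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE);
MLX5_SET(modify_flow_table_in, in, table_type, type);
MLX5_SET(modify_flow_table_in, in, table_id, table_id);
MLX5_SET(modify_flow_table_in, in, modify_field_select,
    MLX5_MODIFY_FLOW_TABLE_SELECT_MISS_ACTION_AND_ID);
MLX5_SET(modify_flow_table_in, in, flow_table_context.table_miss_action,
    MLX5_FLOW_TABLE_CONTEXT_TABLE_MISS_ACTION_IDENTIFIED);
MLX5_SET(modify_flow_table_in, in, flow_table_context.table_miss_id, next_id);
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));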
struct mlx5_ifc_modify_esw_vport_context_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@ -6906,28 +6985,23 @@ struct mlx5_ifc_create_flow_table_out_bits {
struct mlx5_ifc_create_flow_table_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
u8 reserved_at_10[0x10];
u8 reserved_1[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
u8 reserved_2[0xf];
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
u8 reserved_3[0x20];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_4[0x18];
u8 reserved_at_88[0x18];
u8 reserved_5[0x20];
u8 reserved_at_a0[0x20];
u8 reserved_6[0x8];
u8 level[0x8];
u8 reserved_7[0x8];
u8 log_size[0x8];
u8 reserved_8[0x120];
struct mlx5_ifc_flow_table_context_bits flow_table_context;
};
struct mlx5_ifc_create_flow_group_out_bits {

View File

@ -8,8 +8,8 @@ mlx5_cmd.c \
mlx5_cq.c \
mlx5_diagnostics.c \
mlx5_eq.c \
mlx5_eswitch_vacl.c \
mlx5_flow_table.c \
mlx5_fs_cmd.c \
mlx5_fs_tree.c \
mlx5_fw.c \
mlx5_health.c \
mlx5_mad.c \