Update the mlx5 shared driver code to the latest version, which
includes the following changes:

- Added eswitch ACL table management.
  Introduce an API for managing ACL tables. This API includes the
  following features (a usage sketch follows the new eswitch_vacl
  header below):
  1) VLAN filtering, for VST/VGT+ support.
  2) Spoof checking.
  3) Robust functionality to allow or drop general untagged/tagged
     traffic.
  4) Support for both ingress and egress ACL types.

- Added a loopback filter to the vACL table.

- Added setting of the multicast address list in the vport context.

- Added setting of promiscuous mode in the vport context.

- Set the VLAN list in the vport context (a usage sketch of the new
  vport setters follows their declarations below):
  1) Check the device capabilities to ensure the VLAN list is not
     longer than the firmware supports.
  2) Issue the MODIFY_NIC_VPORT_CONTEXT command.

- Changed MLX5_EEPROM_MAX_BYTES from 48 to 32 so that a single EEPROM
  read cannot cross the 128-byte page boundary. Previously the MCIA
  register was read in batches of 48 bytes. The third read would then
  run past the 127th byte, so part of the low page and part of the
  high page were read at the same time, which caused a bug (an
  illustrative sketch follows the EEPROM defines below):
    1st: bytes 0-47
    2nd: bytes 48-95
    3rd: bytes 96-143

MFC after:	1 week
Sponsored by:	Mellanox Technologies
Differential Revision:	https://reviews.freebsd.org/D4411
Hans Petter Selasky 2015-12-07 13:16:48 +00:00
parent 278ce1c919
commit 98a998d5e7
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=291939
8 changed files with 1024 additions and 3 deletions


@@ -1023,6 +1023,25 @@ enum {
MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT = 1 << 3
};
enum {
MLX5_UC_ADDR_CHANGE = (1 << 0),
MLX5_MC_ADDR_CHANGE = (1 << 1),
MLX5_VLAN_CHANGE = (1 << 2),
MLX5_PROMISC_CHANGE = (1 << 3),
MLX5_MTU_CHANGE = (1 << 4),
};
enum mlx5_list_type {
MLX5_NIC_VPORT_LIST_TYPE_UC = 0x0,
MLX5_NIC_VPORT_LIST_TYPE_MC = 0x1,
MLX5_NIC_VPORT_LIST_TYPE_VLAN = 0x2,
};
enum {
MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
};
/* MLX5 DEV CAPs */
/* TODO: EAT.ME */
@@ -1087,6 +1106,22 @@ enum mlx5_cap_type {
MLX5_GET(flow_table_eswitch_cap, \
mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
#define MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, \
flow_table_properties_esw_acl_egress.cap)
#define MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL_MAX(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, \
flow_table_properties_esw_acl_egress.cap)
#define MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, \
flow_table_properties_esw_acl_ingress.cap)
#define MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL_MAX(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, \
flow_table_properties_esw_acl_ingress.cap)
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)


@@ -934,7 +934,7 @@ struct mlx5_profile {
};
#define MLX5_EEPROM_MAX_BYTES 48
#define MLX5_EEPROM_MAX_BYTES 32
#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
#define MLX5_EEPROM_REVISION_ID_BYTE_MASK 0x0000ff00
#define MLX5_EEPROM_PAGE_3_VALID_BIT_MASK 0x00040000
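
The batching arithmetic behind this fix is easy to verify. Below is a
minimal stand-alone C sketch (an illustration, not part of this
commit) that prints the read batches for both values of
MLX5_EEPROM_MAX_BYTES and flags any batch crossing the 128-byte
low/high page boundary:

#include <stdio.h>

int
main(void)
{
	int batch, off;

	for (batch = 48; batch >= 32; batch -= 16) {
		printf("MLX5_EEPROM_MAX_BYTES = %d:\n", batch);
		for (off = 0; off + batch <= 144; off += batch)
			printf("  read bytes %3d..%3d%s\n",
			    off, off + batch - 1,
			    (off / 128) != ((off + batch - 1) / 128) ?
			    "  <-- crosses the 128-byte boundary" : "");
	}
	return (0);
}

With a batch of 48 the third read (bytes 96..143) straddles the page
boundary; with a batch of 32 every read stays inside a single
128-byte page.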


@@ -0,0 +1,46 @@
/*-
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef MLX5_ESWITCH_VACL_TABLE_H
#define MLX5_ESWITCH_VACL_TABLE_H
#include <dev/mlx5/driver.h>
void *mlx5_vacl_table_create(struct mlx5_core_dev *dev,
u16 vport, bool is_egress);
void mlx5_vacl_table_cleanup(void *acl_t);
int mlx5_vacl_table_add_vlan(void *acl_t, u16 vlan);
void mlx5_vacl_table_del_vlan(void *acl_t, u16 vlan);
int mlx5_vacl_table_enable_vlan_filter(void *acl_t);
void mlx5_vacl_table_disable_vlan_filter(void *acl_t);
int mlx5_vacl_table_drop_untagged(void *acl_t);
int mlx5_vacl_table_allow_untagged(void *acl_t);
int mlx5_vacl_table_drop_unknown_vlan(void *acl_t);
int mlx5_vacl_table_allow_unknown_vlan(void *acl_t);
int mlx5_vacl_table_set_spoofchk(void *acl_t, bool spoofchk, u8 *vport_mac);
#endif /* MLX5_ESWITCH_VACL_TABLE_H */
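
A minimal usage sketch of the new API (an illustration, not part of
this commit): "example_vacl_setup", the vport number and the VLAN ID
are hypothetical, as is the assumption that "dev" is a started
mlx5_core_dev and "mac" the vport's programmed MAC address.

static int
example_vacl_setup(struct mlx5_core_dev *dev, u8 *mac)
{
	void *acl;
	int err;

	/* Create an ingress ACL table for vport 1. */
	acl = mlx5_vacl_table_create(dev, 1, false);
	if (acl == NULL)
		return (-ENOMEM);

	/* Allow VLAN 100 and drop all other tagged traffic. */
	err = mlx5_vacl_table_add_vlan(acl, 100);
	if (err == 0)
		err = mlx5_vacl_table_enable_vlan_filter(acl);

	/* Drop untagged traffic as well. */
	if (err == 0)
		err = mlx5_vacl_table_drop_untagged(acl);

	/* Enforce the vport MAC as the only allowed source MAC. */
	if (err == 0)
		err = mlx5_vacl_table_set_spoofchk(acl, true, mac);

	if (err != 0)
		mlx5_vacl_table_cleanup(acl);
	return (err);
}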


@@ -62,7 +62,6 @@ enum {
(1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
(1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \
(1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \
(1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE) | \
(1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
(1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
@@ -473,6 +472,10 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
async_event_mask |= (1ull <<
MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT);
if (MLX5_CAP_GEN(dev, nic_vport_change_event))
async_event_mask |= (1ull <<
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", &dev->priv.uuari.uars[0]);


@@ -0,0 +1,803 @@
/*-
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <linux/etherdevice.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/flow_table.h>
#include <dev/mlx5/eswitch_vacl.h>
#include "mlx5_core.h"
enum {
MLX5_ACL_LOOPBACK_GROUP_IDX = 0,
MLX5_ACL_UNTAGGED_GROUP_IDX = 1,
MLX5_ACL_VLAN_GROUP_IDX = 2,
MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX = 3,
MLX5_ACL_DEFAULT_GROUP_IDX = 4,
MLX5_ACL_GROUPS_NUM,
};
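/*
 * For ingress ACL tables the loopback group is not created, so the
 * remaining groups are shifted down by one slot; see "shift_idx" in
 * mlx5_vacl_table_create_ft() below.
 */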
struct mlx_vacl_fr {
bool applied;
u32 fi;
u16 action;
};
struct mlx5_vacl_table {
struct mlx5_core_dev *dev;
u16 vport;
void *ft;
int max_ft_size;
int acl_type;
struct mlx_vacl_fr loopback_fr;
struct mlx_vacl_fr untagged_fr;
struct mlx_vacl_fr unknown_vlan_fr;
struct mlx_vacl_fr default_fr;
bool vlan_filter_enabled;
bool vlan_filter_applied;
unsigned long *vlan_allowed_bitmap;
u32 vlan_fi_table[4096];
bool spoofchk_enabled;
u8 smac[ETH_ALEN];
};
static int mlx5_vacl_table_allow_vlan(void *acl_t, u16 vlan)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
u32 *flow_context = NULL;
void *in_match_criteria = NULL;
void *in_match_value = NULL;
u8 *smac;
int vlan_mc_enable = MLX5_MATCH_OUTER_HEADERS;
int err = 0;
if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
return -EINVAL;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
if (!flow_context) {
err = -ENOMEM;
goto out;
}
in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!in_match_criteria) {
err = -ENOMEM;
goto out;
}
/* Apply vlan rule */
MLX5_SET(flow_context, flow_context, action,
MLX5_FLOW_CONTEXT_ACTION_ALLOW);
in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
MLX5_SET(fte_match_param, in_match_value, outer_headers.vlan_tag, 1);
MLX5_SET(fte_match_param, in_match_value, outer_headers.first_vid,
vlan);
MLX5_SET(fte_match_param, in_match_criteria, outer_headers.vlan_tag, 1);
MLX5_SET(fte_match_param, in_match_criteria, outer_headers.first_vid,
0xfff);
if (acl_table->spoofchk_enabled) {
smac = MLX5_ADDR_OF(fte_match_param,
in_match_value,
outer_headers.smac_47_16);
ether_addr_copy(smac, acl_table->smac);
smac = MLX5_ADDR_OF(fte_match_param,
in_match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
err = mlx5_add_flow_table_entry(acl_table->ft, vlan_mc_enable,
in_match_criteria, flow_context,
&acl_table->vlan_fi_table[vlan]);
out:
if (flow_context)
vfree(flow_context);
if (in_match_criteria)
vfree(in_match_criteria);
return err;
}
static int mlx5_vacl_table_apply_loopback_filter(void *acl_t, u16 new_action)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
u8 loopback_mc_enable = MLX5_MATCH_MISC_PARAMETERS;
u32 *flow_context = NULL;
void *in_match_criteria = NULL;
void *in_match_value = NULL;
void *mv_misc = NULL;
void *mc_misc = NULL;
int err = 0;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
if (!flow_context) {
err = -ENOMEM;
goto out;
}
in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!in_match_criteria) {
err = -ENOMEM;
goto out;
}
if (acl_table->loopback_fr.applied)
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->loopback_fr.fi);
/* Apply new loopback rule */
MLX5_SET(flow_context, flow_context, action, new_action);
in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
mv_misc = MLX5_ADDR_OF(fte_match_param, in_match_value,
misc_parameters);
mc_misc = MLX5_ADDR_OF(fte_match_param, in_match_criteria,
misc_parameters);
MLX5_SET(fte_match_set_misc, mv_misc, source_port, acl_table->vport);
MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
err = mlx5_add_flow_table_entry(acl_table->ft, loopback_mc_enable,
in_match_criteria, flow_context,
&acl_table->loopback_fr.fi);
if (err) {
acl_table->loopback_fr.applied = false;
} else {
acl_table->loopback_fr.applied = true;
acl_table->loopback_fr.action = new_action;
}
out:
if (flow_context)
vfree(flow_context);
if (in_match_criteria)
vfree(in_match_criteria);
return err;
}
static int mlx5_vacl_table_apply_default(void *acl_t, u16 new_action)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
u8 default_mc_enable = 0;
u32 *flow_context = NULL;
void *in_match_criteria = NULL;
int err = 0;
if (!acl_table->spoofchk_enabled)
return -EINVAL;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
if (!flow_context) {
err = -ENOMEM;
goto out;
}
in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!in_match_criteria) {
err = -ENOMEM;
goto out;
}
if (acl_table->default_fr.applied)
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->default_fr.fi);
/* Apply new default rule */
MLX5_SET(flow_context, flow_context, action, new_action);
err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
in_match_criteria, flow_context,
&acl_table->default_fr.fi);
if (err) {
acl_table->default_fr.applied = false;
} else {
acl_table->default_fr.applied = true;
acl_table->default_fr.action = new_action;
}
out:
if (flow_context)
vfree(flow_context);
if (in_match_criteria)
vfree(in_match_criteria);
return err;
}
static int mlx5_vacl_table_apply_untagged(void *acl_t, u16 new_action)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
u8 untagged_mc_enable = MLX5_MATCH_OUTER_HEADERS;
u8 *smac;
u32 *flow_context = NULL;
void *in_match_criteria = NULL;
void *in_match_value = NULL;
int err = 0;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
if (!flow_context) {
err = -ENOMEM;
goto out;
}
in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!in_match_criteria) {
err = -ENOMEM;
goto out;
}
if (acl_table->untagged_fr.applied)
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->untagged_fr.fi);
/* Apply new untagged rule */
MLX5_SET(flow_context, flow_context, action, new_action);
in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
MLX5_SET(fte_match_param, in_match_value, outer_headers.vlan_tag, 0);
MLX5_SET(fte_match_param, in_match_criteria, outer_headers.vlan_tag, 1);
if (acl_table->spoofchk_enabled) {
smac = MLX5_ADDR_OF(fte_match_param,
in_match_value,
outer_headers.smac_47_16);
ether_addr_copy(smac, acl_table->smac);
smac = MLX5_ADDR_OF(fte_match_param,
in_match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
err = mlx5_add_flow_table_entry(acl_table->ft, untagged_mc_enable,
in_match_criteria, flow_context,
&acl_table->untagged_fr.fi);
if (err) {
acl_table->untagged_fr.applied = false;
} else {
acl_table->untagged_fr.applied = true;
acl_table->untagged_fr.action = new_action;
}
out:
if (flow_context)
vfree(flow_context);
if (in_match_criteria)
vfree(in_match_criteria);
return err;
}
static int mlx5_vacl_table_apply_unknown_vlan(void *acl_t, u16 new_action)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
u8 default_mc_enable = (!acl_table->spoofchk_enabled) ? 0 :
MLX5_MATCH_OUTER_HEADERS;
u32 *flow_context = NULL;
void *in_match_criteria = NULL;
void *in_match_value = NULL;
u8 *smac;
int err = 0;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
if (!flow_context) {
err = -ENOMEM;
goto out;
}
in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!in_match_criteria) {
err = -ENOMEM;
goto out;
}
if (acl_table->unknown_vlan_fr.applied)
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->unknown_vlan_fr.fi);
/* Apply new unknown vlan rule */
MLX5_SET(flow_context, flow_context, action, new_action);
if (acl_table->spoofchk_enabled) {
in_match_value = MLX5_ADDR_OF(flow_context, flow_context,
match_value);
smac = MLX5_ADDR_OF(fte_match_param,
in_match_value,
outer_headers.smac_47_16);
ether_addr_copy(smac, acl_table->smac);
smac = MLX5_ADDR_OF(fte_match_param,
in_match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
in_match_criteria, flow_context,
&acl_table->unknown_vlan_fr.fi);
if (err) {
acl_table->unknown_vlan_fr.applied = false;
} else {
acl_table->unknown_vlan_fr.applied = true;
acl_table->unknown_vlan_fr.action = new_action;
}
out:
if (flow_context)
vfree(flow_context);
if (in_match_criteria)
vfree(in_match_criteria);
return err;
}
static int mlx5_vacl_table_apply_vlan_filter(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int index = 0;
int err_index = 0;
int err = 0;
if (acl_table->vlan_filter_applied)
return 0;
for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
index < 4096;
index = find_next_bit(acl_table->vlan_allowed_bitmap,
4096, ++index)) {
err = mlx5_vacl_table_allow_vlan(acl_t, index);
if (err)
goto err_disable_vlans;
}
acl_table->vlan_filter_applied = true;
return 0;
err_disable_vlans:
for (err_index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
err_index < index;
err_index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
++err_index)) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->vlan_fi_table[err_index]);
}
return err;
}
static void mlx5_vacl_table_disapply_vlan_filter(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int index = 0;
if (!acl_table->vlan_filter_applied)
return;
for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
index < 4096;
index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
++index)) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->vlan_fi_table[index]);
}
acl_table->vlan_filter_applied = false;
}
static void mlx5_vacl_table_disapply_all_filters(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
if (acl_table->default_fr.applied) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->default_fr.fi);
acl_table->default_fr.applied = false;
}
if (acl_table->unknown_vlan_fr.applied) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->unknown_vlan_fr.fi);
acl_table->unknown_vlan_fr.applied = false;
}
if (acl_table->loopback_fr.applied) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->loopback_fr.fi);
acl_table->loopback_fr.applied = false;
}
if (acl_table->untagged_fr.applied) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->untagged_fr.fi);
acl_table->untagged_fr.applied = false;
}
if (acl_table->vlan_filter_applied) {
mlx5_vacl_table_disapply_vlan_filter(acl_t);
acl_table->vlan_filter_applied = false;
}
}
static int mlx5_vacl_table_apply_all_filters(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int err = 0;
if (!acl_table->default_fr.applied && acl_table->spoofchk_enabled) {
err = mlx5_vacl_table_apply_default(acl_table,
acl_table->default_fr.action);
if (err)
goto err_disapply_all;
}
if (!acl_table->unknown_vlan_fr.applied) {
err = mlx5_vacl_table_apply_unknown_vlan(acl_table,
acl_table->unknown_vlan_fr.action);
if (err)
goto err_disapply_all;
}
if (!acl_table->loopback_fr.applied &&
acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
err = mlx5_vacl_table_apply_loopback_filter(
acl_table,
acl_table->loopback_fr.action);
if (err)
goto err_disapply_all;
}
if (!acl_table->untagged_fr.applied) {
err = mlx5_vacl_table_apply_untagged(acl_table,
acl_table->untagged_fr.action);
if (err)
goto err_disapply_all;
}
if (!acl_table->vlan_filter_applied && acl_table->vlan_filter_enabled) {
err = mlx5_vacl_table_apply_vlan_filter(acl_t);
if (err)
goto err_disapply_all;
}
goto out;
err_disapply_all:
mlx5_vacl_table_disapply_all_filters(acl_t);
out:
return err;
}
static void mlx5_vacl_table_destroy_ft(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
mlx5_vacl_table_disapply_all_filters(acl_t);
if (acl_table->ft)
mlx5_destroy_flow_table(acl_table->ft);
acl_table->ft = NULL;
}
static int mlx5_vacl_table_create_ft(void *acl_t, bool spoofchk)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int log_acl_ft_size;
int err = 0;
int groups_num = MLX5_ACL_GROUPS_NUM - 1;
int shift_idx = MLX5_ACL_UNTAGGED_GROUP_IDX;
u8 *smac;
struct mlx5_flow_table_group *g;
if (acl_table->ft)
return -EINVAL;
g = kcalloc(MLX5_ACL_GROUPS_NUM, sizeof(*g), GFP_KERNEL);
if (!g) {
err = -ENOMEM;
goto out;
}
acl_table->spoofchk_enabled = spoofchk;
/*
* for vlan group
*/
log_acl_ft_size = 4096;
/*
* for loopback filter rule
*/
log_acl_ft_size += 1;
/*
* for untagged rule
*/
log_acl_ft_size += 1;
/*
* for unknown vlan rule
*/
log_acl_ft_size += 1;
/*
* for default rule
*/
log_acl_ft_size += 1;
log_acl_ft_size = order_base_2(log_acl_ft_size);
log_acl_ft_size = min_t(int, log_acl_ft_size, acl_table->max_ft_size);
if (log_acl_ft_size < 2) {
err = -EINVAL;
goto out;
}
if (acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
/* Loopback filter group */
g[MLX5_ACL_LOOPBACK_GROUP_IDX].log_sz = 0;
g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria_enable =
MLX5_MATCH_MISC_PARAMETERS;
MLX5_SET_TO_ONES(fte_match_param,
g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria,
misc_parameters.source_port);
groups_num++;
shift_idx = MLX5_ACL_LOOPBACK_GROUP_IDX;
}
/* Untagged traffic group */
g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].log_sz = 0;
g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria_enable =
MLX5_MATCH_OUTER_HEADERS;
MLX5_SET(fte_match_param,
g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria,
outer_headers.vlan_tag, 1);
if (spoofchk) {
smac = MLX5_ADDR_OF(fte_match_param,
g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx]
.match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
/* Allowed vlans group */
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].log_sz = log_acl_ft_size - 1;
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
MLX5_MATCH_OUTER_HEADERS;
MLX5_SET(fte_match_param,
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
outer_headers.vlan_tag, 1);
MLX5_SET(fte_match_param,
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
outer_headers.first_vid, 0xfff);
if (spoofchk) {
smac = MLX5_ADDR_OF(fte_match_param,
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx]
.match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
/* Unknown vlan traffic group */
g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].log_sz = 0;
g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
(spoofchk ? MLX5_MATCH_OUTER_HEADERS : 0);
if (spoofchk) {
smac = MLX5_ADDR_OF(
fte_match_param,
g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx]
.match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
/*
* Default group - for spoofchk only.
*/
g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].log_sz = 0;
g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].match_criteria_enable = 0;
acl_table->ft = mlx5_create_flow_table(acl_table->dev,
0,
acl_table->acl_type,
acl_table->vport,
groups_num,
g);
if (!acl_table->ft) {
err = -ENOMEM;
goto out;
}
err = mlx5_vacl_table_apply_all_filters(acl_t);
if (err)
goto err_destroy_ft;
goto out;
err_destroy_ft:
mlx5_vacl_table_destroy_ft(acl_t);
acl_table->ft = NULL;
out:
kfree(g);
return err;
}
void *mlx5_vacl_table_create(struct mlx5_core_dev *dev,
u16 vport, bool is_egress)
{
struct mlx5_vacl_table *acl_table;
int err = 0;
if (is_egress && !MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(dev, ft_support))
return NULL;
if (!is_egress && !MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(dev, ft_support))
return NULL;
acl_table = kzalloc(sizeof(*acl_table), GFP_KERNEL);
if (!acl_table)
return NULL;
acl_table->acl_type = is_egress ? MLX5_FLOW_TABLE_TYPE_EGRESS_ACL :
MLX5_FLOW_TABLE_TYPE_INGRESS_ACL;
acl_table->max_ft_size = (is_egress ?
MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(dev,
log_max_ft_size) :
MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(dev,
log_max_ft_size));
acl_table->dev = dev;
acl_table->vport = vport;
/*
* default behavior : Allow and if spoofchk drop the default
*/
acl_table->default_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
acl_table->loopback_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
acl_table->unknown_vlan_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
acl_table->untagged_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
err = mlx5_vacl_table_create_ft(acl_table, false);
if (err)
goto err_free_acl_table;
acl_table->vlan_allowed_bitmap = kcalloc(BITS_TO_LONGS(4096),
sizeof(uintptr_t),
GFP_KERNEL);
if (!acl_table->vlan_allowed_bitmap)
goto err_destroy_ft;
goto out;
err_destroy_ft:
mlx5_vacl_table_destroy_ft(acl_table);
acl_table->ft = NULL;
err_free_acl_table:
kfree(acl_table);
acl_table = NULL;
out:
return (void *)acl_table;
}
EXPORT_SYMBOL(mlx5_vacl_table_create);
void mlx5_vacl_table_cleanup(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
mlx5_vacl_table_destroy_ft(acl_t);
kfree(acl_table->vlan_allowed_bitmap);
kfree(acl_table);
}
EXPORT_SYMBOL(mlx5_vacl_table_cleanup);
int mlx5_vacl_table_add_vlan(void *acl_t, u16 vlan)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int err = 0;
if (test_bit(vlan, acl_table->vlan_allowed_bitmap))
return 0;
__set_bit(vlan, acl_table->vlan_allowed_bitmap);
if (!acl_table->vlan_filter_applied)
return 0;
err = mlx5_vacl_table_allow_vlan(acl_t, vlan);
if (err)
goto err_clear_vbit;
goto out;
err_clear_vbit:
__clear_bit(vlan, acl_table->vlan_allowed_bitmap);
out:
return err;
}
EXPORT_SYMBOL(mlx5_vacl_table_add_vlan);
void mlx5_vacl_table_del_vlan(void *acl_t, u16 vlan)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
return;
__clear_bit(vlan, acl_table->vlan_allowed_bitmap);
if (!acl_table->vlan_filter_applied)
return;
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->vlan_fi_table[vlan]);
}
EXPORT_SYMBOL(mlx5_vacl_table_del_vlan);
int mlx5_vacl_table_enable_vlan_filter(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
acl_table->vlan_filter_enabled = true;
return mlx5_vacl_table_apply_vlan_filter(acl_t);
}
EXPORT_SYMBOL(mlx5_vacl_table_enable_vlan_filter);
void mlx5_vacl_table_disable_vlan_filter(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
acl_table->vlan_filter_enabled = false;
mlx5_vacl_table_disapply_vlan_filter(acl_t);
}
EXPORT_SYMBOL(mlx5_vacl_table_disable_vlan_filter);
int mlx5_vacl_table_drop_untagged(void *acl_t)
{
return mlx5_vacl_table_apply_untagged(acl_t,
MLX5_FLOW_CONTEXT_ACTION_DROP);
}
EXPORT_SYMBOL(mlx5_vacl_table_drop_untagged);
int mlx5_vacl_table_allow_untagged(void *acl_t)
{
return mlx5_vacl_table_apply_untagged(acl_t,
MLX5_FLOW_CONTEXT_ACTION_ALLOW);
}
EXPORT_SYMBOL(mlx5_vacl_table_allow_untagged);
int mlx5_vacl_table_drop_unknown_vlan(void *acl_t)
{
return mlx5_vacl_table_apply_unknown_vlan(acl_t,
MLX5_FLOW_CONTEXT_ACTION_DROP);
}
EXPORT_SYMBOL(mlx5_vacl_table_drop_unknown_vlan);
int mlx5_vacl_table_allow_unknown_vlan(void *acl_t)
{
return mlx5_vacl_table_apply_unknown_vlan(acl_t,
MLX5_FLOW_CONTEXT_ACTION_ALLOW);
}
EXPORT_SYMBOL(mlx5_vacl_table_allow_unknown_vlan);
int mlx5_vacl_table_set_spoofchk(void *acl_t, bool spoofchk, u8 *vport_mac)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int err = 0;
if (spoofchk == acl_table->spoofchk_enabled) {
if (!spoofchk ||
(spoofchk && !memcmp(acl_table->smac, vport_mac, ETH_ALEN)))
return 0;
}
ether_addr_copy(acl_table->smac, vport_mac);
if (spoofchk != acl_table->spoofchk_enabled) {
mlx5_vacl_table_destroy_ft(acl_t);
err = mlx5_vacl_table_create_ft(acl_t, spoofchk);
} else {
mlx5_vacl_table_disapply_all_filters(acl_t);
err = mlx5_vacl_table_apply_all_filters(acl_t);
}
return err;
}
EXPORT_SYMBOL(mlx5_vacl_table_set_spoofchk);


@@ -328,7 +328,8 @@ int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
MLX5_SET(modify_nic_vport_context_in, in,
field_select.addresses_list, 1);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.allowed_list_type, 0);
nic_vport_context.allowed_list_type,
MLX5_NIC_VPORT_LIST_TYPE_UC);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.allowed_list_size, 1);
@@ -345,6 +346,131 @@ int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u32 vport,
u16 *vlan_list, int list_len)
{
void *in, *ctx;
int i, err;
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
+ MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
if (list_len > max_list_size) {
mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
list_len, max_list_size);
return -ENOSPC;
}
in = mlx5_vzalloc(inlen);
if (!in) {
mlx5_core_warn(dev, "failed to allocate inbox\n");
return -ENOMEM;
}
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(modify_nic_vport_context_in, in,
other_vport, 1);
MLX5_SET(modify_nic_vport_context_in, in,
field_select.addresses_list, 1);
ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
MLX5_SET(nic_vport_context, ctx, allowed_list_type,
MLX5_NIC_VPORT_LIST_TYPE_VLAN);
MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);
for (i = 0; i < list_len; i++) {
u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
current_uc_mac_address[i]);
MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
}
err = mlx5_modify_nic_vport_context(dev, in, inlen);
kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
u64 *addr_list, size_t addr_list_len)
{
void *in, *ctx;
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
+ MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
int err;
size_t i;
int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);
if ((int)addr_list_len > max_list_sz) {
mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
(int)addr_list_len, max_list_sz);
return -ENOSPC;
}
in = mlx5_vzalloc(inlen);
if (!in) {
mlx5_core_warn(mdev, "failed to allocate inbox\n");
return -ENOMEM;
}
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(modify_nic_vport_context_in, in,
other_vport, 1);
MLX5_SET(modify_nic_vport_context_in, in,
field_select.addresses_list, 1);
ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
MLX5_SET(nic_vport_context, ctx, allowed_list_type,
MLX5_NIC_VPORT_LIST_TYPE_MC);
MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);
for (i = 0; i < addr_list_len; i++) {
u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
current_uc_mac_address[i]);
u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
mac_addr_47_32);
ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
}
err = mlx5_modify_nic_vport_context(mdev, in, inlen);
kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
bool promisc_mc, bool promisc_uc,
bool promisc_all)
{
u8 in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
nic_vport_context);
memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(modify_nic_vport_context_in, in,
other_vport, 1);
MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
if (promisc_mc)
MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
if (promisc_uc)
MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
if (promisc_all)
MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
u8 *addr)
{


@@ -42,6 +42,13 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u32 vport, u8 *addr);
int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
bool other_vport, u8 *addr);
int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u32 vport,
u16 *vlan_list, int list_len);
int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
u64 *addr_list, size_t addr_list_len);
int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
bool promisc_mc, bool promisc_uc,
bool promisc_all);
int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
u8 *addr);
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
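
A combined usage sketch of the new vport context setters (an
illustration, not part of this commit): "example_vport_setup" and the
address/VLAN values are hypothetical, and the in-memory layout of the
u64 multicast address is host-endian dependent.

static int
example_vport_setup(struct mlx5_core_dev *mdev)
{
	u16 vlans[2] = { 100, 200 };
	u64 mc_addr = 0x01005e000001ULL;	/* 01:00:5e:00:00:01 */
	int err;

	/* Restrict the vport to VLANs 100 and 200. */
	err = mlx5_set_nic_vport_vlan_list(mdev, 0, vlans, 2);

	/* Program one multicast address. */
	if (err == 0)
		err = mlx5_set_nic_vport_mc_list(mdev, 0, &mc_addr, 1);

	/* Enable multicast promiscuous mode only. */
	if (err == 0)
		err = mlx5_set_nic_vport_promisc(mdev, 0, true, false,
		    false);
	return (err);
}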


@@ -7,6 +7,7 @@ mlx5_alloc.c \
mlx5_cmd.c \
mlx5_cq.c \
mlx5_eq.c \
mlx5_eswitch_vacl.c \
mlx5_flow_table.c \
mlx5_fw.c \
mlx5_health.c \