mlx5en: Create and destroy all flow tables and rules when the network interface attaches and detaches.

Previously, flow steering tables and rules were only created and destroyed
at link up and down events, respectively. Due to new requirements for adding
TLS RX flow tables and rules, the main flow steering table must always be
available, because there are permanent redirections from the TLS RX flow
table to the vlan flow table.
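
In outline, the lifecycle after this change looks roughly as follows. This
is a minimal sketch with stubbed-out functions for illustration only, not
the driver code itself; the real functions and their error handling are in
the diff below:

	#include <stdio.h>

	/* Stubs standing in for the driver functions touched by this change. */
	static void mlx5e_open_flow_tables(void) { puts("create vlan, vxlan, main, main_vxlan and inner_rss flow tables"); }
	static void mlx5e_open_tirs(void)        { puts("open regular and inner-VXLAN TIRs"); }
	static void mlx5e_open_flow_rules(void)  { puts("install steering rules; set MLX5E_STATE_FLOW_RULES_READY"); }
	static void mlx5e_close_flow_rules(void) { puts("clear MLX5E_STATE_FLOW_RULES_READY; remove steering rules"); }
	static void mlx5e_close_tirs(void)       { puts("close TIRs"); }
	static void mlx5e_close_flow_tables(void){ puts("destroy flow tables"); }

	int main(void)
	{
		/* Attach time (mlx5e_create_ifp): tables and TIRs are created once. */
		mlx5e_open_flow_tables();
		mlx5e_open_tirs();
		mlx5e_open_flow_rules();

		/* Detach time (mlx5e_destroy_ifp): tear everything down in reverse. */
		mlx5e_close_flow_rules();
		mlx5e_close_tirs();
		mlx5e_close_flow_tables();
		return (0);
	}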

MFC after:	1 week
Sponsored by:	NVIDIA Networking
Author:	Hans Petter Selasky
Date:	2022-02-01 16:20:12 +01:00
Parent:	a8e715d21b
Commit:	e059c120b4

3 changed files with 136 additions and 132 deletions

@@ -978,6 +978,7 @@ struct mlx5e_eth_addr_db {
enum {
MLX5E_STATE_ASYNC_EVENTS_ENABLE,
MLX5E_STATE_OPENED,
MLX5E_STATE_FLOW_RULES_READY,
};
enum {
@@ -1188,23 +1189,21 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
void mlx5e_dim_work(struct work_struct *);
void mlx5e_dim_build_cq_param(struct mlx5e_priv *, struct mlx5e_cq_param *);
int mlx5e_open_flow_table(struct mlx5e_priv *priv);
void mlx5e_close_flow_table(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
int mlx5e_open_flow_tables(struct mlx5e_priv *priv);
void mlx5e_close_flow_tables(struct mlx5e_priv *priv);
int mlx5e_open_flow_rules(struct mlx5e_priv *priv);
void mlx5e_close_flow_rules(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);
void mlx5e_vlan_rx_add_vid(void *, struct ifnet *, u16);
void mlx5e_vlan_rx_kill_vid(void *, struct ifnet *, u16);
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
void mlx5e_vxlan_start(void *arg, struct ifnet *ifp, sa_family_t family,
u_int port);
void mlx5e_vxlan_stop(void *arg, struct ifnet *ifp, sa_family_t family,
u_int port);
int mlx5e_add_all_vxlan_rules(struct mlx5e_priv *priv);
void mlx5e_del_all_vxlan_rules(struct mlx5e_priv *priv);

@@ -1,5 +1,5 @@
/*-
* Copyright (c) 2015 Mellanox Technologies. All rights reserved.
* Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -91,6 +91,8 @@ struct mlx5e_eth_addr_hash_node {
struct mlx5e_eth_addr_info ai;
};
static void mlx5e_del_all_vlan_rules(struct mlx5e_priv *);
static inline int
mlx5e_hash_eth_addr(const u8 * addr)
{
@@ -764,8 +766,7 @@ mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = ((priv->ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0) ?
priv->fts.vxlan.t : priv->fts.main.t;
dest.ft = priv->fts.vxlan.t;
mc_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -899,7 +900,7 @@ mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
priv->vlan.filter_disabled = false;
if (priv->ifp->if_flags & IFF_PROMISC)
return;
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
mlx5e_del_any_vid_rules(priv);
}
}
@@ -911,7 +912,7 @@ mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
priv->vlan.filter_disabled = true;
if (priv->ifp->if_flags & IFF_PROMISC)
return;
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
mlx5e_add_any_vid_rules(priv);
}
}
@@ -926,7 +927,7 @@ mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
PRIV_LOCK(priv);
if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
test_bit(MLX5E_STATE_OPENED, &priv->state))
test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
PRIV_UNLOCK(priv);
}
@@ -941,12 +942,12 @@ mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
PRIV_LOCK(priv);
clear_bit(vid, priv->vlan.active_vlans);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
PRIV_UNLOCK(priv);
}
int
static int
mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
{
int err;
@@ -975,7 +976,7 @@ mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
return (err);
}
void
static void
mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
{
int i;
@@ -1248,19 +1249,18 @@ mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
hn->action = MLX5E_ACTION_DEL;
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
mlx5e_sync_ifp_addr(priv);
mlx5e_apply_ifp_addr(priv);
}
void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
static void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv, bool rx_mode_enable)
{
struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
struct ifnet *ndev = priv->ifp;
bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
bool broadcast_enabled = rx_mode_enable;
@@ -1312,8 +1312,8 @@ mlx5e_set_rx_mode_work(struct work_struct *work)
container_of(work, struct mlx5e_priv, set_rx_mode_work);
PRIV_LOCK(priv);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_set_rx_mode_core(priv);
if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
mlx5e_set_rx_mode_core(priv, true);
PRIV_UNLOCK(priv);
}
@@ -2015,7 +2015,7 @@ mlx5e_vxlan_start(void *arg, struct ifnet *ifp __unused, sa_family_t family,
PRIV_LOCK(priv);
err = mlx5_vxlan_udp_port_add(priv->mdev, port);
if (err == 0 && test_bit(MLX5E_STATE_OPENED, &priv->state))
if (err == 0 && test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
mlx5e_add_vxlan_rule(priv, family, port);
PRIV_UNLOCK(priv);
}
@@ -2027,7 +2027,7 @@ mlx5e_vxlan_stop(void *arg, struct ifnet *ifp __unused, sa_family_t family,
struct mlx5e_priv *priv = arg;
PRIV_LOCK(priv);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
mlx5e_del_vxlan_rule(priv, family, port);
(void)mlx5_vxlan_udp_port_delete(priv->mdev, port);
PRIV_UNLOCK(priv);
@@ -2260,28 +2260,26 @@ mlx5e_destroy_vxlan_flow_table(struct mlx5e_priv *priv)
}
int
mlx5e_open_flow_table(struct mlx5e_priv *priv)
mlx5e_open_flow_tables(struct mlx5e_priv *priv)
{
int err;
priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
/* setup namespace pointer */
priv->fts.ns = mlx5_get_flow_namespace(
priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
err = mlx5e_create_vlan_flow_table(priv);
if (err)
return (err);
if ((priv->ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0) {
err = mlx5e_create_vxlan_flow_table(priv);
if (err)
goto err_destroy_vlan_flow_table;
}
err = mlx5e_create_main_flow_table(priv, false);
if (err)
goto err_destroy_vxlan_flow_table;
if ((priv->ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0) {
err = mlx5e_create_main_flow_table(priv, true);
if (err)
goto err_destroy_main_flow_table;
@@ -2291,18 +2289,11 @@ mlx5e_open_flow_table(struct mlx5e_priv *priv)
goto err_destroy_main_vxlan_flow_table;
err = mlx5e_add_vxlan_catchall_rule(priv);
if (err != 0)
if (err)
goto err_destroy_inner_rss_flow_table;
err = mlx5e_add_main_vxlan_rules(priv);
if (err != 0)
goto err_destroy_vxlan_catchall_rule;
}
return (0);
err_destroy_vxlan_catchall_rule:
mlx5e_del_vxlan_catchall_rule(priv);
err_destroy_inner_rss_flow_table:
mlx5e_destroy_inner_rss_flow_table(priv);
err_destroy_main_vxlan_flow_table:
@@ -2310,7 +2301,6 @@ mlx5e_open_flow_table(struct mlx5e_priv *priv)
err_destroy_main_flow_table:
mlx5e_destroy_main_flow_table(priv);
err_destroy_vxlan_flow_table:
if ((priv->ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0)
mlx5e_destroy_vxlan_flow_table(priv);
err_destroy_vlan_flow_table:
mlx5e_destroy_vlan_flow_table(priv);
@@ -2319,18 +2309,55 @@ mlx5e_open_flow_table(struct mlx5e_priv *priv)
}
void
mlx5e_close_flow_table(struct mlx5e_priv *priv)
mlx5e_close_flow_tables(struct mlx5e_priv *priv)
{
mlx5e_handle_ifp_addr(priv);
if ((priv->ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0) {
mlx5e_destroy_inner_rss_flow_table(priv);
mlx5e_del_vxlan_catchall_rule(priv);
mlx5e_destroy_vxlan_flow_table(priv);
mlx5e_del_main_vxlan_rules(priv);
}
mlx5e_destroy_main_flow_table(priv);
if ((priv->ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0)
mlx5e_destroy_inner_rss_flow_table(priv);
mlx5e_destroy_main_vxlan_flow_table(priv);
mlx5e_destroy_main_flow_table(priv);
mlx5e_destroy_vxlan_flow_table(priv);
mlx5e_destroy_vlan_flow_table(priv);
}
int
mlx5e_open_flow_rules(struct mlx5e_priv *priv)
{
int err;
err = mlx5e_add_all_vlan_rules(priv);
if (err)
return (err);
err = mlx5e_add_main_vxlan_rules(priv);
if (err)
goto err_del_all_vlan_rules;
err = mlx5e_add_all_vxlan_rules(priv);
if (err)
goto err_del_main_vxlan_rules;
mlx5e_set_rx_mode_core(priv, true);
set_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);
return (0);
err_del_main_vxlan_rules:
mlx5e_del_main_vxlan_rules(priv);
err_del_all_vlan_rules:
mlx5e_del_all_vlan_rules(priv);
return (err);
}
void
mlx5e_close_flow_rules(struct mlx5e_priv *priv)
{
clear_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);
mlx5e_set_rx_mode_core(priv, false);
mlx5e_del_all_vxlan_rules(priv);
mlx5e_del_main_vxlan_rules(priv);
mlx5e_del_all_vlan_rules(priv);
}

@@ -3086,13 +3086,13 @@ mlx5e_close_tir(struct mlx5e_priv *priv, int tt, bool inner_vxlan)
}
static int
mlx5e_open_tirs(struct mlx5e_priv *priv, bool inner_vxlan)
mlx5e_open_tirs(struct mlx5e_priv *priv)
{
int err;
int i;
for (i = 0; i < MLX5E_NUM_TT; i++) {
err = mlx5e_open_tir(priv, i, inner_vxlan);
for (i = 0; i != 2 * MLX5E_NUM_TT; i++) {
err = mlx5e_open_tir(priv, i / 2, (i % 2) ? true : false);
if (err)
goto err_close_tirs;
}
@@ -3101,18 +3101,18 @@ mlx5e_open_tirs(struct mlx5e_priv *priv, bool inner_vxlan)
err_close_tirs:
for (i--; i >= 0; i--)
mlx5e_close_tir(priv, i, inner_vxlan);
mlx5e_close_tir(priv, i / 2, (i % 2) ? true : false);
return (err);
}
static void
mlx5e_close_tirs(struct mlx5e_priv *priv, bool inner_vxlan)
mlx5e_close_tirs(struct mlx5e_priv *priv)
{
int i;
for (i = 0; i < MLX5E_NUM_TT; i++)
mlx5e_close_tir(priv, i, inner_vxlan);
for (i = 0; i != 2 * MLX5E_NUM_TT; i++)
mlx5e_close_tir(priv, i / 2, (i % 2) ? true : false);
}
/*
@@ -3220,62 +3220,13 @@ mlx5e_open_locked(struct ifnet *ifp)
mlx5_en_err(ifp, "mlx5e_activate_rqt failed, %d\n", err);
goto err_close_channels;
}
err = mlx5e_open_tirs(priv, false);
if (err) {
mlx5_en_err(ifp, "mlx5e_open_tir(main) failed, %d\n", err);
goto err_close_rqls;
}
if ((ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0) {
err = mlx5e_open_tirs(priv, true);
if (err) {
mlx5_en_err(ifp, "mlx5e_open_tir(inner) failed, %d\n",
err);
goto err_close_tirs;
}
}
err = mlx5e_open_flow_table(priv);
if (err) {
mlx5_en_err(ifp,
"mlx5e_open_flow_table failed, %d\n", err);
goto err_close_tirs_inner;
}
err = mlx5e_add_all_vlan_rules(priv);
if (err) {
mlx5_en_err(ifp,
"mlx5e_add_all_vlan_rules failed, %d\n", err);
goto err_close_flow_table;
}
if ((ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0) {
err = mlx5e_add_all_vxlan_rules(priv);
if (err) {
mlx5_en_err(ifp,
"mlx5e_add_all_vxlan_rules failed, %d\n", err);
goto err_del_vlan_rules;
}
}
set_bit(MLX5E_STATE_OPENED, &priv->state);
mlx5e_update_carrier(priv);
mlx5e_set_rx_mode_core(priv);
return (0);
err_del_vlan_rules:
mlx5e_del_all_vlan_rules(priv);
err_close_flow_table:
mlx5e_close_flow_table(priv);
err_close_tirs_inner:
if ((ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0)
mlx5e_close_tirs(priv, true);
err_close_tirs:
mlx5e_close_tirs(priv, false);
err_close_rqls:
mlx5e_deactivate_rqt(priv);
err_close_channels:
mlx5e_close_channels(priv);
@@ -3315,15 +3266,8 @@ mlx5e_close_locked(struct ifnet *ifp)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
mlx5e_set_rx_mode_core(priv);
mlx5e_del_all_vlan_rules(priv);
if ((ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0)
mlx5e_del_all_vxlan_rules(priv);
if_link_state_change(priv->ifp, LINK_STATE_DOWN);
mlx5e_close_flow_table(priv);
if ((ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0)
mlx5e_close_tirs(priv, true);
mlx5e_close_tirs(priv, false);
mlx5e_deactivate_rqt(priv);
mlx5e_close_channels(priv);
mlx5_vport_dealloc_q_counter(priv->mdev,
@@ -3561,16 +3505,21 @@ mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
if (mask & IFCAP_WOL_MAGIC)
ifp->if_capenable ^= IFCAP_WOL_MAGIC;
if (mask & IFCAP_VXLAN_HWCSUM) {
int was_opened = test_bit(MLX5E_STATE_OPENED,
&priv->state);
if (was_opened)
mlx5e_close_locked(ifp);
const bool was_enabled =
(ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0;
if (was_enabled)
mlx5e_del_all_vxlan_rules(priv);
ifp->if_capenable ^= IFCAP_VXLAN_HWCSUM;
ifp->if_hwassist ^= CSUM_INNER_IP | CSUM_INNER_IP_UDP |
CSUM_INNER_IP_TCP | CSUM_INNER_IP6_UDP |
CSUM_INNER_IP6_TCP;
if (was_opened)
mlx5e_open_locked(ifp);
if (!was_enabled) {
int err = mlx5e_add_all_vxlan_rules(priv);
if (err != 0) {
mlx5_en_err(ifp,
"mlx5e_add_all_vxlan_rules() failed, %d (ignored)\n", err);
}
}
}
if (mask & IFCAP_VXLAN_HWTSO) {
ifp->if_capenable ^= IFCAP_VXLAN_HWTSO;
@@ -4603,6 +4552,18 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
goto err_open_drop_rq;
}
err = mlx5e_open_flow_tables(priv);
if (err) {
if_printf(ifp, "%s: mlx5e_open_flow_tables failed (%d)\n", __func__, err);
goto err_open_rqt;
}
err = mlx5e_open_tirs(priv);
if (err) {
mlx5_en_err(ifp, "mlx5e_open_tirs() failed, %d\n", err);
goto err_open_flow_tables;
}
/* set default MTU */
mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu);
@@ -4728,8 +4689,22 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
pa.pa_headname = ifp->if_xname;
priv->pfil = pfil_head_register(&pa);
PRIV_LOCK(priv);
err = mlx5e_open_flow_rules(priv);
if (err) {
mlx5_en_err(ifp,
"mlx5e_open_flow_rules() failed, %d (ignored)\n", err);
}
PRIV_UNLOCK(priv);
return (priv);
err_open_flow_tables:
mlx5e_close_flow_tables(priv);
err_open_rqt:
mlx5_core_destroy_rqt(priv->mdev, priv->rqtn, 0);
err_open_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
@@ -4819,6 +4794,7 @@ mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
/* make sure device gets closed */
PRIV_LOCK(priv);
mlx5e_close_locked(ifp);
mlx5e_close_flow_rules(priv);
PRIV_UNLOCK(priv);
/* deregister pfil */
@@ -4831,6 +4807,8 @@ mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
ifmedia_removeall(&priv->media);
ether_ifdetach(ifp);
mlx5e_close_tirs(priv);
mlx5e_close_flow_tables(priv);
mlx5_core_destroy_rqt(priv->mdev, priv->rqtn, 0);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_tls_cleanup(priv);