net/mlx5: fix tunnel offload object allocation

The original patch allocated tunnel offload objects with invalid
indexes. As a result, PMD tunnel object allocation failed.

With this patch, the indexed pool provides both the index and the memory
for a new tunnel offload object.
The tunnel offload ipool is also moved to DV-enabled code only.

Fixes: 4ae8825c50 ("net/mlx5: use indexed pool as id generator")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Gregory Etelson authored on 2020-11-16 16:02:21 +02:00, committed by Ferruh Yigit
parent eab3ca4858
commit 9cac7ded37
3 changed files with 37 additions and 52 deletions
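
For context: the fix replaces the old two-step pattern (allocate an ID from one
ipool, then mlx5_malloc() the tunnel object separately) with a single
mlx5_ipool_zmalloc() call on the new MLX5_IPOOL_TUNNEL_ID pool, which returns
both zeroed memory for the object and its index. Below is a minimal sketch of
the new pattern using the identifiers visible in the diff; the wrapper function
itself is illustrative only, and the MLX5_MAX_TUNNELS bounds check from the
real code is omitted.

/* Sketch: one ipool call yields both the object memory and its ID. */
static struct mlx5_flow_tunnel *
tunnel_obj_alloc(struct mlx5_priv *priv)
{
	struct mlx5_indexed_pool *ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
	struct mlx5_flow_tunnel *tunnel;
	uint32_t id;

	tunnel = mlx5_ipool_zmalloc(ipool, &id);	/* zeroed object + index */
	if (!tunnel)
		return NULL;
	tunnel->tunnel_id = id;	/* the index doubles as the PMD tunnel ID */
	return tunnel;
}

The matching release path is mlx5_ipool_free(ipool, tunnel->tunnel_id), as
shown in the mlx5_flow.c hunk below.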

drivers/net/mlx5/mlx5.c

@@ -186,7 +186,7 @@ static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
{
[MLX5_IPOOL_DECAP_ENCAP] = {
.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
.trunk_size = 64,
.grow_trunk = 3,
@@ -197,7 +197,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.free = mlx5_free,
.type = "mlx5_encap_decap_ipool",
},
{
[MLX5_IPOOL_PUSH_VLAN] = {
.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
.trunk_size = 64,
.grow_trunk = 3,
@@ -208,7 +208,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.free = mlx5_free,
.type = "mlx5_push_vlan_ipool",
},
{
[MLX5_IPOOL_TAG] = {
.size = sizeof(struct mlx5_flow_dv_tag_resource),
.trunk_size = 64,
.grow_trunk = 3,
@@ -219,7 +219,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.free = mlx5_free,
.type = "mlx5_tag_ipool",
},
{
[MLX5_IPOOL_PORT_ID] = {
.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
.trunk_size = 64,
.grow_trunk = 3,
@@ -230,7 +230,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.free = mlx5_free,
.type = "mlx5_port_id_ipool",
},
{
[MLX5_IPOOL_JUMP] = {
.size = sizeof(struct mlx5_flow_tbl_data_entry),
.trunk_size = 64,
.grow_trunk = 3,
@@ -241,7 +241,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.free = mlx5_free,
.type = "mlx5_jump_ipool",
},
{
[MLX5_IPOOL_SAMPLE] = {
.size = sizeof(struct mlx5_flow_dv_sample_resource),
.trunk_size = 64,
.grow_trunk = 3,
@@ -252,7 +252,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.free = mlx5_free,
.type = "mlx5_sample_ipool",
},
{
[MLX5_IPOOL_DEST_ARRAY] = {
.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
.trunk_size = 64,
.grow_trunk = 3,
@@ -263,8 +263,19 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.free = mlx5_free,
.type = "mlx5_dest_array_ipool",
},
[MLX5_IPOOL_TUNNEL_ID] = {
.size = sizeof(struct mlx5_flow_tunnel),
.need_lock = 1,
.release_mem_en = 1,
.type = "mlx5_tunnel_offload",
},
[MLX5_IPOOL_TNL_TBL_ID] = {
.size = 0,
.need_lock = 1,
.type = "mlx5_flow_tnl_tbl_ipool",
},
#endif
{
[MLX5_IPOOL_MTR] = {
.size = sizeof(struct mlx5_flow_meter),
.trunk_size = 64,
.grow_trunk = 3,
@@ -275,7 +286,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.free = mlx5_free,
.type = "mlx5_meter_ipool",
},
{
[MLX5_IPOOL_MCP] = {
.size = sizeof(struct mlx5_flow_mreg_copy_resource),
.trunk_size = 64,
.grow_trunk = 3,
@@ -286,7 +297,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.free = mlx5_free,
.type = "mlx5_mcp_ipool",
},
{
[MLX5_IPOOL_HRXQ] = {
.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
.trunk_size = 64,
.grow_trunk = 3,
@@ -297,7 +308,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.free = mlx5_free,
.type = "mlx5_hrxq_ipool",
},
{
[MLX5_IPOOL_MLX5_FLOW] = {
/*
* MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
* It set in run time according to PCI function configuration.
@@ -312,7 +323,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.free = mlx5_free,
.type = "mlx5_flow_handle_ipool",
},
{
[MLX5_IPOOL_RTE_FLOW] = {
.size = sizeof(struct rte_flow),
.trunk_size = 4096,
.need_lock = 1,
@@ -321,22 +332,12 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.free = mlx5_free,
.type = "rte_flow_ipool",
},
{
[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
.size = 0,
.need_lock = 1,
.type = "mlx5_flow_rss_id_ipool",
},
{
.size = 0,
.need_lock = 1,
.type = "mlx5_flow_tnl_flow_ipool",
},
{
.size = 0,
.need_lock = 1,
.type = "mlx5_flow_tnl_tbl_ipool",
},
{
[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
.size = sizeof(struct mlx5_shared_action_rss),
.trunk_size = 64,
.grow_trunk = 3,
@@ -347,7 +348,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
.free = mlx5_free,
.type = "mlx5_shared_action_rss",
},
};
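
A side note on the switch from positional to designated initializers above:
each entry in mlx5_ipool_cfg[] now lands at the slot named by its enum value,
so pools can move inside the #ifdef HAVE_IBV_FLOW_DV_SUPPORT block without
shifting the indices of the pools that follow. A small standalone illustration
of the idea (hypothetical enum, array, and feature flag; not driver code):

#include <stdio.h>

enum idx { A, B, C, IDX_MAX };

static const int cfg[IDX_MAX] = {
	[A] = 10,
#ifdef HAVE_FEATURE_B	/* hypothetical feature flag */
	[B] = 20,
#endif
	[C] = 30,	/* stays at slot C whether or not B is compiled in */
};

int main(void)
{
	printf("cfg[C] = %d\n", cfg[C]);	/* always 30 */
	return 0;
}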

drivers/net/mlx5/mlx5.h

@@ -44,6 +44,8 @@ enum mlx5_ipool_index {
MLX5_IPOOL_JUMP, /* Pool for jump resource. */
MLX5_IPOOL_SAMPLE, /* Pool for sample resource. */
MLX5_IPOOL_DEST_ARRAY, /* Pool for destination array resource. */
MLX5_IPOOL_TUNNEL_ID, /* Pool for tunnel offload context */
MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
#endif
MLX5_IPOOL_MTR, /* Pool for meter resource. */
MLX5_IPOOL_MCP, /* Pool for metadata resource. */
@@ -51,8 +53,6 @@ enum mlx5_ipool_index {
MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */
MLX5_IPOOL_TUNNEL_ID, /* Pool for flow tunnel ID. */
MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
MLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. */
MLX5_IPOOL_MAX,
};

drivers/net/mlx5/mlx5_flow.c

@@ -7406,14 +7406,13 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
struct mlx5_flow_tunnel *tunnel)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_indexed_pool *ipool;
DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
dev->data->port_id, tunnel->tunnel_id);
RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
tunnel->tunnel_id);
mlx5_hlist_destroy(tunnel->groups);
mlx5_free(tunnel);
ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
mlx5_ipool_free(ipool, tunnel->tunnel_id);
}
static struct mlx5_flow_tunnel *
@@ -7435,39 +7434,25 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
const struct rte_flow_tunnel *app_tunnel)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_indexed_pool *ipool;
struct mlx5_flow_tunnel *tunnel;
uint32_t id;
mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
&id);
ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
tunnel = mlx5_ipool_zmalloc(ipool, &id);
if (!tunnel)
return NULL;
if (id >= MLX5_MAX_TUNNELS) {
mlx5_ipool_free(priv->sh->ipool
[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
mlx5_ipool_free(ipool, id);
DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
return NULL;
} else if (!id) {
return NULL;
}
/**
* mlx5 flow tunnel is an auxlilary data structure
* It's not part of IO. No need to allocate it from
* huge pages pools dedicated for IO
*/
tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
0, SOCKET_ID_ANY);
if (!tunnel) {
mlx5_ipool_free(priv->sh->ipool
[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
return NULL;
}
tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
mlx5_flow_tunnel_grp2tbl_create_cb,
NULL,
mlx5_flow_tunnel_grp2tbl_remove_cb);
if (!tunnel->groups) {
mlx5_ipool_free(priv->sh->ipool
[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
mlx5_free(tunnel);
mlx5_ipool_free(ipool, id);
return NULL;
}
tunnel->groups->ctx = priv->sh;