net/i40e: optimize flow director memory management

This patch pre-allocates a memory pool for flow management to avoid
calling rte_zmalloc/rte_free every time a flow is created or destroyed.
This patch also improves the hash table operation. When adding/removing
a flow, the software directly adds/deletes it from the hash table.
If any error occurs, it then rolls back the operation it just performed.

Signed-off-by: Chenmin Sun <chenmin.sun@intel.com>
Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>
Author: Chenmin Sun, 2020-07-18 01:36:56 +08:00
Committer: Ferruh Yigit
Parent: f97d6192d5
Commit: 3e44dbd028
4 changed files with 280 additions and 70 deletions
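
For reference, below is a minimal standalone sketch of the pre-allocation pattern this commit introduces, assuming only the DPDK rte_bitmap and rte_malloc APIs: a bitmap tracks which slots of a pre-allocated entry array are free, so getting and putting an entry never calls rte_zmalloc/rte_free. The names demo_entry, demo_pool, demo_pool_init, demo_get and demo_put are illustrative and are not part of the driver.

#include <errno.h>
#include <stdint.h>
#include <rte_bitmap.h>
#include <rte_common.h>   /* rte_bsf64() */
#include <rte_malloc.h>

struct demo_entry {
        uint32_t idx;                   /* slot index, fixed at init time */
        /* payload lives here (struct rte_flow in the driver) */
};

struct demo_pool {
        struct rte_bitmap *bitmap;      /* a set bit means the slot is free */
        struct demo_entry *entries;
        uint32_t size;
};

static int
demo_pool_init(struct demo_pool *p, uint32_t size)
{
        uint32_t bmp_size = rte_bitmap_get_memory_footprint(size);
        void *mem = rte_zmalloc("demo_bmp", bmp_size, RTE_CACHE_LINE_SIZE);
        uint32_t i;

        p->entries = rte_zmalloc("demo_entries",
                                 size * sizeof(*p->entries), 0);
        if (mem == NULL || p->entries == NULL)
                return -ENOMEM; /* a real driver frees whatever was allocated */

        p->bitmap = rte_bitmap_init(size, mem, bmp_size);
        if (p->bitmap == NULL)
                return -ENOMEM;

        p->size = size;
        for (i = 0; i < size; i++) {
                p->entries[i].idx = i;
                rte_bitmap_set(p->bitmap, i);   /* all slots start out free */
        }
        return 0;
}

static struct demo_entry *
demo_get(struct demo_pool *p)
{
        uint64_t slab = 0;
        uint32_t pos = 0;

        /* find any slab with a free slot; returns 0 when nothing is free */
        if (rte_bitmap_scan(p->bitmap, &pos, &slab) == 0)
                return NULL;
        pos += rte_bsf64(slab);                 /* first free slot in the slab */
        rte_bitmap_clear(p->bitmap, pos);       /* mark the slot as in use */
        return &p->entries[pos];
}

static void
demo_put(struct demo_pool *p, struct demo_entry *e)
{
        rte_bitmap_set(p->bitmap, e->idx);      /* mark the slot as free again */
}

The driver code below follows the same shape: i40e_fdir_entry_pool_get() scans the bitmap for a free slot and clears it, i40e_fdir_entry_pool_put() sets the bit again using the idx stored in the entry, and when hardware programming fails, i40e_flow_add_del_fdir_filter() rolls back by deleting (or re-inserting) the software entry it has just touched.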


@@ -1051,6 +1051,10 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
char fdir_hash_name[RTE_HASH_NAMESIZE];
uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
uint32_t best = hw->func_caps.fd_filters_best_effort;
struct rte_bitmap *bmp = NULL;
uint32_t bmp_size;
void *mem = NULL;
uint32_t i = 0;
int ret;
struct rte_hash_parameters fdir_hash_params = {
@@ -1083,6 +1087,18 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
goto err_fdir_hash_map_alloc;
}
fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
sizeof(struct i40e_fdir_filter) *
I40E_MAX_FDIR_FILTER_NUM,
0);
if (!fdir_info->fdir_filter_array) {
PMD_INIT_LOG(ERR,
"Failed to allocate memory for fdir filter array!");
ret = -ENOMEM;
goto err_fdir_filter_array_alloc;
}
fdir_info->fdir_space_size = alloc + best;
fdir_info->fdir_actual_cnt = 0;
fdir_info->fdir_guarantee_total_space = alloc;
@@ -1091,8 +1107,53 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
PMD_DRV_LOG(INFO, "FDIR guarantee space: %u, best_effort space %u.", alloc, best);
fdir_info->fdir_flow_pool.pool =
rte_zmalloc("i40e_fdir_entry",
sizeof(struct i40e_fdir_entry) *
fdir_info->fdir_space_size,
0);
if (!fdir_info->fdir_flow_pool.pool) {
PMD_INIT_LOG(ERR,
"Failed to allocate memory for bitmap flow!");
ret = -ENOMEM;
goto err_fdir_bitmap_flow_alloc;
}
for (i = 0; i < fdir_info->fdir_space_size; i++)
fdir_info->fdir_flow_pool.pool[i].idx = i;
bmp_size =
rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
if (mem == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate memory for fdir bitmap!");
ret = -ENOMEM;
goto err_fdir_mem_alloc;
}
bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
if (bmp == NULL) {
PMD_INIT_LOG(ERR,
"Failed to initialization fdir bitmap!");
ret = -ENOMEM;
goto err_fdir_bmp_alloc;
}
for (i = 0; i < fdir_info->fdir_space_size; i++)
rte_bitmap_set(bmp, i);
fdir_info->fdir_flow_pool.bitmap = bmp;
return 0;
err_fdir_bmp_alloc:
rte_free(mem);
err_fdir_mem_alloc:
rte_free(fdir_info->fdir_flow_pool.pool);
err_fdir_bitmap_flow_alloc:
rte_free(fdir_info->fdir_filter_array);
err_fdir_filter_array_alloc:
rte_free(fdir_info->hash_map);
err_fdir_hash_map_alloc:
rte_hash_free(fdir_info->hash_table);
@@ -1790,16 +1851,30 @@ i40e_rm_fdir_filter_list(struct i40e_pf *pf)
struct i40e_fdir_info *fdir_info;
fdir_info = &pf->fdir;
/* Remove all flow director rules and hash */
/* Remove all flow director rules */
while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
}
static void
i40e_fdir_memory_cleanup(struct i40e_pf *pf)
{
struct i40e_fdir_info *fdir_info;
fdir_info = &pf->fdir;
/* flow director memory cleanup */
if (fdir_info->hash_map)
rte_free(fdir_info->hash_map);
if (fdir_info->hash_table)
rte_hash_free(fdir_info->hash_table);
while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
rte_free(p_fdir);
}
if (fdir_info->fdir_flow_pool.bitmap)
rte_bitmap_free(fdir_info->fdir_flow_pool.bitmap);
if (fdir_info->fdir_flow_pool.pool)
rte_free(fdir_info->fdir_flow_pool.pool);
if (fdir_info->fdir_filter_array)
rte_free(fdir_info->fdir_filter_array);
}
void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
@@ -2659,9 +2734,14 @@ i40e_dev_close(struct rte_eth_dev *dev)
/* Remove all flows */
while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
TAILQ_REMOVE(&pf->flow_list, p_flow, node);
rte_free(p_flow);
/* Do not free FDIR flows since they are statically allocated */
if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
rte_free(p_flow);
}
/* release the fdir static allocated memory */
i40e_fdir_memory_cleanup(pf);
/* Remove all Traffic Manager configuration */
i40e_tm_conf_uninit(dev);


@@ -264,6 +264,15 @@ enum i40e_flxpld_layer_idx {
#define I40E_DEFAULT_DCB_APP_NUM 1
#define I40E_DEFAULT_DCB_APP_PRIO 3
/*
* Struct to store flow created.
*/
struct rte_flow {
TAILQ_ENTRY(rte_flow) node;
enum rte_filter_type filter_type;
void *rule;
};
/**
* The overhead from MTU to max frame size.
* Considering QinQ packet, the VLAN tag needs to be counted twice.
@@ -674,6 +683,23 @@ struct i40e_fdir_filter {
struct i40e_fdir_filter_conf fdir;
};
/* fdir memory pool entry */
struct i40e_fdir_entry {
struct rte_flow flow;
uint32_t idx;
};
/* pre-allocated fdir memory pool */
struct i40e_fdir_flow_pool {
/* a bitmap to manage the fdir pool */
struct rte_bitmap *bitmap;
/* the size of the pool is pf->fdir->fdir_space_size */
struct i40e_fdir_entry *pool;
};
#define FLOW_TO_FLOW_BITMAP(f) \
container_of((f), struct i40e_fdir_entry, flow)
TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
/*
* A structure used to define fields of a FDIR related info.
@@ -697,6 +723,8 @@ struct i40e_fdir_info {
struct i40e_fdir_filter_list fdir_list;
struct i40e_fdir_filter **hash_map;
struct rte_hash *hash_table;
/* An array to store the inserted rules input */
struct i40e_fdir_filter *fdir_filter_array;
/*
* Priority ordering at filter invalidation(destroying a flow) between
@@ -721,6 +749,8 @@ struct i40e_fdir_info {
uint32_t fdir_guarantee_free_space;
/* the fdir total guaranteed space */
uint32_t fdir_guarantee_total_space;
/* the pre-allocated pool of the rte_flow */
struct i40e_fdir_flow_pool fdir_flow_pool;
/* Mark if flex pit and mask is set */
bool flex_pit_flag[I40E_MAX_FLXPLD_LAYER];
@@ -918,15 +948,6 @@ struct i40e_mirror_rule {
TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
/*
* Struct to store flow created.
*/
struct rte_flow {
TAILQ_ENTRY(rte_flow) node;
enum rte_filter_type filter_type;
void *rule;
};
TAILQ_HEAD(i40e_flow_list, rte_flow);
/* Struct to store Traffic Manager shaper profile. */
@@ -1358,6 +1379,10 @@ int i40e_ethertype_filter_set(struct i40e_pf *pf,
int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add);
struct rte_flow *
i40e_fdir_entry_pool_get(struct i40e_fdir_info *fdir_info);
void i40e_fdir_entry_pool_put(struct i40e_fdir_info *fdir_info,
struct rte_flow *flow);
int i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct i40e_fdir_filter_conf *filter,
bool add);


@@ -180,6 +180,7 @@ i40e_fdir_setup(struct i40e_pf *pf)
PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
return I40E_SUCCESS;
}
/* make new FDIR VSI */
vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
if (!vsi) {
@@ -1570,6 +1571,7 @@ static int
i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
{
struct i40e_fdir_info *fdir_info = &pf->fdir;
struct i40e_fdir_filter *hash_filter;
int ret;
if (filter->fdir.input.flow_ext.pkt_template)
@@ -1585,9 +1587,14 @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
ret);
return ret;
}
fdir_info->hash_map[ret] = filter;
TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
if (fdir_info->hash_map[ret])
return -1;
hash_filter = &fdir_info->fdir_filter_array[ret];
rte_memcpy(hash_filter, filter, sizeof(*filter));
fdir_info->hash_map[ret] = hash_filter;
TAILQ_INSERT_TAIL(&fdir_info->fdir_list, hash_filter, rules);
return 0;
}
@@ -1616,11 +1623,57 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
fdir_info->hash_map[ret] = NULL;
TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
rte_free(filter);
return 0;
}
struct rte_flow *
i40e_fdir_entry_pool_get(struct i40e_fdir_info *fdir_info)
{
struct rte_flow *flow = NULL;
uint64_t slab = 0;
uint32_t pos = 0;
uint32_t i = 0;
int ret;
if (fdir_info->fdir_actual_cnt >=
fdir_info->fdir_space_size) {
PMD_DRV_LOG(ERR, "Fdir space full");
return NULL;
}
ret = rte_bitmap_scan(fdir_info->fdir_flow_pool.bitmap, &pos,
&slab);
/* normally this won't happen, as fdir_actual_cnt should be the
* same as the number of set bits in fdir_flow_pool, but handle
* this error condition here anyway to be safe
*/
if (ret == 0) {
PMD_DRV_LOG(ERR, "fdir_actual_cnt out of sync");
return NULL;
}
i = rte_bsf64(slab);
pos += i;
rte_bitmap_clear(fdir_info->fdir_flow_pool.bitmap, pos);
flow = &fdir_info->fdir_flow_pool.pool[pos].flow;
memset(flow, 0, sizeof(struct rte_flow));
return flow;
}
void
i40e_fdir_entry_pool_put(struct i40e_fdir_info *fdir_info,
struct rte_flow *flow)
{
struct i40e_fdir_entry *f;
f = FLOW_TO_FLOW_BITMAP(flow);
rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, f->idx);
}
/*
* i40e_add_del_fdir_filter - add or remove a flow director filter.
* @pf: board private structure
@@ -1699,7 +1752,7 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
enum i40e_filter_pctype pctype;
struct i40e_fdir_info *fdir_info = &pf->fdir;
struct i40e_fdir_filter *fdir_filter, *node;
struct i40e_fdir_filter *node;
struct i40e_fdir_filter check_filter; /* Check if the filter exists */
int ret = 0;
@@ -1732,25 +1785,36 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
/* Check if there is the filter in SW list */
memset(&check_filter, 0, sizeof(check_filter));
i40e_fdir_filter_convert(filter, &check_filter);
node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
if (add && node) {
PMD_DRV_LOG(ERR,
"Conflict with existing flow director rules!");
return -EINVAL;
}
if (!add && !node) {
PMD_DRV_LOG(ERR,
"There's no corresponding flow firector filter!");
return -EINVAL;
if (add) {
ret = i40e_sw_fdir_filter_insert(pf, &check_filter);
if (ret < 0) {
PMD_DRV_LOG(ERR,
"Conflict with existing flow director rules!");
return -EINVAL;
}
} else {
node = i40e_sw_fdir_filter_lookup(fdir_info,
&check_filter.fdir.input);
if (!node) {
PMD_DRV_LOG(ERR,
"There's no corresponding flow firector filter!");
return -EINVAL;
}
ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
if (ret < 0) {
PMD_DRV_LOG(ERR,
"Error deleting fdir rule from hash table!");
return -EINVAL;
}
}
memset(pkt, 0, I40E_FDIR_PKT_LEN);
ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
if (ret < 0) {
PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
return ret;
goto error_op;
}
if (hw->mac.type == I40E_MAC_X722) {
@@ -1763,7 +1827,7 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
if (ret < 0) {
PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
pctype);
return ret;
goto error_op;
}
if (add) {
@@ -1771,29 +1835,24 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
if (fdir_info->fdir_invalprio == 1 &&
fdir_info->fdir_guarantee_free_space > 0)
fdir_info->fdir_guarantee_free_space--;
fdir_filter = rte_zmalloc("fdir_filter",
sizeof(*fdir_filter), 0);
if (fdir_filter == NULL) {
PMD_DRV_LOG(ERR, "Failed to alloc memory.");
return -ENOMEM;
}
rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
if (ret < 0)
rte_free(fdir_filter);
} else {
fdir_info->fdir_actual_cnt--;
if (fdir_info->fdir_invalprio == 1 &&
fdir_info->fdir_guarantee_free_space <
fdir_info->fdir_guarantee_total_space)
fdir_info->fdir_guarantee_free_space++;
ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
}
return ret;
error_op:
/* roll back */
if (add)
i40e_sw_fdir_filter_del(pf, &check_filter.fdir.input);
else
i40e_sw_fdir_filter_insert(pf, &check_filter);
return ret;
}
/*


@@ -145,6 +145,8 @@ const struct rte_flow_ops i40e_flow_ops = {
static union i40e_filter_t cons_filter;
static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
/* internal pattern w/o VOID items */
struct rte_flow_item g_items[32];
/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
@@ -5264,7 +5266,6 @@ i40e_flow_validate(struct rte_eth_dev *dev,
NULL, "NULL attribute.");
return -rte_errno;
}
memset(&cons_filter, 0, sizeof(cons_filter));
/* Get the non-void item of action */
@@ -5286,12 +5287,18 @@ i40e_flow_validate(struct rte_eth_dev *dev,
}
item_num++;
items = rte_zmalloc("i40e_pattern",
item_num * sizeof(struct rte_flow_item), 0);
if (!items) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
NULL, "No memory for PMD internal items.");
return -ENOMEM;
if (item_num <= ARRAY_SIZE(g_items)) {
items = g_items;
} else {
items = rte_zmalloc("i40e_pattern",
item_num * sizeof(struct rte_flow_item), 0);
if (!items) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_ITEM_NUM,
NULL,
"No memory for PMD internal items.");
return -ENOMEM;
}
}
i40e_pattern_skip_void_item(items, pattern);
@@ -5303,16 +5310,21 @@ i40e_flow_validate(struct rte_eth_dev *dev,
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
pattern, "Unsupported pattern");
rte_free(items);
if (items != g_items)
rte_free(items);
return -rte_errno;
}
if (parse_filter)
ret = parse_filter(dev, attr, items, actions,
error, &cons_filter);
flag = true;
} while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
rte_free(items);
if (items != g_items)
rte_free(items);
return ret;
}
@@ -5325,21 +5337,33 @@ i40e_flow_create(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct rte_flow *flow;
struct rte_flow *flow = NULL;
struct i40e_fdir_info *fdir_info = &pf->fdir;
int ret;
flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
if (!flow) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to allocate memory");
return flow;
}
ret = i40e_flow_validate(dev, attr, pattern, actions, error);
if (ret < 0)
return NULL;
if (cons_filter_type == RTE_ETH_FILTER_FDIR) {
flow = i40e_fdir_entry_pool_get(fdir_info);
if (flow == NULL) {
rte_flow_error_set(error, ENOBUFS,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Fdir space full");
return flow;
}
} else {
flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
if (!flow) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to allocate memory");
return flow;
}
}
switch (cons_filter_type) {
case RTE_ETH_FILTER_ETHERTYPE:
ret = i40e_ethertype_filter_set(pf,
@@ -5351,7 +5375,7 @@ i40e_flow_create(struct rte_eth_dev *dev,
break;
case RTE_ETH_FILTER_FDIR:
ret = i40e_flow_add_del_fdir_filter(dev,
&cons_filter.fdir_filter, 1);
&cons_filter.fdir_filter, 1);
if (ret)
goto free_flow;
flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
@@ -5385,7 +5409,12 @@ i40e_flow_create(struct rte_eth_dev *dev,
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to create flow.");
rte_free(flow);
if (cons_filter_type != RTE_ETH_FILTER_FDIR)
rte_free(flow);
else
i40e_fdir_entry_pool_put(fdir_info, flow);
return NULL;
}
@@ -5396,6 +5425,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
enum rte_filter_type filter_type = flow->filter_type;
struct i40e_fdir_info *fdir_info = &pf->fdir;
int ret = 0;
switch (filter_type) {
@@ -5409,7 +5439,8 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
break;
case RTE_ETH_FILTER_FDIR:
ret = i40e_flow_add_del_fdir_filter(dev,
&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
&((struct i40e_fdir_filter *)flow->rule)->fdir,
0);
/* If the last flow is destroyed, disable fdir. */
if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
@@ -5429,7 +5460,11 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
if (!ret) {
TAILQ_REMOVE(&pf->flow_list, flow, node);
rte_free(flow);
if (filter_type == RTE_ETH_FILTER_FDIR)
i40e_fdir_entry_pool_put(fdir_info, flow);
else
rte_free(flow);
} else
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -5583,6 +5618,7 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
struct rte_flow *flow;
void *temp;
int ret;
uint32_t i = 0;
ret = i40e_fdir_flush(dev);
if (!ret) {
@@ -5598,13 +5634,23 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
TAILQ_REMOVE(&pf->flow_list, flow, node);
rte_free(flow);
}
}
/* reset bitmap */
rte_bitmap_reset(fdir_info->fdir_flow_pool.bitmap);
for (i = 0; i < fdir_info->fdir_space_size; i++) {
fdir_info->fdir_flow_pool.pool[i].idx = i;
rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, i);
}
fdir_info->fdir_actual_cnt = 0;
fdir_info->fdir_guarantee_free_space =
fdir_info->fdir_guarantee_total_space;
memset(fdir_info->fdir_filter_array,
0,
sizeof(struct i40e_fdir_filter) *
I40E_MAX_FDIR_FILTER_NUM);
for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)