net/mlx5: support flow age action with HWS
Add support for AGE action for HW steering. This patch includes:

1. Add new structures to manage aging.
2. Initialize all of them in configure function.
3. Implement per second aging check using CNT background thread.
4. Enable AGE action in flow create/destroy operations.
5. Implement a queue-based function to report aged flow rules.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
This commit is contained in:
parent 48fbb0e93d
commit 04a4de756e
@@ -459,6 +459,22 @@ Limitations

  The modify field action is not intended to modify VLAN headers type field,
  dedicated VLAN push and pop actions should be used instead.

- Age action:

  - with HW steering (``dv_flow_en=2``)

    - Using the same indirect count action combined with multiple age actions
      in different flows may cause a wrong age state for the age actions.

    - Creating/destroying flow rules with indirect age action when it is active
      (timeout != 0) may cause a wrong age state for the indirect age action.

    - The driver reuses counters for aging action, so for optimization
      the values in ``rte_flow_port_attr`` structure should describe
      (a configuration sketch follows this excerpt):

      - ``nb_counters`` is the number of flow rules using counter (with/without age)
        in addition to flow rules using only age (without count action).

      - ``nb_aging_objects`` is the number of flow rules containing age action.

- IPv6 header item 'proto' field, indicating the next header protocol, should
  not be set as extension header.
  In case the next header is an extension header, it should not be specified in
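For illustration only (not part of this patch), a minimal sketch of sizing these
attributes through the generic rte_flow API; the numbers are hypothetical examples:

.. code-block:: c

   #include <rte_flow.h>

   static int
   configure_hws_aging(uint16_t port_id)
   {
           struct rte_flow_port_attr port_attr = {
                   /* Rules using COUNT (with or without AGE) plus rules using AGE only. */
                   .nb_counters = 1 << 16,
                   /* Rules carrying an AGE action. */
                   .nb_aging_objects = 1 << 12,
           };
           struct rte_flow_queue_attr queue_attr = { .size = 64 };
           const struct rte_flow_queue_attr *queue_attrs[] = { &queue_attr };
           struct rte_flow_error error;

           /* HW steering (dv_flow_en=2) must be enabled in the device arguments. */
           return rte_flow_configure(port_id, &port_attr, 1, queue_attrs, &error);
   }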
@ -497,6 +497,12 @@ mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
|
||||
uint32_t i;
|
||||
struct mlx5_age_info *age_info;
|
||||
|
||||
/*
|
||||
* In HW steering, aging information structure is initialized later
|
||||
* during configure function.
|
||||
*/
|
||||
if (sh->config.dv_flow_en == 2)
|
||||
return;
|
||||
for (i = 0; i < sh->max_port; i++) {
|
||||
age_info = &sh->port[i].age_info;
|
||||
age_info->flags = 0;
|
||||
@ -540,8 +546,8 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
|
||||
hca_attr->flow_counter_bulk_alloc_bitmap);
|
||||
/* Initialize fallback mode only on the port initializes sh. */
|
||||
if (sh->refcnt == 1)
|
||||
sh->cmng.counter_fallback = fallback;
|
||||
else if (fallback != sh->cmng.counter_fallback)
|
||||
sh->sws_cmng.counter_fallback = fallback;
|
||||
else if (fallback != sh->sws_cmng.counter_fallback)
|
||||
DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
|
||||
"with others:%d.", PORT_ID(priv), fallback);
|
||||
#endif
|
||||
@ -556,17 +562,38 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
|
||||
static void
|
||||
mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
|
||||
{
|
||||
int i;
|
||||
int i, j;
|
||||
|
||||
memset(&sh->cmng, 0, sizeof(sh->cmng));
|
||||
TAILQ_INIT(&sh->cmng.flow_counters);
|
||||
sh->cmng.min_id = MLX5_CNT_BATCH_OFFSET;
|
||||
sh->cmng.max_id = -1;
|
||||
sh->cmng.last_pool_idx = POOL_IDX_INVALID;
|
||||
rte_spinlock_init(&sh->cmng.pool_update_sl);
|
||||
for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {
|
||||
TAILQ_INIT(&sh->cmng.counters[i]);
|
||||
rte_spinlock_init(&sh->cmng.csl[i]);
|
||||
if (sh->config.dv_flow_en < 2) {
|
||||
memset(&sh->sws_cmng, 0, sizeof(sh->sws_cmng));
|
||||
TAILQ_INIT(&sh->sws_cmng.flow_counters);
|
||||
sh->sws_cmng.min_id = MLX5_CNT_BATCH_OFFSET;
|
||||
sh->sws_cmng.max_id = -1;
|
||||
sh->sws_cmng.last_pool_idx = POOL_IDX_INVALID;
|
||||
rte_spinlock_init(&sh->sws_cmng.pool_update_sl);
|
||||
for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {
|
||||
TAILQ_INIT(&sh->sws_cmng.counters[i]);
|
||||
rte_spinlock_init(&sh->sws_cmng.csl[i]);
|
||||
}
|
||||
} else {
|
||||
struct mlx5_hca_attr *attr = &sh->cdev->config.hca_attr;
|
||||
uint32_t fw_max_nb_cnts = attr->max_flow_counter;
|
||||
uint8_t log_dcs = log2above(fw_max_nb_cnts) - 1;
|
||||
uint32_t max_nb_cnts = 0;
|
||||
|
||||
for (i = 0, j = 0; j < MLX5_HWS_CNT_DCS_NUM; ++i) {
|
||||
int log_dcs_i = log_dcs - i;
|
||||
|
||||
if (log_dcs_i < 0)
|
||||
break;
|
||||
if ((max_nb_cnts | RTE_BIT32(log_dcs_i)) >
|
||||
fw_max_nb_cnts)
|
||||
continue;
|
||||
max_nb_cnts |= RTE_BIT32(log_dcs_i);
|
||||
j++;
|
||||
}
|
||||
sh->hws_max_log_bulk_sz = log_dcs;
|
||||
sh->hws_max_nb_counters = max_nb_cnts;
|
||||
}
|
||||
}
|
||||
|
||||
@ -607,13 +634,13 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
|
||||
rte_pause();
|
||||
}
|
||||
|
||||
if (sh->cmng.pools) {
|
||||
if (sh->sws_cmng.pools) {
|
||||
struct mlx5_flow_counter_pool *pool;
|
||||
uint16_t n_valid = sh->cmng.n_valid;
|
||||
bool fallback = sh->cmng.counter_fallback;
|
||||
uint16_t n_valid = sh->sws_cmng.n_valid;
|
||||
bool fallback = sh->sws_cmng.counter_fallback;
|
||||
|
||||
for (i = 0; i < n_valid; ++i) {
|
||||
pool = sh->cmng.pools[i];
|
||||
pool = sh->sws_cmng.pools[i];
|
||||
if (!fallback && pool->min_dcs)
|
||||
claim_zero(mlx5_devx_cmd_destroy
|
||||
(pool->min_dcs));
|
||||
@ -632,14 +659,14 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
|
||||
}
|
||||
mlx5_free(pool);
|
||||
}
|
||||
mlx5_free(sh->cmng.pools);
|
||||
mlx5_free(sh->sws_cmng.pools);
|
||||
}
|
||||
mng = LIST_FIRST(&sh->cmng.mem_mngs);
|
||||
mng = LIST_FIRST(&sh->sws_cmng.mem_mngs);
|
||||
while (mng) {
|
||||
mlx5_flow_destroy_counter_stat_mem_mng(mng);
|
||||
mng = LIST_FIRST(&sh->cmng.mem_mngs);
|
||||
mng = LIST_FIRST(&sh->sws_cmng.mem_mngs);
|
||||
}
|
||||
memset(&sh->cmng, 0, sizeof(sh->cmng));
|
||||
memset(&sh->sws_cmng, 0, sizeof(sh->sws_cmng));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -644,12 +644,45 @@ struct mlx5_geneve_tlv_option_resource {

/* Current time in seconds. */
#define MLX5_CURR_TIME_SEC (rte_rdtsc() / rte_get_tsc_hz())

/*
 * HW steering queue oriented AGE info.
 * It contains an array of rings, one for each HWS queue.
 */
struct mlx5_hws_q_age_info {
	uint16_t nb_rings; /* Number of aged-out ring lists. */
	struct rte_ring *aged_lists[]; /* Aged-out lists. */
};

/*
 * HW steering AGE info.
 * It has a ring list containing all aged out flow rules.
 */
struct mlx5_hws_age_info {
	struct rte_ring *aged_list; /* Aged out lists. */
};

/* Aging information for per port. */
struct mlx5_age_info {
	uint8_t flags; /* Indicate if is new event or need to be triggered. */
	struct mlx5_counters aged_counters; /* Aged counter list. */
	struct aso_age_list aged_aso; /* Aged ASO actions list. */
	rte_spinlock_t aged_sl; /* Aged flow list lock. */
	union {
		/* SW/FW steering AGE info. */
		struct {
			struct mlx5_counters aged_counters;
			/* Aged counter list. */
			struct aso_age_list aged_aso;
			/* Aged ASO actions list. */
			rte_spinlock_t aged_sl; /* Aged flow list lock. */
		};
		struct {
			struct mlx5_indexed_pool *ages_ipool;
			union {
				struct mlx5_hws_age_info hw_age;
				/* HW steering AGE info. */
				struct mlx5_hws_q_age_info *hw_q_age;
				/* HW steering queue oriented AGE info. */
			};
		};
	};
};

/* Per port data of shared IB device. */
@ -1312,6 +1345,9 @@ struct mlx5_dev_ctx_shared {
|
||||
uint32_t hws_tags:1; /* Check if tags info for HWS initialized. */
|
||||
uint32_t shared_mark_enabled:1;
|
||||
/* If mark action is enabled on Rxqs (shared E-Switch domain). */
|
||||
uint32_t hws_max_log_bulk_sz:5;
|
||||
/* Log of minimal HWS counters created hard coded. */
|
||||
uint32_t hws_max_nb_counters; /* Maximal number for HWS counters. */
|
||||
uint32_t max_port; /* Maximal IB device port index. */
|
||||
struct mlx5_bond_info bond; /* Bonding information. */
|
||||
struct mlx5_common_device *cdev; /* Backend mlx5 device. */
|
||||
@ -1353,7 +1389,8 @@ struct mlx5_dev_ctx_shared {
|
||||
struct mlx5_list *dest_array_list;
|
||||
struct mlx5_list *flex_parsers_dv; /* Flex Item parsers. */
|
||||
/* List of destination array actions. */
|
||||
struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
|
||||
struct mlx5_flow_counter_mng sws_cmng;
|
||||
/* SW steering counters management structure. */
|
||||
void *default_miss_action; /* Default miss action. */
|
||||
struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
|
||||
struct mlx5_indexed_pool *mdh_ipools[MLX5_MAX_MODIFY_NUM];
|
||||
@ -1683,6 +1720,9 @@ struct mlx5_priv {
|
||||
LIST_HEAD(flow_hw_at, rte_flow_actions_template) flow_hw_at;
|
||||
struct mlx5dr_context *dr_ctx; /**< HW steering DR context. */
|
||||
/* HW steering queue polling mechanism job descriptor LIFO. */
|
||||
uint32_t hws_strict_queue:1;
|
||||
/**< Whether all operations strictly happen on the same HWS queue. */
|
||||
uint32_t hws_age_req:1; /**< Whether this port has AGE indexed pool. */
|
||||
struct mlx5_hw_q *hw_q;
|
||||
/* HW steering rte flow table list header. */
|
||||
LIST_HEAD(flow_hw_tbl, rte_flow_template_table) flow_hw_tbl;
|
||||
@ -1998,6 +2038,9 @@ int mlx5_validate_action_ct(struct rte_eth_dev *dev,
|
||||
const struct rte_flow_action_conntrack *conntrack,
|
||||
struct rte_flow_error *error);
|
||||
|
||||
int mlx5_flow_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
|
||||
void **contexts, uint32_t nb_contexts,
|
||||
struct rte_flow_error *error);
|
||||
|
||||
/* mlx5_mp_os.c */
|
||||
|
||||
|
@ -43,6 +43,9 @@
|
||||
#define MLX5_PMD_SOFT_COUNTERS 1
|
||||
#endif
|
||||
|
||||
/* Maximum number of DCS created per port. */
|
||||
#define MLX5_HWS_CNT_DCS_NUM 4
|
||||
|
||||
/* Alarm timeout. */
|
||||
#define MLX5_ALARM_TIMEOUT_US 100000
|
||||
|
||||
|
@ -985,6 +985,9 @@ static const struct rte_flow_ops mlx5_flow_ops = {
|
||||
.isolate = mlx5_flow_isolate,
|
||||
.query = mlx5_flow_query,
|
||||
.dev_dump = mlx5_flow_dev_dump,
|
||||
#ifdef MLX5_HAVE_RTE_FLOW_Q_AGE
|
||||
.get_q_aged_flows = mlx5_flow_get_q_aged_flows,
|
||||
#endif
|
||||
.get_aged_flows = mlx5_flow_get_aged_flows,
|
||||
.action_handle_create = mlx5_action_handle_create,
|
||||
.action_handle_destroy = mlx5_action_handle_destroy,
|
||||
@ -8940,11 +8943,11 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
|
||||
mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
|
||||
}
|
||||
for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
|
||||
LIST_INSERT_HEAD(&sh->cmng.free_stat_raws,
|
||||
LIST_INSERT_HEAD(&sh->sws_cmng.free_stat_raws,
|
||||
mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i,
|
||||
next);
|
||||
LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
|
||||
sh->cmng.mem_mng = mem_mng;
|
||||
LIST_INSERT_HEAD(&sh->sws_cmng.mem_mngs, mem_mng, next);
|
||||
sh->sws_cmng.mem_mng = mem_mng;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -8963,7 +8966,7 @@ static int
|
||||
mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,
|
||||
struct mlx5_flow_counter_pool *pool)
|
||||
{
|
||||
struct mlx5_flow_counter_mng *cmng = &sh->cmng;
|
||||
struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng;
|
||||
/* Resize statistic memory once used out. */
|
||||
if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) &&
|
||||
mlx5_flow_create_counter_stat_mem_mng(sh)) {
|
||||
@ -8992,14 +8995,14 @@ mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
|
||||
{
|
||||
uint32_t pools_n, us;
|
||||
|
||||
pools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED);
|
||||
pools_n = __atomic_load_n(&sh->sws_cmng.n_valid, __ATOMIC_RELAXED);
|
||||
us = MLX5_POOL_QUERY_FREQ_US / pools_n;
|
||||
DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
|
||||
if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
|
||||
sh->cmng.query_thread_on = 0;
|
||||
sh->sws_cmng.query_thread_on = 0;
|
||||
DRV_LOG(ERR, "Cannot reinitialize query alarm");
|
||||
} else {
|
||||
sh->cmng.query_thread_on = 1;
|
||||
sh->sws_cmng.query_thread_on = 1;
|
||||
}
|
||||
}
|
||||
|
||||
@ -9015,12 +9018,12 @@ mlx5_flow_query_alarm(void *arg)
|
||||
{
|
||||
struct mlx5_dev_ctx_shared *sh = arg;
|
||||
int ret;
|
||||
uint16_t pool_index = sh->cmng.pool_index;
|
||||
struct mlx5_flow_counter_mng *cmng = &sh->cmng;
|
||||
uint16_t pool_index = sh->sws_cmng.pool_index;
|
||||
struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng;
|
||||
struct mlx5_flow_counter_pool *pool;
|
||||
uint16_t n_valid;
|
||||
|
||||
if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
|
||||
if (sh->sws_cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
|
||||
goto set_alarm;
|
||||
rte_spinlock_lock(&cmng->pool_update_sl);
|
||||
pool = cmng->pools[pool_index];
|
||||
@ -9033,7 +9036,7 @@ mlx5_flow_query_alarm(void *arg)
|
||||
/* There is a pool query in progress. */
|
||||
goto set_alarm;
|
||||
pool->raw_hw =
|
||||
LIST_FIRST(&sh->cmng.free_stat_raws);
|
||||
LIST_FIRST(&sh->sws_cmng.free_stat_raws);
|
||||
if (!pool->raw_hw)
|
||||
/* No free counter statistics raw memory. */
|
||||
goto set_alarm;
|
||||
@ -9059,12 +9062,12 @@ mlx5_flow_query_alarm(void *arg)
|
||||
goto set_alarm;
|
||||
}
|
||||
LIST_REMOVE(pool->raw_hw, next);
|
||||
sh->cmng.pending_queries++;
|
||||
sh->sws_cmng.pending_queries++;
|
||||
pool_index++;
|
||||
if (pool_index >= n_valid)
|
||||
pool_index = 0;
|
||||
set_alarm:
|
||||
sh->cmng.pool_index = pool_index;
|
||||
sh->sws_cmng.pool_index = pool_index;
|
||||
mlx5_set_query_alarm(sh);
|
||||
}
|
||||
|
||||
@ -9147,7 +9150,7 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
|
||||
(struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
|
||||
struct mlx5_counter_stats_raw *raw_to_free;
|
||||
uint8_t query_gen = pool->query_gen ^ 1;
|
||||
struct mlx5_flow_counter_mng *cmng = &sh->cmng;
|
||||
struct mlx5_flow_counter_mng *cmng = &sh->sws_cmng;
|
||||
enum mlx5_counter_type cnt_type =
|
||||
pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
|
||||
MLX5_COUNTER_TYPE_ORIGIN;
|
||||
@ -9170,9 +9173,9 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
|
||||
rte_spinlock_unlock(&cmng->csl[cnt_type]);
|
||||
}
|
||||
}
|
||||
LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
|
||||
LIST_INSERT_HEAD(&sh->sws_cmng.free_stat_raws, raw_to_free, next);
|
||||
pool->raw_hw = NULL;
|
||||
sh->cmng.pending_queries--;
|
||||
sh->sws_cmng.pending_queries--;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -9532,7 +9535,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev,
|
||||
struct mlx5_list_inconst *l_inconst;
|
||||
struct mlx5_list_entry *e;
|
||||
int lcore_index;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
|
||||
uint32_t max;
|
||||
void *action;
|
||||
|
||||
@ -9703,18 +9706,58 @@ mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
|
||||
{
|
||||
const struct mlx5_flow_driver_ops *fops;
|
||||
struct rte_flow_attr attr = { .transfer = 0 };
|
||||
enum mlx5_flow_drv_type type = flow_get_drv_type(dev, &attr);
|
||||
|
||||
if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
|
||||
fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
|
||||
return fops->get_aged_flows(dev, contexts, nb_contexts,
|
||||
error);
|
||||
if (type == MLX5_FLOW_TYPE_DV || type == MLX5_FLOW_TYPE_HW) {
|
||||
fops = flow_get_drv_ops(type);
|
||||
return fops->get_aged_flows(dev, contexts, nb_contexts, error);
|
||||
}
|
||||
DRV_LOG(ERR,
|
||||
"port %u get aged flows is not supported.",
|
||||
dev->data->port_id);
|
||||
DRV_LOG(ERR, "port %u get aged flows is not supported.",
|
||||
dev->data->port_id);
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
/**
 * Get aged-out flows per HWS queue.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] queue_id
 *   Flow queue to query.
 * @param[in] contexts
 *   The address of an array of pointers to the aged-out flows contexts.
 * @param[in] nb_contexts
 *   The length of context array pointers.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   Number of aged-out flows reported on success, otherwise a negative
 *   errno value. If nb_contexts is 0, return the total number of aged-out
 *   contexts instead of filling the context array.
 */
int
mlx5_flow_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
			   void **contexts, uint32_t nb_contexts,
			   struct rte_flow_error *error)
{
	const struct mlx5_flow_driver_ops *fops;
	struct rte_flow_attr attr = { 0 };

	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_HW) {
		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
		return fops->get_q_aged_flows(dev, queue_id, contexts,
					      nb_contexts, error);
	}
	DRV_LOG(ERR, "port %u queue %u get aged flows is not supported.",
		dev->data->port_id, queue_id);
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "get Q aged flows with incorrect steering mode");
}
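For context only (not part of this patch): a hypothetical application-side sketch of
draining one HWS queue through the generic rte_flow API that ends up in this callback.
It assumes the application stored its struct rte_flow pointer as the AGE context and
that operation completions are pulled elsewhere.

	#include <rte_common.h>
	#include <rte_flow.h>

	static void
	drain_aged_flows(uint16_t port_id, uint32_t queue_id)
	{
		void *contexts[64];
		struct rte_flow_error error;
		struct rte_flow_op_attr op_attr = { .postpone = 0 };
		int i, n;

		n = rte_flow_get_q_aged_flows(port_id, queue_id, contexts,
					      RTE_DIM(contexts), &error);
		for (i = 0; i < n; i++) {
			struct rte_flow *flow = contexts[i];

			/* Asynchronous destroy on the queue the rule was created on. */
			rte_flow_async_destroy(port_id, queue_id, &op_attr, flow,
					       NULL, &error);
		}
		/* Flush the destroy operations; rte_flow_pull() collects results. */
		rte_flow_push(port_id, queue_id, &error);
	}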
|
||||
|
||||
/* Wrapper for driver action_validate op callback */
|
||||
static int
|
||||
flow_drv_action_validate(struct rte_eth_dev *dev,
|
||||
|
@ -294,6 +294,8 @@ enum mlx5_feature_name {
|
||||
#define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40)
|
||||
#define MLX5_FLOW_ACTION_CT (1ull << 41)
|
||||
#define MLX5_FLOW_ACTION_SEND_TO_KERNEL (1ull << 42)
|
||||
#define MLX5_FLOW_ACTION_INDIRECT_COUNT (1ull << 43)
|
||||
#define MLX5_FLOW_ACTION_INDIRECT_AGE (1ull << 44)
|
||||
|
||||
#define MLX5_FLOW_FATE_ACTIONS \
|
||||
(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
|
||||
@ -1102,6 +1104,22 @@ struct rte_flow {
|
||||
uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. > */
|
||||
} __rte_packed;
|
||||
|
||||
/*
 * HWS COUNTER ID's layout
 *       3                   2                   1                   0
 *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    |  T  |     | D |                                               |
 *    ~  Y  |     | C |                    IDX                        ~
 *    |  P  |     | S |                                               |
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *    Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
 *    Bit 25:24 = DCS index
 *    Bit 23:00 = IDX in this counter belonged DCS bulk.
 */
typedef uint32_t cnt_id_t;
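As an illustrative sketch only (not part of this patch), a counter ID with this layout
could be decomposed with the masks defined in mlx5_hws_cnt.h (shown later in this diff);
the helper name is hypothetical:

	/* Bits 25:24 select the DCS bulk, bits 23:0 index inside that bulk. */
	static inline void
	hws_cnt_id_decode(cnt_id_t cnt_id, uint32_t *dcs_idx, uint32_t *iidx)
	{
		*dcs_idx = (cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET) &
			   MLX5_HWS_CNT_DCS_IDX_MASK;
		*iidx = cnt_id & MLX5_HWS_CNT_IDX_MASK;
	}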
|
||||
|
||||
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
|
||||
|
||||
#ifdef PEDANTIC
|
||||
@ -1118,7 +1136,8 @@ struct rte_flow_hw {
|
||||
struct mlx5_hrxq *hrxq; /* TIR action. */
|
||||
};
|
||||
struct rte_flow_template_table *table; /* The table flow allcated from. */
|
||||
uint32_t cnt_id;
|
||||
uint32_t age_idx;
|
||||
cnt_id_t cnt_id;
|
||||
uint32_t mtr_id;
|
||||
uint8_t rule[0]; /* HWS layer data struct. */
|
||||
} __rte_packed;
|
||||
@ -1169,7 +1188,7 @@ struct mlx5_action_construct_data {
|
||||
uint32_t idx; /* Shared action index. */
|
||||
} shared_rss;
|
||||
struct {
|
||||
uint32_t id;
|
||||
cnt_id_t id;
|
||||
} shared_counter;
|
||||
struct {
|
||||
uint32_t id;
|
||||
@ -1200,6 +1219,7 @@ struct rte_flow_actions_template {
|
||||
struct rte_flow_action *actions; /* Cached flow actions. */
|
||||
struct rte_flow_action *masks; /* Cached action masks.*/
|
||||
struct mlx5dr_action_template *tmpl; /* mlx5dr action template. */
|
||||
uint64_t action_flags; /* Bit-map of all valid action in template. */
|
||||
uint16_t dr_actions_num; /* Amount of DR rules actions. */
|
||||
uint16_t actions_num; /* Amount of flow actions */
|
||||
uint16_t *actions_off; /* DR action offset for given rte action offset. */
|
||||
@ -1256,7 +1276,7 @@ struct mlx5_hw_actions {
|
||||
struct mlx5_hw_encap_decap_action *encap_decap;
|
||||
uint16_t encap_decap_pos; /* Encap/Decap action position. */
|
||||
uint32_t mark:1; /* Indicate the mark action. */
|
||||
uint32_t cnt_id; /* Counter id. */
|
||||
cnt_id_t cnt_id; /* Counter id. */
|
||||
uint32_t mtr_id; /* Meter id. */
|
||||
/* Translated DR action array from action template. */
|
||||
struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
|
||||
@ -1632,6 +1652,12 @@ typedef int (*mlx5_flow_get_aged_flows_t)
|
||||
void **context,
|
||||
uint32_t nb_contexts,
|
||||
struct rte_flow_error *error);
|
||||
typedef int (*mlx5_flow_get_q_aged_flows_t)
|
||||
(struct rte_eth_dev *dev,
|
||||
uint32_t queue_id,
|
||||
void **context,
|
||||
uint32_t nb_contexts,
|
||||
struct rte_flow_error *error);
|
||||
typedef int (*mlx5_flow_action_validate_t)
|
||||
(struct rte_eth_dev *dev,
|
||||
const struct rte_flow_indir_action_conf *conf,
|
||||
@ -1838,6 +1864,7 @@ struct mlx5_flow_driver_ops {
|
||||
mlx5_flow_counter_free_t counter_free;
|
||||
mlx5_flow_counter_query_t counter_query;
|
||||
mlx5_flow_get_aged_flows_t get_aged_flows;
|
||||
mlx5_flow_get_q_aged_flows_t get_q_aged_flows;
|
||||
mlx5_flow_action_validate_t action_validate;
|
||||
mlx5_flow_action_create_t action_create;
|
||||
mlx5_flow_action_destroy_t action_destroy;
|
||||
|
@ -5520,7 +5520,7 @@ flow_dv_validate_action_age(uint64_t action_flags,
|
||||
const struct rte_flow_action_age *age = action->conf;
|
||||
|
||||
if (!priv->sh->cdev->config.devx ||
|
||||
(priv->sh->cmng.counter_fallback && !priv->sh->aso_age_mng))
|
||||
(priv->sh->sws_cmng.counter_fallback && !priv->sh->aso_age_mng))
|
||||
return rte_flow_error_set(error, ENOTSUP,
|
||||
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
|
||||
NULL,
|
||||
@ -6081,7 +6081,7 @@ flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
|
||||
struct mlx5_flow_counter_pool **ppool)
|
||||
{
|
||||
struct mlx5_priv *priv = dev->data->dev_private;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
|
||||
struct mlx5_flow_counter_pool *pool;
|
||||
|
||||
/* Decrease to original index and clear shared bit. */
|
||||
@ -6175,7 +6175,7 @@ static int
|
||||
flow_dv_container_resize(struct rte_eth_dev *dev)
|
||||
{
|
||||
struct mlx5_priv *priv = dev->data->dev_private;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
|
||||
void *old_pools = cmng->pools;
|
||||
uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
|
||||
uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
|
||||
@ -6221,7 +6221,7 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
|
||||
|
||||
cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
|
||||
MLX5_ASSERT(pool);
|
||||
if (priv->sh->cmng.counter_fallback)
|
||||
if (priv->sh->sws_cmng.counter_fallback)
|
||||
return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
|
||||
0, pkts, bytes, 0, NULL, NULL, 0);
|
||||
rte_spinlock_lock(&pool->sl);
|
||||
@ -6258,8 +6258,8 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
|
||||
{
|
||||
struct mlx5_priv *priv = dev->data->dev_private;
|
||||
struct mlx5_flow_counter_pool *pool;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
|
||||
bool fallback = priv->sh->cmng.counter_fallback;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
|
||||
bool fallback = priv->sh->sws_cmng.counter_fallback;
|
||||
uint32_t size = sizeof(*pool);
|
||||
|
||||
size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
|
||||
@ -6320,14 +6320,14 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
|
||||
uint32_t age)
|
||||
{
|
||||
struct mlx5_priv *priv = dev->data->dev_private;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
|
||||
struct mlx5_flow_counter_pool *pool;
|
||||
struct mlx5_counters tmp_tq;
|
||||
struct mlx5_devx_obj *dcs = NULL;
|
||||
struct mlx5_flow_counter *cnt;
|
||||
enum mlx5_counter_type cnt_type =
|
||||
age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
|
||||
bool fallback = priv->sh->cmng.counter_fallback;
|
||||
bool fallback = priv->sh->sws_cmng.counter_fallback;
|
||||
uint32_t i;
|
||||
|
||||
if (fallback) {
|
||||
@ -6391,8 +6391,8 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
|
||||
struct mlx5_priv *priv = dev->data->dev_private;
|
||||
struct mlx5_flow_counter_pool *pool = NULL;
|
||||
struct mlx5_flow_counter *cnt_free = NULL;
|
||||
bool fallback = priv->sh->cmng.counter_fallback;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
|
||||
bool fallback = priv->sh->sws_cmng.counter_fallback;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
|
||||
enum mlx5_counter_type cnt_type =
|
||||
age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
|
||||
uint32_t cnt_idx;
|
||||
@ -6438,7 +6438,7 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
|
||||
if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
|
||||
&cnt_free->bytes))
|
||||
goto err;
|
||||
if (!fallback && !priv->sh->cmng.query_thread_on)
|
||||
if (!fallback && !priv->sh->sws_cmng.query_thread_on)
|
||||
/* Start the asynchronous batch query by the host thread. */
|
||||
mlx5_set_query_alarm(priv->sh);
|
||||
/*
|
||||
@ -6566,7 +6566,7 @@ flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
|
||||
* this case, lock will not be needed as query callback and release
|
||||
* function both operate with the different list.
|
||||
*/
|
||||
if (!priv->sh->cmng.counter_fallback) {
|
||||
if (!priv->sh->sws_cmng.counter_fallback) {
|
||||
rte_spinlock_lock(&pool->csl);
|
||||
TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
|
||||
rte_spinlock_unlock(&pool->csl);
|
||||
@ -6574,10 +6574,10 @@ flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
|
||||
cnt->dcs_when_free = cnt->dcs_when_active;
|
||||
cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
|
||||
MLX5_COUNTER_TYPE_ORIGIN;
|
||||
rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
|
||||
TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
|
||||
rte_spinlock_lock(&priv->sh->sws_cmng.csl[cnt_type]);
|
||||
TAILQ_INSERT_TAIL(&priv->sh->sws_cmng.counters[cnt_type],
|
||||
cnt, next);
|
||||
rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
|
||||
rte_spinlock_unlock(&priv->sh->sws_cmng.csl[cnt_type]);
|
||||
}
|
||||
}
|
||||
|
||||
|
File diff suppressed because it is too large
@ -122,7 +122,7 @@ flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
|
||||
struct mlx5_flow_counter_pool **ppool)
|
||||
{
|
||||
struct mlx5_priv *priv = dev->data->dev_private;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
|
||||
struct mlx5_flow_counter_pool *pool;
|
||||
|
||||
idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
|
||||
@ -215,7 +215,7 @@ static uint32_t
|
||||
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t id __rte_unused)
|
||||
{
|
||||
struct mlx5_priv *priv = dev->data->dev_private;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
|
||||
struct mlx5_flow_counter_mng *cmng = &priv->sh->sws_cmng;
|
||||
struct mlx5_flow_counter_pool *pool = NULL;
|
||||
struct mlx5_flow_counter *cnt = NULL;
|
||||
uint32_t n_valid = cmng->n_valid;
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <rte_ring.h>
|
||||
#include <mlx5_devx_cmds.h>
|
||||
#include <rte_cycles.h>
|
||||
#include <rte_eal_paging.h>
|
||||
|
||||
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
|
||||
|
||||
@ -26,8 +27,8 @@ __hws_cnt_id_load(struct mlx5_hws_cnt_pool *cpool)
|
||||
uint32_t preload;
|
||||
uint32_t q_num = cpool->cache->q_num;
|
||||
uint32_t cnt_num = mlx5_hws_cnt_pool_get_size(cpool);
|
||||
cnt_id_t cnt_id, iidx = 0;
|
||||
uint32_t qidx;
|
||||
cnt_id_t cnt_id;
|
||||
uint32_t qidx, iidx = 0;
|
||||
struct rte_ring *qcache = NULL;
|
||||
|
||||
/*
|
||||
@ -86,6 +87,174 @@ __mlx5_hws_cnt_svc(struct mlx5_dev_ctx_shared *sh,
|
||||
} while (reset_cnt_num > 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Release AGE parameter.
|
||||
*
|
||||
* @param priv
|
||||
* Pointer to the port private data structure.
|
||||
* @param own_cnt_index
|
||||
* Counter ID created only for this AGE, to be released with it.
|
||||
* Zero means there is no such counter.
|
||||
* @param age_ipool
|
||||
* Pointer to AGE parameter indexed pool.
|
||||
* @param idx
|
||||
* Index of AGE parameter in the indexed pool.
|
||||
*/
|
||||
static void
|
||||
mlx5_hws_age_param_free(struct mlx5_priv *priv, cnt_id_t own_cnt_index,
|
||||
struct mlx5_indexed_pool *age_ipool, uint32_t idx)
|
||||
{
|
||||
if (own_cnt_index) {
|
||||
struct mlx5_hws_cnt_pool *cpool = priv->hws_cpool;
|
||||
|
||||
MLX5_ASSERT(mlx5_hws_cnt_is_shared(cpool, own_cnt_index));
|
||||
mlx5_hws_cnt_shared_put(cpool, &own_cnt_index);
|
||||
}
|
||||
mlx5_ipool_free(age_ipool, idx);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check and callback event for new aged flow in the HWS counter pool.
|
||||
*
|
||||
* @param[in] priv
|
||||
* Pointer to port private object.
|
||||
* @param[in] cpool
|
||||
* Pointer to current counter pool.
|
||||
*/
|
||||
static void
|
||||
mlx5_hws_aging_check(struct mlx5_priv *priv, struct mlx5_hws_cnt_pool *cpool)
|
||||
{
|
||||
struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
|
||||
struct flow_counter_stats *stats = cpool->raw_mng->raw;
|
||||
struct mlx5_hws_age_param *param;
|
||||
struct rte_ring *r;
|
||||
const uint64_t curr_time = MLX5_CURR_TIME_SEC;
|
||||
const uint32_t time_delta = curr_time - cpool->time_of_last_age_check;
|
||||
uint32_t nb_alloc_cnts = mlx5_hws_cnt_pool_get_size(cpool);
|
||||
uint16_t expected1 = HWS_AGE_CANDIDATE;
|
||||
uint16_t expected2 = HWS_AGE_CANDIDATE_INSIDE_RING;
|
||||
uint32_t i;
|
||||
|
||||
cpool->time_of_last_age_check = curr_time;
|
||||
for (i = 0; i < nb_alloc_cnts; ++i) {
|
||||
uint32_t age_idx = cpool->pool[i].age_idx;
|
||||
uint64_t hits;
|
||||
|
||||
if (!cpool->pool[i].in_used || age_idx == 0)
|
||||
continue;
|
||||
param = mlx5_ipool_get(age_info->ages_ipool, age_idx);
|
||||
if (unlikely(param == NULL)) {
|
||||
/*
|
||||
* When an AGE uses an indirect counter, it is the user's
* responsibility not to use this indirect counter without
* the AGE.
|
||||
* If this counter is used after the AGE was freed, the
|
||||
* AGE index is invalid and using it here will cause a
|
||||
* segmentation fault.
|
||||
*/
|
||||
DRV_LOG(WARNING,
|
||||
"Counter %u is lost his AGE, it is unused.", i);
|
||||
continue;
|
||||
}
|
||||
if (param->timeout == 0)
|
||||
continue;
|
||||
switch (__atomic_load_n(¶m->state, __ATOMIC_RELAXED)) {
|
||||
case HWS_AGE_AGED_OUT_NOT_REPORTED:
|
||||
case HWS_AGE_AGED_OUT_REPORTED:
|
||||
/* Already aged-out, no action is needed. */
|
||||
continue;
|
||||
case HWS_AGE_CANDIDATE:
|
||||
case HWS_AGE_CANDIDATE_INSIDE_RING:
|
||||
/* This AGE candidate to be aged-out, go to checking. */
|
||||
break;
|
||||
case HWS_AGE_FREE:
|
||||
/*
|
||||
* AGE parameter with state "FREE" couldn't be pointed
|
||||
* by any counter since counter is destroyed first.
|
||||
* Fall-through.
|
||||
*/
|
||||
default:
|
||||
MLX5_ASSERT(0);
|
||||
continue;
|
||||
}
|
||||
hits = rte_be_to_cpu_64(stats[i].hits);
|
||||
if (param->nb_cnts == 1) {
|
||||
if (hits != param->accumulator_last_hits) {
|
||||
__atomic_store_n(¶m->sec_since_last_hit, 0,
|
||||
__ATOMIC_RELAXED);
|
||||
param->accumulator_last_hits = hits;
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
param->accumulator_hits += hits;
|
||||
param->accumulator_cnt++;
|
||||
if (param->accumulator_cnt < param->nb_cnts)
|
||||
continue;
|
||||
param->accumulator_cnt = 0;
|
||||
if (param->accumulator_last_hits !=
|
||||
param->accumulator_hits) {
|
||||
__atomic_store_n(¶m->sec_since_last_hit,
|
||||
0, __ATOMIC_RELAXED);
|
||||
param->accumulator_last_hits =
|
||||
param->accumulator_hits;
|
||||
param->accumulator_hits = 0;
|
||||
continue;
|
||||
}
|
||||
param->accumulator_hits = 0;
|
||||
}
|
||||
if (__atomic_add_fetch(¶m->sec_since_last_hit, time_delta,
|
||||
__ATOMIC_RELAXED) <=
|
||||
__atomic_load_n(¶m->timeout, __ATOMIC_RELAXED))
|
||||
continue;
|
||||
/* Prepare the relevant ring for this AGE parameter */
|
||||
if (priv->hws_strict_queue)
|
||||
r = age_info->hw_q_age->aged_lists[param->queue_id];
|
||||
else
|
||||
r = age_info->hw_age.aged_list;
|
||||
/* Changing the state atomically and insert it into the ring. */
|
||||
if (__atomic_compare_exchange_n(¶m->state, &expected1,
|
||||
HWS_AGE_AGED_OUT_NOT_REPORTED,
|
||||
false, __ATOMIC_RELAXED,
|
||||
__ATOMIC_RELAXED)) {
|
||||
int ret = rte_ring_enqueue_burst_elem(r, &age_idx,
|
||||
sizeof(uint32_t),
|
||||
1, NULL);
|
||||
|
||||
/*
|
||||
* The ring doesn't have enough room for this entry,
|
||||
* it puts the state back for the next second.
|
||||
*
|
||||
* FIXME: if it gets traffic before the next second, this
* "aged out" event is lost; to be fixed later when the
* ring is filled in bulks.
|
||||
*/
|
||||
expected2 = HWS_AGE_AGED_OUT_NOT_REPORTED;
|
||||
if (ret == 0 &&
|
||||
!__atomic_compare_exchange_n(¶m->state,
|
||||
&expected2, expected1,
|
||||
false,
|
||||
__ATOMIC_RELAXED,
|
||||
__ATOMIC_RELAXED) &&
|
||||
expected2 == HWS_AGE_FREE)
|
||||
mlx5_hws_age_param_free(priv,
|
||||
param->own_cnt_index,
|
||||
age_info->ages_ipool,
|
||||
age_idx);
|
||||
/* The event is irrelevant in strict queue mode. */
|
||||
if (!priv->hws_strict_queue)
|
||||
MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
|
||||
} else {
|
||||
__atomic_compare_exchange_n(¶m->state, &expected2,
|
||||
HWS_AGE_AGED_OUT_NOT_REPORTED,
|
||||
false, __ATOMIC_RELAXED,
|
||||
__ATOMIC_RELAXED);
|
||||
}
|
||||
}
|
||||
/* The event is irrelevant in strict queue mode. */
|
||||
if (!priv->hws_strict_queue)
|
||||
mlx5_age_event_prepare(priv->sh);
|
||||
}
|
||||
|
||||
static void
|
||||
mlx5_hws_cnt_raw_data_free(struct mlx5_dev_ctx_shared *sh,
|
||||
struct mlx5_hws_cnt_raw_data_mng *mng)
|
||||
@ -104,12 +273,14 @@ mlx5_hws_cnt_raw_data_alloc(struct mlx5_dev_ctx_shared *sh, uint32_t n)
|
||||
struct mlx5_hws_cnt_raw_data_mng *mng = NULL;
|
||||
int ret;
|
||||
size_t sz = n * sizeof(struct flow_counter_stats);
|
||||
size_t pgsz = rte_mem_page_size();
|
||||
|
||||
MLX5_ASSERT(pgsz > 0);
|
||||
mng = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sizeof(*mng), 0,
|
||||
SOCKET_ID_ANY);
|
||||
if (mng == NULL)
|
||||
goto error;
|
||||
mng->raw = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sz, 0,
|
||||
mng->raw = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sz, pgsz,
|
||||
SOCKET_ID_ANY);
|
||||
if (mng->raw == NULL)
|
||||
goto error;
|
||||
@ -146,6 +317,9 @@ mlx5_hws_cnt_svc(void *opaque)
|
||||
opriv->sh == sh &&
|
||||
opriv->hws_cpool != NULL) {
|
||||
__mlx5_hws_cnt_svc(sh, opriv->hws_cpool);
|
||||
if (opriv->hws_age_req)
|
||||
mlx5_hws_aging_check(opriv,
|
||||
opriv->hws_cpool);
|
||||
}
|
||||
}
|
||||
query_cycle = rte_rdtsc() - start_cycle;
|
||||
@ -158,8 +332,9 @@ mlx5_hws_cnt_svc(void *opaque)
|
||||
}
|
||||
|
||||
struct mlx5_hws_cnt_pool *
|
||||
mlx5_hws_cnt_pool_init(const struct mlx5_hws_cnt_pool_cfg *pcfg,
|
||||
const struct mlx5_hws_cache_param *ccfg)
|
||||
mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
|
||||
const struct mlx5_hws_cnt_pool_cfg *pcfg,
|
||||
const struct mlx5_hws_cache_param *ccfg)
|
||||
{
|
||||
char mz_name[RTE_MEMZONE_NAMESIZE];
|
||||
struct mlx5_hws_cnt_pool *cntp;
|
||||
@ -185,16 +360,26 @@ mlx5_hws_cnt_pool_init(const struct mlx5_hws_cnt_pool_cfg *pcfg,
|
||||
cntp->cache->preload_sz = ccfg->preload_sz;
|
||||
cntp->cache->threshold = ccfg->threshold;
|
||||
cntp->cache->q_num = ccfg->q_num;
|
||||
if (pcfg->request_num > sh->hws_max_nb_counters) {
|
||||
DRV_LOG(ERR, "Counter number %u "
|
||||
"is greater than the maximum supported (%u).",
|
||||
pcfg->request_num, sh->hws_max_nb_counters);
|
||||
goto error;
|
||||
}
|
||||
cnt_num = pcfg->request_num * (100 + pcfg->alloc_factor) / 100;
|
||||
if (cnt_num > UINT32_MAX) {
|
||||
DRV_LOG(ERR, "counter number %"PRIu64" is out of 32bit range",
|
||||
cnt_num);
|
||||
goto error;
|
||||
}
|
||||
/*
|
||||
* When counter request number is supported, but the factor takes it
|
||||
* out of size, the factor is reduced.
|
||||
*/
|
||||
cnt_num = RTE_MIN((uint32_t)cnt_num, sh->hws_max_nb_counters);
|
||||
cntp->pool = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO,
|
||||
sizeof(struct mlx5_hws_cnt) *
|
||||
pcfg->request_num * (100 + pcfg->alloc_factor) / 100,
|
||||
0, SOCKET_ID_ANY);
|
||||
sizeof(struct mlx5_hws_cnt) * cnt_num,
|
||||
0, SOCKET_ID_ANY);
|
||||
if (cntp->pool == NULL)
|
||||
goto error;
|
||||
snprintf(mz_name, sizeof(mz_name), "%s_F_RING", pcfg->name);
|
||||
@ -231,6 +416,8 @@ mlx5_hws_cnt_pool_init(const struct mlx5_hws_cnt_pool_cfg *pcfg,
|
||||
if (cntp->cache->qcache[qidx] == NULL)
|
||||
goto error;
|
||||
}
|
||||
/* Initialize the time for aging-out calculation. */
|
||||
cntp->time_of_last_age_check = MLX5_CURR_TIME_SEC;
|
||||
return cntp;
|
||||
error:
|
||||
mlx5_hws_cnt_pool_deinit(cntp);
|
||||
@ -297,19 +484,17 @@ mlx5_hws_cnt_pool_dcs_alloc(struct mlx5_dev_ctx_shared *sh,
|
||||
struct mlx5_hws_cnt_pool *cpool)
|
||||
{
|
||||
struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
|
||||
uint32_t max_log_bulk_sz = 0;
|
||||
uint32_t max_log_bulk_sz = sh->hws_max_log_bulk_sz;
|
||||
uint32_t log_bulk_sz;
|
||||
uint32_t idx, alloced = 0;
|
||||
uint32_t idx, alloc_candidate, alloced = 0;
|
||||
unsigned int cnt_num = mlx5_hws_cnt_pool_get_size(cpool);
|
||||
struct mlx5_devx_counter_attr attr = {0};
|
||||
struct mlx5_devx_obj *dcs;
|
||||
|
||||
if (hca_attr->flow_counter_bulk_log_max_alloc == 0) {
|
||||
DRV_LOG(ERR,
|
||||
"Fw doesn't support bulk log max alloc");
|
||||
DRV_LOG(ERR, "Fw doesn't support bulk log max alloc");
|
||||
return -1;
|
||||
}
|
||||
max_log_bulk_sz = 23; /* hard code to 8M (1 << 23). */
|
||||
cnt_num = RTE_ALIGN_CEIL(cnt_num, 4); /* minimal 4 counter in bulk. */
|
||||
log_bulk_sz = RTE_MIN(max_log_bulk_sz, rte_log2_u32(cnt_num));
|
||||
attr.pd = sh->cdev->pdn;
|
||||
@ -327,18 +512,23 @@ mlx5_hws_cnt_pool_dcs_alloc(struct mlx5_dev_ctx_shared *sh,
|
||||
cpool->dcs_mng.dcs[0].iidx = 0;
|
||||
alloced = cpool->dcs_mng.dcs[0].batch_sz;
|
||||
if (cnt_num > cpool->dcs_mng.dcs[0].batch_sz) {
|
||||
for (; idx < MLX5_HWS_CNT_DCS_NUM; idx++) {
|
||||
while (idx < MLX5_HWS_CNT_DCS_NUM) {
|
||||
attr.flow_counter_bulk_log_size = --max_log_bulk_sz;
|
||||
alloc_candidate = RTE_BIT32(max_log_bulk_sz);
|
||||
if (alloced + alloc_candidate > sh->hws_max_nb_counters)
|
||||
continue;
|
||||
dcs = mlx5_devx_cmd_flow_counter_alloc_general
|
||||
(sh->cdev->ctx, &attr);
|
||||
if (dcs == NULL)
|
||||
goto error;
|
||||
cpool->dcs_mng.dcs[idx].obj = dcs;
|
||||
cpool->dcs_mng.dcs[idx].batch_sz =
|
||||
(1 << max_log_bulk_sz);
|
||||
cpool->dcs_mng.dcs[idx].batch_sz = alloc_candidate;
|
||||
cpool->dcs_mng.dcs[idx].iidx = alloced;
|
||||
alloced += cpool->dcs_mng.dcs[idx].batch_sz;
|
||||
cpool->dcs_mng.batch_total++;
|
||||
if (alloced >= cnt_num)
|
||||
break;
|
||||
idx++;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
@ -445,7 +635,7 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
|
||||
dev->data->port_id);
|
||||
pcfg.name = mp_name;
|
||||
pcfg.request_num = pattr->nb_counters;
|
||||
cpool = mlx5_hws_cnt_pool_init(&pcfg, &cparam);
|
||||
cpool = mlx5_hws_cnt_pool_init(priv->sh, &pcfg, &cparam);
|
||||
if (cpool == NULL)
|
||||
goto error;
|
||||
ret = mlx5_hws_cnt_pool_dcs_alloc(priv->sh, cpool);
|
||||
@ -525,4 +715,533 @@ mlx5_hws_cnt_svc_deinit(struct mlx5_dev_ctx_shared *sh)
|
||||
sh->cnt_svc = NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* Destroy AGE action.
|
||||
*
|
||||
* @param priv
|
||||
* Pointer to the port private data structure.
|
||||
* @param idx
|
||||
* Index of AGE parameter.
|
||||
* @param error
|
||||
* Pointer to error structure.
|
||||
*
|
||||
* @return
|
||||
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
||||
*/
|
||||
int
|
||||
mlx5_hws_age_action_destroy(struct mlx5_priv *priv, uint32_t idx,
|
||||
struct rte_flow_error *error)
|
||||
{
|
||||
struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
|
||||
struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
|
||||
struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, idx);
|
||||
|
||||
if (param == NULL)
|
||||
return rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
|
||||
"invalid AGE parameter index");
|
||||
switch (__atomic_exchange_n(¶m->state, HWS_AGE_FREE,
|
||||
__ATOMIC_RELAXED)) {
|
||||
case HWS_AGE_CANDIDATE:
|
||||
case HWS_AGE_AGED_OUT_REPORTED:
|
||||
mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
|
||||
break;
|
||||
case HWS_AGE_AGED_OUT_NOT_REPORTED:
|
||||
case HWS_AGE_CANDIDATE_INSIDE_RING:
|
||||
/*
|
||||
* In both cases AGE is inside the ring. Change the state here
|
||||
* and destroy it later when it is taken out of ring.
|
||||
*/
|
||||
break;
|
||||
case HWS_AGE_FREE:
|
||||
/*
|
||||
* If index is valid and state is FREE, it says this AGE has
|
||||
* been freed for the user but not for the PMD since it is
|
||||
* inside the ring.
|
||||
*/
|
||||
return rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
|
||||
"this AGE has already been released");
|
||||
default:
|
||||
MLX5_ASSERT(0);
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create AGE action parameter.
|
||||
*
|
||||
* @param[in] priv
|
||||
* Pointer to the port private data structure.
|
||||
* @param[in] queue_id
|
||||
* Which HWS queue to be used.
|
||||
* @param[in] shared
|
||||
* Whether it is an indirect AGE action.
|
||||
* @param[in] flow_idx
|
||||
* Flow index from indexed pool.
|
||||
* For an indirect AGE action it has no effect.
|
||||
* @param[in] age
|
||||
* Pointer to the aging action configuration.
|
||||
* @param[out] error
|
||||
* Pointer to error structure.
|
||||
*
|
||||
* @return
|
||||
* Index to AGE action parameter on success, 0 otherwise.
|
||||
*/
|
||||
uint32_t
|
||||
mlx5_hws_age_action_create(struct mlx5_priv *priv, uint32_t queue_id,
|
||||
bool shared, const struct rte_flow_action_age *age,
|
||||
uint32_t flow_idx, struct rte_flow_error *error)
|
||||
{
|
||||
struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
|
||||
struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
|
||||
struct mlx5_hws_age_param *param;
|
||||
uint32_t age_idx;
|
||||
|
||||
param = mlx5_ipool_malloc(ipool, &age_idx);
|
||||
if (param == NULL) {
|
||||
rte_flow_error_set(error, ENOMEM,
|
||||
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
|
||||
"cannot allocate AGE parameter");
|
||||
return 0;
|
||||
}
|
||||
MLX5_ASSERT(__atomic_load_n(¶m->state,
|
||||
__ATOMIC_RELAXED) == HWS_AGE_FREE);
|
||||
if (shared) {
|
||||
param->nb_cnts = 0;
|
||||
param->accumulator_hits = 0;
|
||||
param->accumulator_cnt = 0;
|
||||
flow_idx = age_idx;
|
||||
} else {
|
||||
param->nb_cnts = 1;
|
||||
}
|
||||
param->context = age->context ? age->context :
|
||||
(void *)(uintptr_t)flow_idx;
|
||||
param->timeout = age->timeout;
|
||||
param->queue_id = queue_id;
|
||||
param->accumulator_last_hits = 0;
|
||||
param->own_cnt_index = 0;
|
||||
param->sec_since_last_hit = 0;
|
||||
param->state = HWS_AGE_CANDIDATE;
|
||||
return age_idx;
|
||||
}
|
||||
|
||||
/**
|
||||
* Update indirect AGE action parameter.
|
||||
*
|
||||
* @param[in] priv
|
||||
* Pointer to the port private data structure.
|
||||
* @param[in] idx
|
||||
* Index of AGE parameter.
|
||||
* @param[in] update
|
||||
* Update value.
|
||||
* @param[out] error
|
||||
* Pointer to error structure.
|
||||
*
|
||||
* @return
|
||||
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
||||
*/
|
||||
int
|
||||
mlx5_hws_age_action_update(struct mlx5_priv *priv, uint32_t idx,
|
||||
const void *update, struct rte_flow_error *error)
|
||||
{
|
||||
#ifdef MLX5_HAVE_RTE_FLOW_Q_AGE
|
||||
const struct rte_flow_update_age *update_ade = update;
|
||||
struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
|
||||
struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
|
||||
struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, idx);
|
||||
bool sec_since_last_hit_reset = false;
|
||||
bool state_update = false;
|
||||
|
||||
if (param == NULL)
|
||||
return rte_flow_error_set(error, EINVAL,
|
||||
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
|
||||
"invalid AGE parameter index");
|
||||
if (update_ade->timeout_valid) {
|
||||
uint32_t old_timeout = __atomic_exchange_n(¶m->timeout,
|
||||
update_ade->timeout,
|
||||
__ATOMIC_RELAXED);
|
||||
|
||||
if (old_timeout == 0)
|
||||
sec_since_last_hit_reset = true;
|
||||
else if (old_timeout < update_ade->timeout ||
|
||||
update_ade->timeout == 0)
|
||||
/*
|
||||
* When timeout is increased, aged-out flows might be
|
||||
* active again and state should be updated accordingly.
|
||||
* When new timeout is 0, we update the state for not
|
||||
* reporting aged-out stopped.
|
||||
*/
|
||||
state_update = true;
|
||||
}
|
||||
if (update_ade->touch) {
|
||||
sec_since_last_hit_reset = true;
|
||||
state_update = true;
|
||||
}
|
||||
if (sec_since_last_hit_reset)
|
||||
__atomic_store_n(¶m->sec_since_last_hit, 0,
|
||||
__ATOMIC_RELAXED);
|
||||
if (state_update) {
|
||||
uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
|
||||
|
||||
/*
|
||||
* Change states of aged-out flows to active:
|
||||
* - AGED_OUT_NOT_REPORTED -> CANDIDATE_INSIDE_RING
|
||||
* - AGED_OUT_REPORTED -> CANDIDATE
|
||||
*/
|
||||
if (!__atomic_compare_exchange_n(¶m->state, &expected,
|
||||
HWS_AGE_CANDIDATE_INSIDE_RING,
|
||||
false, __ATOMIC_RELAXED,
|
||||
__ATOMIC_RELAXED) &&
|
||||
expected == HWS_AGE_AGED_OUT_REPORTED)
|
||||
__atomic_store_n(¶m->state, HWS_AGE_CANDIDATE,
|
||||
__ATOMIC_RELAXED);
|
||||
}
|
||||
return 0;
|
||||
#else
|
||||
RTE_SET_USED(priv);
|
||||
RTE_SET_USED(idx);
|
||||
RTE_SET_USED(update);
|
||||
return rte_flow_error_set(error, ENOTSUP,
|
||||
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
|
||||
"update age action not supported");
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the AGE context if the aged-out index is still valid.
|
||||
*
|
||||
* @param priv
|
||||
* Pointer to the port private data structure.
|
||||
* @param idx
|
||||
* Index of AGE parameter.
|
||||
*
|
||||
* @return
|
||||
* AGE context if the index is still aged-out, NULL otherwise.
|
||||
*/
|
||||
void *
|
||||
mlx5_hws_age_context_get(struct mlx5_priv *priv, uint32_t idx)
|
||||
{
|
||||
struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
|
||||
struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
|
||||
struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, idx);
|
||||
uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
|
||||
|
||||
MLX5_ASSERT(param != NULL);
|
||||
if (__atomic_compare_exchange_n(¶m->state, &expected,
|
||||
HWS_AGE_AGED_OUT_REPORTED, false,
|
||||
__ATOMIC_RELAXED, __ATOMIC_RELAXED))
|
||||
return param->context;
|
||||
switch (expected) {
|
||||
case HWS_AGE_FREE:
|
||||
/*
|
||||
* This AGE couldn't have been destroyed since it was inside
|
||||
* the ring. Its state has been updated, and now it is actually
|
||||
* destroyed.
|
||||
*/
|
||||
mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
|
||||
break;
|
||||
case HWS_AGE_CANDIDATE_INSIDE_RING:
|
||||
__atomic_store_n(¶m->state, HWS_AGE_CANDIDATE,
|
||||
__ATOMIC_RELAXED);
|
||||
break;
|
||||
case HWS_AGE_CANDIDATE:
|
||||
/*
|
||||
* Only BG thread pushes to ring and it never pushes this state.
|
||||
* When AGE inside the ring becomes candidate, it has a special
|
||||
* state called HWS_AGE_CANDIDATE_INSIDE_RING.
|
||||
* Fall-through.
|
||||
*/
|
||||
case HWS_AGE_AGED_OUT_REPORTED:
|
||||
/*
|
||||
* Only this thread (doing query) may write this state, and it
|
||||
* happens only after the query thread takes it out of the ring.
|
||||
* Fall-through.
|
||||
*/
|
||||
case HWS_AGE_AGED_OUT_NOT_REPORTED:
|
||||
/*
|
||||
* In this case the compare return true and function return
|
||||
* the context immediately.
|
||||
* Fall-through.
|
||||
*/
|
||||
default:
|
||||
MLX5_ASSERT(0);
|
||||
break;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#ifdef RTE_ARCH_64
|
||||
#define MLX5_HWS_AGED_OUT_RING_SIZE_MAX UINT32_MAX
|
||||
#else
|
||||
#define MLX5_HWS_AGED_OUT_RING_SIZE_MAX RTE_BIT32(8)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Get the size of aged out ring list for each queue.
|
||||
*
|
||||
* The size is one percent of nb_counters divided by nb_queues.
|
||||
* The ring size must be a power of 2, so it is aligned up to a power of 2.
* In 32-bit systems, the size is limited to 256.
|
||||
*
|
||||
* This function is called when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is on.
|
||||
*
|
||||
* @param nb_counters
|
||||
* Final number of allocated counter in the pool.
|
||||
* @param nb_queues
|
||||
* Number of HWS queues in this port.
|
||||
*
|
||||
* @return
|
||||
* Size of aged out ring per queue.
|
||||
*/
|
||||
static __rte_always_inline uint32_t
|
||||
mlx5_hws_aged_out_q_ring_size_get(uint32_t nb_counters, uint32_t nb_queues)
|
||||
{
|
||||
uint32_t size = rte_align32pow2((nb_counters / 100) / nb_queues);
|
||||
uint32_t max_size = MLX5_HWS_AGED_OUT_RING_SIZE_MAX;
|
||||
|
||||
return RTE_MIN(size, max_size);
|
||||
}
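As a worked example with illustrative numbers (not from the patch): for 65536
allocated counters and 4 HWS queues, one percent per queue is 65536 / 100 / 4 = 163,
which rte_align32pow2() rounds up to 256, so each per-queue aged-out ring gets
256 entries (also the cap on 32-bit systems).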
|
||||
|
||||
/**
|
||||
* Get the size of the aged out ring list.
|
||||
*
|
||||
* The size is one percent of nb_counters.
|
||||
* The ring size must be a power of 2, so it is aligned up to a power of 2.
* In 32-bit systems, the size is limited to 256.
|
||||
*
|
||||
* This function is called when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is off.
|
||||
*
|
||||
* @param nb_counters
|
||||
* Final number of allocated counter in the pool.
|
||||
*
|
||||
* @return
|
||||
* Size of the aged out ring list.
|
||||
*/
|
||||
static __rte_always_inline uint32_t
|
||||
mlx5_hws_aged_out_ring_size_get(uint32_t nb_counters)
|
||||
{
|
||||
uint32_t size = rte_align32pow2(nb_counters / 100);
|
||||
uint32_t max_size = MLX5_HWS_AGED_OUT_RING_SIZE_MAX;
|
||||
|
||||
return RTE_MIN(size, max_size);
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the shared aging list information per port.
|
||||
*
|
||||
* @param dev
|
||||
* Pointer to the rte_eth_dev structure.
|
||||
* @param nb_queues
|
||||
* Number of HWS queues.
|
||||
* @param strict_queue
|
||||
* Indicator whether is strict_queue mode.
|
||||
* @param ring_size
|
||||
* Size of aged-out ring for creation.
|
||||
*
|
||||
* @return
|
||||
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
||||
*/
|
||||
static int
|
||||
mlx5_hws_age_info_init(struct rte_eth_dev *dev, uint16_t nb_queues,
|
||||
bool strict_queue, uint32_t ring_size)
|
||||
{
|
||||
struct mlx5_priv *priv = dev->data->dev_private;
|
||||
struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
|
||||
uint32_t flags = RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ;
|
||||
char mz_name[RTE_MEMZONE_NAMESIZE];
|
||||
struct rte_ring *r = NULL;
|
||||
uint32_t qidx;
|
||||
|
||||
age_info->flags = 0;
|
||||
if (strict_queue) {
|
||||
size_t size = sizeof(*age_info->hw_q_age) +
|
||||
sizeof(struct rte_ring *) * nb_queues;
|
||||
|
||||
age_info->hw_q_age = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO,
|
||||
size, 0, SOCKET_ID_ANY);
|
||||
if (age_info->hw_q_age == NULL)
|
||||
return -ENOMEM;
|
||||
for (qidx = 0; qidx < nb_queues; ++qidx) {
|
||||
snprintf(mz_name, sizeof(mz_name),
|
||||
"port_%u_queue_%u_aged_out_ring",
|
||||
dev->data->port_id, qidx);
|
||||
r = rte_ring_create(mz_name, ring_size, SOCKET_ID_ANY,
|
||||
flags);
|
||||
if (r == NULL) {
|
||||
DRV_LOG(ERR, "\"%s\" creation failed: %s",
|
||||
mz_name, rte_strerror(rte_errno));
|
||||
goto error;
|
||||
}
|
||||
age_info->hw_q_age->aged_lists[qidx] = r;
|
||||
DRV_LOG(DEBUG,
|
||||
"\"%s\" is successfully created (size=%u).",
|
||||
mz_name, ring_size);
|
||||
}
|
||||
age_info->hw_q_age->nb_rings = nb_queues;
|
||||
} else {
|
||||
snprintf(mz_name, sizeof(mz_name), "port_%u_aged_out_ring",
|
||||
dev->data->port_id);
|
||||
r = rte_ring_create(mz_name, ring_size, SOCKET_ID_ANY, flags);
|
||||
if (r == NULL) {
|
||||
DRV_LOG(ERR, "\"%s\" creation failed: %s", mz_name,
|
||||
rte_strerror(rte_errno));
|
||||
return -rte_errno;
|
||||
}
|
||||
age_info->hw_age.aged_list = r;
|
||||
DRV_LOG(DEBUG, "\"%s\" is successfully created (size=%u).",
|
||||
mz_name, ring_size);
|
||||
/* In non "strict_queue" mode, initialize the event. */
|
||||
MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
|
||||
}
|
||||
return 0;
|
||||
error:
|
||||
MLX5_ASSERT(strict_queue);
|
||||
while (qidx--)
|
||||
rte_ring_free(age_info->hw_q_age->aged_lists[qidx]);
|
||||
mlx5_free(age_info->hw_q_age);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Cleanup aged-out ring before destroying.
|
||||
*
|
||||
* @param priv
|
||||
* Pointer to port private object.
|
||||
* @param r
|
||||
* Pointer to aged-out ring object.
|
||||
*/
|
||||
static void
|
||||
mlx5_hws_aged_out_ring_cleanup(struct mlx5_priv *priv, struct rte_ring *r)
|
||||
{
|
||||
int ring_size = rte_ring_count(r);
|
||||
|
||||
while (ring_size > 0) {
|
||||
uint32_t age_idx = 0;
|
||||
|
||||
if (rte_ring_dequeue_elem(r, &age_idx, sizeof(uint32_t)) < 0)
|
||||
break;
|
||||
/* get the AGE context if the aged-out index is still valid. */
|
||||
mlx5_hws_age_context_get(priv, age_idx);
|
||||
ring_size--;
|
||||
}
|
||||
rte_ring_free(r);
|
||||
}
|
||||
|
||||
/**
|
||||
* Destroy the shared aging list information per port.
|
||||
*
|
||||
* @param priv
|
||||
* Pointer to port private object.
|
||||
*/
|
||||
static void
|
||||
mlx5_hws_age_info_destroy(struct mlx5_priv *priv)
|
||||
{
|
||||
struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
|
||||
uint16_t nb_queues = age_info->hw_q_age->nb_rings;
|
||||
struct rte_ring *r;
|
||||
|
||||
if (priv->hws_strict_queue) {
|
||||
uint32_t qidx;
|
||||
|
||||
for (qidx = 0; qidx < nb_queues; ++qidx) {
|
||||
r = age_info->hw_q_age->aged_lists[qidx];
|
||||
mlx5_hws_aged_out_ring_cleanup(priv, r);
|
||||
}
|
||||
mlx5_free(age_info->hw_q_age);
|
||||
} else {
|
||||
r = age_info->hw_age.aged_list;
|
||||
mlx5_hws_aged_out_ring_cleanup(priv, r);
|
||||
}
|
||||
}
|
||||

/**
 * Initialize the aging mechanism per port.
 *
 * @param dev
 *   Pointer to the rte_eth_dev structure.
 * @param attr
 *   Port configuration attributes.
 * @param nb_queues
 *   Number of HWS queues.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_hws_age_pool_init(struct rte_eth_dev *dev,
		       const struct rte_flow_port_attr *attr,
		       uint16_t nb_queues)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
	struct mlx5_indexed_pool_config cfg = {
		.size =
		      RTE_CACHE_LINE_ROUNDUP(sizeof(struct mlx5_hws_age_param)),
		.trunk_size = 1 << 12,
		.per_core_cache = 1 << 13,
		.need_lock = 1,
		.release_mem_en = !!priv->sh->config.reclaim_mode,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_hws_age_pool",
	};
	bool strict_queue = false;
	uint32_t nb_alloc_cnts;
	uint32_t rsize;
	uint32_t nb_ages_updated;
	int ret;

#ifdef MLX5_HAVE_RTE_FLOW_Q_AGE
	strict_queue = !!(attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE);
#endif
	MLX5_ASSERT(priv->hws_cpool);
	nb_alloc_cnts = mlx5_hws_cnt_pool_get_size(priv->hws_cpool);
	if (strict_queue) {
		rsize = mlx5_hws_aged_out_q_ring_size_get(nb_alloc_cnts,
							  nb_queues);
		nb_ages_updated = rsize * nb_queues + attr->nb_aging_objects;
	} else {
		rsize = mlx5_hws_aged_out_ring_size_get(nb_alloc_cnts);
		nb_ages_updated = rsize + attr->nb_aging_objects;
	}
	ret = mlx5_hws_age_info_init(dev, nb_queues, strict_queue, rsize);
	if (ret < 0)
		return ret;
	cfg.max_idx = rte_align32pow2(nb_ages_updated);
	if (cfg.max_idx <= cfg.trunk_size) {
		cfg.per_core_cache = 0;
		cfg.trunk_size = cfg.max_idx;
	} else if (cfg.max_idx <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
		cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
	}
	age_info->ages_ipool = mlx5_ipool_create(&cfg);
	if (age_info->ages_ipool == NULL) {
		mlx5_hws_age_info_destroy(priv);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv->hws_age_req = 1;
	return 0;
}
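
For context, this pool is sized from the attributes the application passes at port
configuration time, where ``nb_aging_objects`` and ``nb_counters`` come from
``struct rte_flow_port_attr``. Below is a minimal application-side sketch (illustrative
only; ``port_id`` and the numeric sizes are example values, and error handling is elided):

/* Application side: reserve counters and aging objects up front. */
struct rte_flow_port_attr port_attr = {
	.nb_counters = 1 << 16,      /* flows using COUNT, with or without AGE. */
	.nb_aging_objects = 1 << 12, /* flows using AGE. */
	.flags = RTE_FLOW_PORT_FLAG_STRICT_QUEUE, /* optional: per-queue aged-out lists. */
};
struct rte_flow_queue_attr queue_attr = { .size = 128 };
const struct rte_flow_queue_attr *qattr[] = { &queue_attr };
struct rte_flow_error error;

/* The PMD calls mlx5_hws_age_pool_init() internally from this configure step. */
if (rte_flow_configure(port_id, &port_attr, 1, qattr, &error) != 0)
	rte_exit(EXIT_FAILURE, "rte_flow_configure failed: %s\n", error.message);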

/**
 * Cleanup all aging resources per port.
 *
 * @param priv
 *   Pointer to port private object.
 */
void
mlx5_hws_age_pool_destroy(struct mlx5_priv *priv)
{
	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);

	MLX5_ASSERT(priv->hws_age_req);
	mlx5_hws_age_info_destroy(priv);
	mlx5_ipool_destroy(age_info->ages_ipool);
	age_info->ages_ipool = NULL;
	priv->hws_age_req = 0;
}

#endif
@ -10,26 +10,26 @@
#include "mlx5_flow.h"

/*
 * COUNTER ID's layout
 * HWS COUNTER ID's layout
 *       3                   2                   1                   0
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    | T |          | D |                                          |
 *    ~ Y |          | C |              IDX                         ~
 *    | P |          | S |                                          |
 *  | T |         | D |                                             |
 *  ~ Y |         | C |                   IDX                       ~
 *  | P |         | S |                                             |
 *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *  Bit 31:30 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
 *  Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
 *  Bit 25:24 = DCS index
 *  Bit 23:00 = IDX within the DCS bulk this counter belongs to.
 */
typedef uint32_t cnt_id_t;

#define MLX5_HWS_CNT_DCS_NUM 4
#define MLX5_HWS_CNT_DCS_IDX_OFFSET 24
#define MLX5_HWS_CNT_DCS_IDX_MASK 0x3
#define MLX5_HWS_CNT_IDX_MASK ((1UL << MLX5_HWS_CNT_DCS_IDX_OFFSET) - 1)

#define MLX5_HWS_AGE_IDX_MASK (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1)
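
As a quick illustration of the layout above, this is how an ID splits using the masks
defined here (a sketch only; ``cnt_id`` and ``handle`` are placeholder variables, and the
in-tree helpers mlx5_hws_cnt_iidx()/mlx5_hws_cnt_id_gen() shown further down are the
authoritative implementation):

/* Illustrative decode of a cnt_id_t into (DCS index, in-bulk offset). */
uint8_t dcs_idx = (cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET) &
		  MLX5_HWS_CNT_DCS_IDX_MASK;
uint32_t offset = cnt_id & MLX5_HWS_CNT_IDX_MASK;
/* For an indirect AGE handle, the low bits carry the AGE parameter index. */
uint32_t age_idx = handle & MLX5_HWS_AGE_IDX_MASK;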

struct mlx5_hws_cnt_dcs {
	void *dr_action;
	uint32_t batch_sz;
@ -44,12 +44,22 @@ struct mlx5_hws_cnt_dcs_mng {

struct mlx5_hws_cnt {
	struct flow_counter_stats reset;
	bool in_used; /* Indicator whether this counter is in use or in the pool. */
	union {
	uint32_t share: 1;
	/*
	 * share will be set to 1 when this counter is used as indirect
	 * action. Only meaningful when user own this counter.
	 */
		struct {
			uint32_t share:1;
			/*
			 * share will be set to 1 when this counter is used as
			 * indirect action.
			 */
			uint32_t age_idx:24;
			/*
			 * When this counter is used for aging, it saves the
			 * index of its AGE parameter. For a pure counter
			 * (without aging) this index is zero.
			 */
		};
		/* This struct is only meaningful when the user owns this counter. */
		uint32_t query_gen_when_free;
	/*
	 * When PMD owns this counter (user puts the counter back to PMD
@ -96,8 +106,48 @@ struct mlx5_hws_cnt_pool {
	struct rte_ring *free_list;
	struct rte_ring *wait_reset_list;
	struct mlx5_hws_cnt_pool_caches *cache;
	uint64_t time_of_last_age_check;
} __rte_cache_aligned;

/* HWS AGE status. */
enum {
	HWS_AGE_FREE, /* Initialized state. */
	HWS_AGE_CANDIDATE, /* AGE assigned to flows. */
	HWS_AGE_CANDIDATE_INSIDE_RING,
	/*
	 * AGE assigned to flows but still inside the aged-out ring. It was
	 * aged-out, but the timeout was changed, so it is in the ring yet
	 * still a candidate.
	 */
	HWS_AGE_AGED_OUT_REPORTED,
	/*
	 * Aged-out, reported by rte_flow_get_q_aged_flows and waiting for
	 * destroy.
	 */
	HWS_AGE_AGED_OUT_NOT_REPORTED,
	/*
	 * Aged-out, inside the aged-out ring.
	 * Waiting for rte_flow_get_q_aged_flows and destroy.
	 */
};
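
Read together, the state descriptions above suggest roughly the following lifecycle
(inferred from the comments here; the exact transitions live in the aging service and
action-update code, which is not part of this hunk):

/*
 * Typical AGE parameter lifecycle (illustrative):
 *   FREE -> CANDIDATE                  on AGE action creation / attach to flows,
 *   CANDIDATE -> AGED_OUT_NOT_REPORTED when the background check enqueues the
 *                                      index into the aged-out ring,
 *   AGED_OUT_NOT_REPORTED -> AGED_OUT_REPORTED
 *                                      once rte_flow_get_q_aged_flows returns it,
 *   -> CANDIDATE_INSIDE_RING           if the timeout is updated while the index
 *                                      is still queued in the ring,
 *   any state -> FREE                  on destroy.
 */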

/* HWS counter age parameter. */
struct mlx5_hws_age_param {
	uint32_t timeout; /* Aging timeout in seconds (atomically accessed). */
	uint32_t sec_since_last_hit;
	/* Time in seconds since last hit (atomically accessed). */
	uint16_t state; /* AGE state (atomically accessed). */
	uint64_t accumulator_last_hits;
	/* Last total value of hits for comparing. */
	uint64_t accumulator_hits;
	/* Accumulator for hits coming from several counters. */
	uint32_t accumulator_cnt;
	/* Number of counters which already updated the accumulator this second. */
	uint32_t nb_cnts; /* Number of counters used by this AGE. */
	uint32_t queue_id; /* Queue id of the counter. */
	cnt_id_t own_cnt_index;
	/* Counter action created specifically for this AGE action. */
	void *context; /* Flow AGE context. */
} __rte_packed __rte_cache_aligned;
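
The accumulator fields above drive the per-second aging check run by the counter
service thread: hits from the counters behind an AGE are folded into the accumulator,
and an unchanged total for ``timeout`` consecutive seconds ages the entry out. A
simplified sketch of that comparison (illustrative only; hws_age_check_one() is a
hypothetical helper, and the real code also aggregates accumulator_hits across nb_cnts
counters and pushes the aged-out index into the reporting ring):

/* Illustrative per-second check for one AGE parameter. */
static void
hws_age_check_one(struct mlx5_hws_age_param *param, uint64_t total_hits)
{
	uint32_t timeout = __atomic_load_n(&param->timeout, __ATOMIC_RELAXED);
	uint32_t elapsed;

	if (total_hits != param->accumulator_last_hits) {
		/* The flows behind this AGE were hit: restart the idle time. */
		param->accumulator_last_hits = total_hits;
		__atomic_store_n(&param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
		return;
	}
	elapsed = __atomic_add_fetch(&param->sec_since_last_hit, 1,
				     __ATOMIC_RELAXED);
	if (elapsed >= timeout) {
		/* Mark aged-out; the index is then queued for reporting. */
		uint16_t expected = HWS_AGE_CANDIDATE;

		__atomic_compare_exchange_n(&param->state, &expected,
					    HWS_AGE_AGED_OUT_NOT_REPORTED,
					    false, __ATOMIC_RELAXED,
					    __ATOMIC_RELAXED);
	}
}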

/**
 * Translate counter id into internal index (start from 0), which can be used
 * as index of raw/cnt pool.
@ -107,7 +157,7 @@ struct mlx5_hws_cnt_pool {
 * @return
 *   Internal index
 */
static __rte_always_inline cnt_id_t
static __rte_always_inline uint32_t
mlx5_hws_cnt_iidx(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
{
	uint8_t dcs_idx = cnt_id >> MLX5_HWS_CNT_DCS_IDX_OFFSET;
@ -139,7 +189,7 @@ mlx5_hws_cnt_id_valid(cnt_id_t cnt_id)
 *   Counter id
 */
static __rte_always_inline cnt_id_t
mlx5_hws_cnt_id_gen(struct mlx5_hws_cnt_pool *cpool, cnt_id_t iidx)
mlx5_hws_cnt_id_gen(struct mlx5_hws_cnt_pool *cpool, uint32_t iidx)
{
	struct mlx5_hws_cnt_dcs_mng *dcs_mng = &cpool->dcs_mng;
	uint32_t idx;
@ -344,9 +394,10 @@ mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool,
	struct rte_ring_zc_data zcdr = {0};
	struct rte_ring *qcache = NULL;
	unsigned int wb_num = 0; /* cache write-back number. */
	cnt_id_t iidx;
	uint32_t iidx;

	iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
	cpool->pool[iidx].in_used = false;
	cpool->pool[iidx].query_gen_when_free =
		__atomic_load_n(&cpool->query_gen, __ATOMIC_RELAXED);
	if (likely(queue != NULL))
@ -388,20 +439,23 @@ mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool,
 *   A pointer to HWS queue. If null, it means fetch from common pool.
 * @param cnt_id
 *   A pointer to a cnt_id_t * pointer (counter id) that will be filled.
 * @param age_idx
 *   Index of AGE parameter using this counter, zero means there is no such AGE.
 *
 * @return
 *   - 0: Success; objects taken.
 *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
 *   - -EAGAIN: counter is not ready; try again.
 */
static __rte_always_inline int
mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool,
		      uint32_t *queue, cnt_id_t *cnt_id)
mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
		      cnt_id_t *cnt_id, uint32_t age_idx)
{
	unsigned int ret;
	struct rte_ring_zc_data zcdc = {0};
	struct rte_ring *qcache = NULL;
	uint32_t query_gen = 0;
	cnt_id_t iidx, tmp_cid = 0;
	uint32_t iidx, query_gen = 0;
	cnt_id_t tmp_cid = 0;

	if (likely(queue != NULL))
		qcache = cpool->cache->qcache[*queue];
@ -422,6 +476,8 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool,
		__hws_cnt_query_raw(cpool, *cnt_id,
				    &cpool->pool[iidx].reset.hits,
				    &cpool->pool[iidx].reset.bytes);
		cpool->pool[iidx].in_used = true;
		cpool->pool[iidx].age_idx = age_idx;
		return 0;
	}
	ret = rte_ring_dequeue_zc_burst_elem_start(qcache, sizeof(cnt_id_t), 1,
@ -455,6 +511,8 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool,
			    &cpool->pool[iidx].reset.bytes);
	rte_ring_dequeue_zc_elem_finish(qcache, 1);
	cpool->pool[iidx].share = 0;
	cpool->pool[iidx].in_used = true;
	cpool->pool[iidx].age_idx = age_idx;
	return 0;
}
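
The new ``age_idx`` argument binds an allocated counter to its AGE parameter at
allocation time. A minimal usage sketch (illustrative only; the mlx5_hws_cnt_pool_put()
signature is assumed from the hunk headers above, and error handling is elided):

cnt_id_t cnt_id;
uint32_t queue = 0;

/* Allocate a counter from the queue cache and tie it to AGE index age_idx. */
if (mlx5_hws_cnt_pool_get(cpool, &queue, &cnt_id, age_idx) == 0) {
	/* ... attach the counter to a flow rule ... */
	/* On flow destroy, return it; the put path clears in_used. */
	mlx5_hws_cnt_pool_put(cpool, &queue, &cnt_id);
}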

@ -478,16 +536,16 @@ mlx5_hws_cnt_pool_get_action_offset(struct mlx5_hws_cnt_pool *cpool,
}

static __rte_always_inline int
mlx5_hws_cnt_shared_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id)
mlx5_hws_cnt_shared_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id,
			uint32_t age_idx)
{
	int ret;
	uint32_t iidx;

	ret = mlx5_hws_cnt_pool_get(cpool, NULL, cnt_id);
	ret = mlx5_hws_cnt_pool_get(cpool, NULL, cnt_id, age_idx);
	if (ret != 0)
		return ret;
	iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
	MLX5_ASSERT(cpool->pool[iidx].share == 0);
	cpool->pool[iidx].share = 1;
	return 0;
}
@ -513,10 +571,73 @@ mlx5_hws_cnt_is_shared(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
	return cpool->pool[iidx].share ? true : false;
}

static __rte_always_inline void
mlx5_hws_cnt_age_set(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,
		     uint32_t age_idx)
{
	uint32_t iidx = mlx5_hws_cnt_iidx(cpool, cnt_id);

	MLX5_ASSERT(cpool->pool[iidx].share);
	cpool->pool[iidx].age_idx = age_idx;
}

static __rte_always_inline uint32_t
mlx5_hws_cnt_age_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
{
	uint32_t iidx = mlx5_hws_cnt_iidx(cpool, cnt_id);

	MLX5_ASSERT(cpool->pool[iidx].share);
	return cpool->pool[iidx].age_idx;
}

static __rte_always_inline cnt_id_t
mlx5_hws_age_cnt_get(struct mlx5_priv *priv, struct mlx5_hws_age_param *param,
		     uint32_t age_idx)
{
	if (!param->own_cnt_index) {
		/* Create an indirect counter for internal usage. */
		if (mlx5_hws_cnt_shared_get(priv->hws_cpool,
					    &param->own_cnt_index, age_idx) < 0)
			return 0;
		param->nb_cnts++;
	}
	return param->own_cnt_index;
}

static __rte_always_inline void
mlx5_hws_age_nb_cnt_increase(struct mlx5_priv *priv, uint32_t age_idx)
{
	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
	struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
	struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);

	MLX5_ASSERT(param != NULL);
	param->nb_cnts++;
}

static __rte_always_inline void
mlx5_hws_age_nb_cnt_decrease(struct mlx5_priv *priv, uint32_t age_idx)
{
	struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
	struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
	struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);

	if (param != NULL)
		param->nb_cnts--;
}

static __rte_always_inline bool
mlx5_hws_age_is_indirect(uint32_t age_idx)
{
	return (age_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET) ==
		MLX5_INDIRECT_ACTION_TYPE_AGE ? true : false;
}

/* init HWS counter pool. */
struct mlx5_hws_cnt_pool *
mlx5_hws_cnt_pool_init(const struct mlx5_hws_cnt_pool_cfg *pcfg,
		       const struct mlx5_hws_cache_param *ccfg);
mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh,
		       const struct mlx5_hws_cnt_pool_cfg *pcfg,
		       const struct mlx5_hws_cache_param *ccfg);

void
mlx5_hws_cnt_pool_deinit(struct mlx5_hws_cnt_pool *cntp);
@ -555,4 +676,28 @@ mlx5_hws_cnt_svc_init(struct mlx5_dev_ctx_shared *sh);
void
mlx5_hws_cnt_svc_deinit(struct mlx5_dev_ctx_shared *sh);

int
mlx5_hws_age_action_destroy(struct mlx5_priv *priv, uint32_t idx,
			    struct rte_flow_error *error);

uint32_t
mlx5_hws_age_action_create(struct mlx5_priv *priv, uint32_t queue_id,
			   bool shared, const struct rte_flow_action_age *age,
			   uint32_t flow_idx, struct rte_flow_error *error);

int
mlx5_hws_age_action_update(struct mlx5_priv *priv, uint32_t idx,
			   const void *update, struct rte_flow_error *error);

void *
mlx5_hws_age_context_get(struct mlx5_priv *priv, uint32_t idx);

int
mlx5_hws_age_pool_init(struct rte_eth_dev *dev,
		       const struct rte_flow_port_attr *attr,
		       uint16_t nb_queues);

void
mlx5_hws_age_pool_destroy(struct mlx5_priv *priv);

#endif /* _MLX5_HWS_CNT_H_ */
@ -170,6 +170,14 @@ struct mlx5_l3t_tbl {
typedef int32_t (*mlx5_l3t_alloc_callback_fn)(void *ctx,
					      union mlx5_l3t_data *data);

/*
 * The default ipool threshold value indicates which per_core_cache
 * value to set.
 */
#define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
/* The default min local cache size. */
#define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)

/*
 * The indexed memory entry index is made up of trunk index and offset of
 * the entry in the trunk. Since the entry index is 32 bits, in case user
@ -207,7 +215,7 @@ struct mlx5_indexed_pool_config {
 */
	uint32_t need_lock:1;
	/* Lock is needed for multiple thread usage. */
	uint32_t release_mem_en:1; /* Rlease trunk when it is free. */
	uint32_t release_mem_en:1; /* Release trunk when it is free. */
	uint32_t max_idx; /* The maximum index that can be allocated. */
	uint32_t per_core_cache;
	/*